From de3ef8866ac97c7234f82ef739e3195bf8e67bc8 Mon Sep 17 00:00:00 2001 From: Chris Molozian Date: Fri, 29 Oct 2021 22:18:14 +0100 Subject: [PATCH] Upgrade GRPC, GRPC-Gateway, Protobuf, PGX, and other dependencies. --- CHANGELOG.md | 9 +- go.mod | 87 +- go.sum | 486 +- gtreap_compact/iterator.go | 152 - gtreap_compact/reader.go | 66 - gtreap_compact/store.go | 118 - gtreap_compact/writer.go | 77 - iap/iap.go | 2 +- server/api.go | 2 +- server/api_authenticate.go | 2 +- server/console.go | 2 +- server/console_authenticate.go | 2 +- server/match_common.go | 245 + server/match_registry.go | 132 +- server/matchmaker.go | 154 +- server/metrics.go | 4 +- server/pipeline_match.go | 11 +- server/runtime_javascript_init.go | 20 +- server/runtime_javascript_nakama.go | 2 +- server/runtime_lua_nakama.go | 2 +- social/social.go | 2 +- .../RoaringBitmap/roaring/.gitignore | 1 - .../RoaringBitmap/roaring/.travis.yml | 9 +- .../RoaringBitmap/roaring/CONTRIBUTORS | 4 +- .../github.com/RoaringBitmap/roaring/Makefile | 6 +- .../RoaringBitmap/roaring/README.md | 162 +- .../RoaringBitmap/roaring/arraycontainer.go | 106 +- .../roaring/arraycontainer_gen.go | 134 - .../RoaringBitmap/roaring/bitmapcontainer.go | 90 +- .../roaring/bitmapcontainer_gen.go | 415 -- .../RoaringBitmap/roaring/byte_input.go | 161 - .../RoaringBitmap/roaring/fastaggregation.go | 136 +- .../github.com/RoaringBitmap/roaring/go.mod | 16 - .../github.com/RoaringBitmap/roaring/go.sum | 30 - .../roaring/internal/byte_input.go | 166 + .../RoaringBitmap/roaring/internal/pools.go | 21 + .../RoaringBitmap/roaring/manyiterator.go | 14 + .../RoaringBitmap/roaring/parallel.go | 9 +- .../RoaringBitmap/roaring/roaring.go | 155 +- .../RoaringBitmap/roaring/roaringarray.go | 173 +- .../RoaringBitmap/roaring/roaringarray_gen.go | 529 -- .../RoaringBitmap/roaring/runcontainer.go | 608 +- .../RoaringBitmap/roaring/runcontainer_gen.go | 1104 --- .../RoaringBitmap/roaring/serialization.go | 15 - .../roaring/serialization_generic.go | 
2 +- .../roaring/serialization_littleendian.go | 285 +- .../RoaringBitmap/roaring/setutil.go | 60 - .../RoaringBitmap/roaring/setutil_arm64.go | 6 + .../RoaringBitmap/roaring/setutil_arm64.s | 132 + .../RoaringBitmap/roaring/setutil_generic.go | 63 + .../github.com/RoaringBitmap/roaring/smat.go | 2 +- .../github.com/RoaringBitmap/roaring/util.go | 5 +- .../github.com/axiomhq/hyperloglog/.gitignore | 14 + vendor/github.com/axiomhq/hyperloglog/LICENSE | 21 + .../github.com/axiomhq/hyperloglog/README.md | 47 + .../axiomhq/hyperloglog/compressed.go | 180 + .../axiomhq/hyperloglog/hyperloglog.go | 424 ++ .../axiomhq/hyperloglog/registers.go | 114 + .../github.com/axiomhq/hyperloglog/sparse.go | 92 + .../github.com/axiomhq/hyperloglog/utils.go | 70 + .../bitset/.gitignore | 0 .../bitset/.travis.yml | 0 .../{willf => bits-and-blooms}/bitset/LICENSE | 0 .../bitset/README.md | 25 +- .../bitset/azure-pipelines.yml | 39 + .../bitset/bitset.go | 107 +- .../bitset/popcnt.go | 0 .../bitset/popcnt_19.go | 0 .../bitset/popcnt_amd64.go | 0 .../bitset/popcnt_amd64.s | 0 .../bitset/popcnt_generic.go | 0 .../bitset/trailing_zeros_18.go | 0 .../bitset/trailing_zeros_19.go | 0 .../blevesearch/bleve/v2/.travis.yml | 25 - .../blevesearch/bleve/v2/CONTRIBUTING.md | 16 - .../github.com/blevesearch/bleve/v2/README.md | 68 - .../v2/analysis/analyzer/standard/standard.go | 52 - .../v2/analysis/datetime/flexible/flexible.go | 64 - .../v2/analysis/datetime/optional/optional.go | 45 - .../blevesearch/bleve/v2/analysis/freq.go | 70 - .../bleve/v2/analysis/lang/en/analyzer_en.go | 70 - .../v2/analysis/lang/en/stop_filter_en.go | 33 - .../blevesearch/bleve/v2/builder.go | 94 - .../github.com/blevesearch/bleve/v2/config.go | 95 - vendor/github.com/blevesearch/bleve/v2/doc.go | 38 - .../blevesearch/bleve/v2/document/document.go | 130 - .../blevesearch/bleve/v2/document/field.go | 45 - .../bleve/v2/document/field_boolean.go | 137 - .../bleve/v2/document/field_composite.go | 135 - 
.../bleve/v2/document/field_datetime.go | 173 - .../bleve/v2/document/field_geopoint.go | 166 - .../bleve/v2/document/field_numeric.go | 159 - .../bleve/v2/document/field_text.go | 157 - .../github.com/blevesearch/bleve/v2/error.go | 50 - .../blevesearch/bleve/v2/geo/parse.go | 181 - vendor/github.com/blevesearch/bleve/v2/go.mod | 29 - vendor/github.com/blevesearch/bleve/v2/go.sum | 128 - .../github.com/blevesearch/bleve/v2/index.go | 308 - .../bleve/v2/index/scorch/README.md | 367 - .../bleve/v2/index/scorch/builder.go | 333 - .../bleve/v2/index/scorch/event.go | 64 - .../blevesearch/bleve/v2/index/scorch/int.go | 92 - .../bleve/v2/index/scorch/introducer.go | 449 -- .../bleve/v2/index/scorch/merge.go | 504 -- .../bleve/v2/index/scorch/persister.go | 990 --- .../bleve/v2/index/scorch/rollback.go | 212 - .../bleve/v2/index/scorch/scorch.go | 669 -- .../bleve/v2/index/scorch/segment_plugin.go | 133 - .../bleve/v2/index/scorch/snapshot_index.go | 764 -- .../v2/index/scorch/snapshot_index_doc.go | 80 - .../v2/index/scorch/snapshot_index_tfr.go | 188 - .../bleve/v2/index/scorch/snapshot_segment.go | 279 - .../bleve/v2/index/upsidedown/analysis.go | 129 - .../v2/index/upsidedown/benchmark_all.sh | 8 - .../bleve/v2/index/upsidedown/dump.go | 174 - .../bleve/v2/index/upsidedown/field_cache.go | 88 - .../bleve/v2/index/upsidedown/field_dict.go | 78 - .../bleve/v2/index/upsidedown/index_reader.go | 225 - .../bleve/v2/index/upsidedown/reader.go | 376 - .../bleve/v2/index/upsidedown/row.go | 1141 --- .../bleve/v2/index/upsidedown/row_merge.go | 76 - .../bleve/v2/index/upsidedown/stats.go | 55 - .../index/upsidedown/store/boltdb/iterator.go | 85 - .../index/upsidedown/store/boltdb/reader.go | 73 - .../v2/index/upsidedown/store/boltdb/store.go | 181 - .../index/upsidedown/store/boltdb/writer.go | 95 - .../index/upsidedown/store/gtreap/iterator.go | 152 - .../index/upsidedown/store/gtreap/reader.go | 66 - .../v2/index/upsidedown/store/gtreap/store.go | 82 - 
.../index/upsidedown/store/gtreap/writer.go | 76 - .../bleve/v2/index/upsidedown/upsidedown.go | 1069 --- .../v2/index/upsidedown/upsidedown.pb.go | 688 -- .../v2/index/upsidedown/upsidedown.proto | 14 - .../blevesearch/bleve/v2/index_alias.go | 37 - .../blevesearch/bleve/v2/index_alias_impl.go | 612 -- .../blevesearch/bleve/v2/index_impl.go | 912 --- .../blevesearch/bleve/v2/index_meta.go | 97 - .../blevesearch/bleve/v2/index_stats.go | 75 - .../blevesearch/bleve/v2/mapping.go | 65 - .../blevesearch/bleve/v2/mapping/analysis.go | 99 - .../blevesearch/bleve/v2/mapping/document.go | 558 -- .../blevesearch/bleve/v2/mapping/field.go | 359 - .../blevesearch/bleve/v2/mapping/index.go | 444 -- .../blevesearch/bleve/v2/mapping/mapping.go | 58 - .../blevesearch/bleve/v2/mapping/reflect.go | 92 - .../github.com/blevesearch/bleve/v2/query.go | 218 - .../blevesearch/bleve/v2/registry/analyzer.go | 89 - .../blevesearch/bleve/v2/registry/cache.go | 87 - .../bleve/v2/registry/char_filter.go | 89 - .../bleve/v2/registry/datetime_parser.go | 89 - .../bleve/v2/registry/fragment_formatter.go | 89 - .../bleve/v2/registry/fragmenter.go | 89 - .../bleve/v2/registry/highlighter.go | 89 - .../bleve/v2/registry/index_type.go | 45 - .../blevesearch/bleve/v2/registry/registry.go | 184 - .../blevesearch/bleve/v2/registry/store.go | 51 - .../bleve/v2/registry/token_filter.go | 89 - .../bleve/v2/registry/token_maps.go | 89 - .../bleve/v2/registry/tokenizer.go | 89 - .../github.com/blevesearch/bleve/v2/search.go | 633 -- .../blevesearch/bleve/v2/search/collector.go | 52 - .../bleve/v2/search/collector/list.go | 86 - .../bleve/v2/search/collector/topn.go | 412 -- .../bleve/v2/search/facet/benchmark_data.txt | 2909 -------- .../v2/search/facet/facet_builder_datetime.go | 163 - .../v2/search/facet/facet_builder_numeric.go | 157 - .../v2/search/facet/facet_builder_terms.go | 117 - .../bleve/v2/search/facets_builder.go | 341 - .../v2/search/highlight/format/html/html.go | 91 - 
.../highlight/fragmenter/simple/simple.go | 147 - .../bleve/v2/search/highlight/highlighter.go | 64 - .../search/highlight/highlighter/html/html.go | 50 - .../simple/fragment_scorer_simple.go | 49 - .../highlighter/simple/highlighter_simple.go | 221 - .../v2/search/highlight/term_locations.go | 105 - .../bleve/v2/search/levenshtein.go | 114 - .../bleve/v2/search/query/bool_field.go | 64 - .../bleve/v2/search/query/boolean.go | 248 - .../bleve/v2/search/query/conjunction.go | 112 - .../bleve/v2/search/query/date_range.go | 191 - .../bleve/v2/search/query/disjunction.go | 124 - .../bleve/v2/search/query/docid.go | 49 - .../bleve/v2/search/query/fuzzy.go | 77 - .../bleve/v2/search/query/geo_boundingbox.go | 113 - .../v2/search/query/geo_boundingpolygon.go | 94 - .../bleve/v2/search/query/geo_distance.go | 100 - .../bleve/v2/search/query/match.go | 176 - .../bleve/v2/search/query/match_all.go | 55 - .../bleve/v2/search/query/match_none.go | 55 - .../bleve/v2/search/query/match_phrase.go | 113 - .../bleve/v2/search/query/multi_phrase.go | 80 - .../bleve/v2/search/query/numeric_range.go | 87 - .../bleve/v2/search/query/phrase.go | 77 - .../bleve/v2/search/query/prefix.go | 62 - .../bleve/v2/search/query/query.go | 361 - .../bleve/v2/search/query/query_string.go | 67 - .../bleve/v2/search/query/query_string.y | 338 - .../v2/search/query/query_string_parser.go | 85 - .../bleve/v2/search/query/regexp.go | 81 - .../blevesearch/bleve/v2/search/query/term.go | 61 - .../bleve/v2/search/query/term_range.go | 95 - .../bleve/v2/search/query/wildcard.go | 93 - .../v2/search/scorer/scorer_conjunction.go | 72 - .../bleve/v2/search/scorer/scorer_constant.go | 127 - .../v2/search/scorer/scorer_disjunction.go | 83 - .../bleve/v2/search/scorer/scorer_term.go | 203 - .../blevesearch/bleve/v2/search/search.go | 378 - .../bleve/v2/search/searcher/search_docid.go | 109 - .../bleve/v2/search/searcher/search_fuzzy.go | 117 - .../v2/search/searcher/search_match_all.go | 121 - 
.../bleve/v2/search/searcher/search_regexp.go | 131 - .../bleve/v2/search/searcher/search_term.go | 141 - .../v2/search/searcher/search_term_range.go | 85 - .../blevesearch/bleve/v2/search/sort.go | 746 -- .../blevesearch/bleve/v2/size/sizes.go | 59 - .../blevesearch/bleve_index_api/.golangci.yml | 37 - .../blevesearch/bleve_index_api/README.md | 11 - .../blevesearch/bleve_index_api/analysis.go | 53 - .../blevesearch/bleve_index_api/batch.go | 101 - .../blevesearch/bleve_index_api/document.go | 78 - .../blevesearch/bleve_index_api/freq.go | 106 - .../blevesearch/bleve_index_api/go.mod | 3 - .../blevesearch/bleve_index_api/go.sum | 0 .../blevesearch/bleve_index_api/index.go | 212 - .../bleve_index_api/indexing_options.go | 77 - .../blevesearch/bleve_index_api/optimize.go | 39 - .../blevesearch/go-porterstemmer/go.mod | 3 - vendor/github.com/blevesearch/mmap-go/go.mod | 3 - vendor/github.com/blevesearch/mmap-go/go.sum | 2 - .../scorch_segment_api/v2/.golangci.yml | 42 - .../scorch_segment_api/v2/README.md | 11 - .../blevesearch/scorch_segment_api/v2/go.mod | 10 - .../blevesearch/scorch_segment_api/v2/go.sum | 35 - vendor/github.com/blevesearch/segment/go.mod | 3 - .../blevesearch/snowballstem/go.mod | 3 - .../upsidedown_store_api/README.md | 7 - .../blevesearch/upsidedown_store_api/batch.go | 62 - .../blevesearch/upsidedown_store_api/go.mod | 3 - .../upsidedown_store_api/kvstore.go | 174 - .../blevesearch/upsidedown_store_api/merge.go | 64 - .../upsidedown_store_api/multiget.go | 33 - vendor/github.com/blevesearch/vellum/fst.go | 2 +- vendor/github.com/blevesearch/vellum/go.mod | 10 - vendor/github.com/blevesearch/vellum/go.sum | 40 - .../blevesearch/zapx/v11/.golangci.yml | 29 - .../github.com/blevesearch/zapx/v11/LICENSE | 202 - .../github.com/blevesearch/zapx/v11/README.md | 163 - .../github.com/blevesearch/zapx/v11/build.go | 156 - .../blevesearch/zapx/v11/contentcoder.go | 230 - .../github.com/blevesearch/zapx/v11/count.go | 61 - 
.../github.com/blevesearch/zapx/v11/dict.go | 158 - .../blevesearch/zapx/v11/docvalues.go | 306 - .../blevesearch/zapx/v11/enumerator.go | 126 - vendor/github.com/blevesearch/zapx/v11/go.mod | 13 - vendor/github.com/blevesearch/zapx/v11/go.sum | 73 - .../blevesearch/zapx/v11/intcoder.go | 172 - .../blevesearch/zapx/v11/memuvarint.go | 94 - .../github.com/blevesearch/zapx/v11/merge.go | 856 --- vendor/github.com/blevesearch/zapx/v11/new.go | 817 -- .../blevesearch/zapx/v11/posting.go | 908 --- .../github.com/blevesearch/zapx/v11/read.go | 43 - .../blevesearch/zapx/v11/segment.go | 570 -- .../github.com/blevesearch/zapx/v11/sizes.go | 59 - .../github.com/blevesearch/zapx/v11/write.go | 145 - vendor/github.com/blevesearch/zapx/v11/zap.md | 177 - .../blevesearch/zapx/v12/.golangci.yml | 29 - .../github.com/blevesearch/zapx/v12/LICENSE | 202 - .../github.com/blevesearch/zapx/v12/README.md | 163 - .../github.com/blevesearch/zapx/v12/build.go | 156 - .../github.com/blevesearch/zapx/v12/chunk.go | 54 - vendor/github.com/blevesearch/zapx/v12/go.mod | 13 - vendor/github.com/blevesearch/zapx/v12/go.sum | 73 - .../blevesearch/zapx/v12/intDecoder.go | 109 - .../blevesearch/zapx/v12/intcoder.go | 203 - .../blevesearch/zapx/v12/memuvarint.go | 94 - .../github.com/blevesearch/zapx/v12/merge.go | 843 --- vendor/github.com/blevesearch/zapx/v12/new.go | 830 --- .../blevesearch/zapx/v12/posting.go | 796 -- .../github.com/blevesearch/zapx/v12/read.go | 43 - .../blevesearch/zapx/v12/segment.go | 570 -- .../github.com/blevesearch/zapx/v12/sizes.go | 59 - .../github.com/blevesearch/zapx/v12/write.go | 145 - vendor/github.com/blevesearch/zapx/v12/zap.md | 177 - .../blevesearch/zapx/v13/.gitignore | 12 - .../blevesearch/zapx/v13/.golangci.yml | 29 - .../github.com/blevesearch/zapx/v13/LICENSE | 202 - .../github.com/blevesearch/zapx/v13/README.md | 163 - .../github.com/blevesearch/zapx/v13/build.go | 156 - .../github.com/blevesearch/zapx/v13/chunk.go | 54 - 
.../blevesearch/zapx/v13/contentcoder.go | 243 - .../github.com/blevesearch/zapx/v13/count.go | 61 - .../github.com/blevesearch/zapx/v13/dict.go | 158 - .../blevesearch/zapx/v13/docvalues.go | 311 - .../blevesearch/zapx/v13/enumerator.go | 138 - vendor/github.com/blevesearch/zapx/v13/go.mod | 13 - vendor/github.com/blevesearch/zapx/v13/go.sum | 73 - .../blevesearch/zapx/v13/intDecoder.go | 109 - .../blevesearch/zapx/v13/intcoder.go | 206 - .../blevesearch/zapx/v13/memuvarint.go | 94 - .../github.com/blevesearch/zapx/v13/merge.go | 843 --- .../blevesearch/zapx/v13/posting.go | 796 -- .../github.com/blevesearch/zapx/v13/read.go | 43 - .../blevesearch/zapx/v13/segment.go | 570 -- .../github.com/blevesearch/zapx/v13/sizes.go | 59 - .../github.com/blevesearch/zapx/v13/write.go | 145 - vendor/github.com/blevesearch/zapx/v13/zap.md | 177 - .../blevesearch/zapx/v14/.golangci.yml | 29 - .../github.com/blevesearch/zapx/v14/LICENSE | 202 - .../github.com/blevesearch/zapx/v14/README.md | 163 - .../github.com/blevesearch/zapx/v14/build.go | 156 - .../blevesearch/zapx/v14/contentcoder.go | 243 - .../github.com/blevesearch/zapx/v14/count.go | 61 - .../github.com/blevesearch/zapx/v14/dict.go | 158 - .../blevesearch/zapx/v14/docvalues.go | 311 - .../blevesearch/zapx/v14/enumerator.go | 138 - vendor/github.com/blevesearch/zapx/v14/go.mod | 13 - vendor/github.com/blevesearch/zapx/v14/go.sum | 73 - .../blevesearch/zapx/v14/intDecoder.go | 116 - .../blevesearch/zapx/v14/intcoder.go | 206 - .../github.com/blevesearch/zapx/v14/merge.go | 843 --- vendor/github.com/blevesearch/zapx/v14/new.go | 830 --- .../github.com/blevesearch/zapx/v14/read.go | 43 - .../blevesearch/zapx/v14/segment.go | 570 -- .../github.com/blevesearch/zapx/v14/sizes.go | 59 - .../github.com/blevesearch/zapx/v14/write.go | 145 - vendor/github.com/blevesearch/zapx/v14/zap.md | 177 - .../blevesearch/zapx/v15/.gitignore | 12 - .../blevesearch/zapx/v15/.golangci.yml | 29 - .../github.com/blevesearch/zapx/v15/LICENSE | 202 
- .../github.com/blevesearch/zapx/v15/README.md | 163 - .../github.com/blevesearch/zapx/v15/build.go | 156 - .../github.com/blevesearch/zapx/v15/chunk.go | 67 - .../blevesearch/zapx/v15/contentcoder.go | 243 - .../github.com/blevesearch/zapx/v15/count.go | 61 - .../github.com/blevesearch/zapx/v15/dict.go | 158 - .../blevesearch/zapx/v15/docvalues.go | 311 - .../blevesearch/zapx/v15/enumerator.go | 138 - vendor/github.com/blevesearch/zapx/v15/go.mod | 13 - vendor/github.com/blevesearch/zapx/v15/go.sum | 73 - .../blevesearch/zapx/v15/memuvarint.go | 94 - .../github.com/blevesearch/zapx/v15/merge.go | 895 --- vendor/github.com/blevesearch/zapx/v15/new.go | 837 --- .../github.com/blevesearch/zapx/v15/plugin.go | 27 - .../blevesearch/zapx/v15/posting.go | 869 --- .../github.com/blevesearch/zapx/v15/read.go | 43 - .../blevesearch/zapx/v15/segment.go | 570 -- .../github.com/blevesearch/zapx/v15/sizes.go | 59 - .../github.com/blevesearch/zapx/v15/write.go | 145 - vendor/github.com/blevesearch/zapx/v15/zap.md | 177 - .../bleve/v2 => blugelabs/bluge}/.gitignore | 2 +- .../github.com/blugelabs/bluge/.golangci.yml | 248 + vendor/github.com/blugelabs/bluge/AUTHORS | 13 + .../blugelabs/bluge/CONTRIBUTING.md | 11 + .../bleve/v2 => blugelabs/bluge}/LICENSE | 0 vendor/github.com/blugelabs/bluge/README.md | 85 + .../bluge/analysis/analyzer/keyword.go} | 19 +- .../bluge/analysis/analyzer/simple.go} | 23 +- .../bluge/analysis/analyzer/standard.go | 30 + .../blugelabs/bluge/analysis/analyzer/web.go | 32 + .../blugelabs/bluge/analysis/freq.go | 200 + .../bluge/analysis/lang/en/analyzer_en.go | 42 + .../analysis/lang/en/possessive_filter_en.go | 19 +- .../analysis/lang/en/stemmer_en_snowball.go | 16 +- .../bluge/analysis/lang/en/stop_filter_en.go} | 17 +- .../bluge}/analysis/lang/en/stop_words_en.go | 19 +- .../blugelabs/bluge/analysis/size.go | 16 + .../bluge}/analysis/test_words.txt | 0 .../bluge/analysis/token/apostrophe.go | 41 + .../bluge/analysis/token/camelcase.go | 65 + 
.../bluge/analysis/token/camelcase_parser.go | 100 + .../bluge/analysis/token/camelcase_states.go | 87 + .../blugelabs/bluge/analysis/token/dict.go | 94 + .../bluge/analysis/token/edgengram.go | 96 + .../blugelabs/bluge/analysis/token/elision.go | 54 + .../blugelabs/bluge/analysis/token/keyword.go | 39 + .../blugelabs/bluge/analysis/token/length.go | 57 + .../bluge/analysis/token}/lowercase.go | 24 +- .../blugelabs/bluge/analysis/token/ngram.go | 67 + .../bluge/analysis/token}/porter.go | 21 +- .../blugelabs/bluge/analysis/token/reverse.go | 62 + .../blugelabs/bluge/analysis/token/shingle.go | 124 + .../bluge/analysis/token}/stop.go | 33 +- .../bluge/analysis/token/truncate.go | 41 + .../bluge/analysis/token/unicodenorm.go | 37 + .../blugelabs/bluge/analysis/token/unique.go | 47 + .../bluge/analysis/tokenizer/character.go | 73 + .../bluge/analysis/tokenizer/exception.go | 90 + .../bluge/analysis/tokenizer/letter.go} | 16 +- .../bluge/analysis/tokenizer/regexp.go | 64 + .../bluge/analysis/tokenizer}/single.go | 38 +- .../bluge/analysis/tokenizer}/unicode.go | 33 +- .../blugelabs/bluge/analysis/tokenizer/web.go | 32 + .../bluge/analysis/tokenizer/whitespace.go} | 16 +- .../bluge}/analysis/tokenmap.go | 13 +- .../v2 => blugelabs/bluge}/analysis/type.go | 32 +- .../v2 => blugelabs/bluge}/analysis/util.go | 0 vendor/github.com/blugelabs/bluge/batch.go | 36 + vendor/github.com/blugelabs/bluge/config.go | 118 + vendor/github.com/blugelabs/bluge/doc.go | 70 + vendor/github.com/blugelabs/bluge/document.go | 93 + vendor/github.com/blugelabs/bluge/field.go | 452 ++ .../github.com/blugelabs/bluge/index/batch.go | 54 + .../blugelabs/bluge/index/communication.go | 57 + .../blugelabs/bluge/index/config.go | 222 + .../github.com/blugelabs/bluge/index/count.go | 83 + .../blugelabs/bluge/index/deletion.go | 96 + .../bluge/index/dictionary.go} | 59 +- .../blugelabs/bluge/index/directory.go | 74 + .../blugelabs/bluge/index/directory_fs.go | 273 + 
.../blugelabs/bluge/index/directory_fs_nix.go | 35 + .../bluge/index/directory_fs_windows.go} | 19 +- .../blugelabs/bluge/index/directory_mem.go | 107 + .../scorch => blugelabs/bluge/index}/empty.go | 16 +- .../github.com/blugelabs/bluge/index/event.go | 37 + .../blugelabs/bluge/index/introducer.go | 356 + .../blugelabs/bluge/index/lock/lock.go | 55 + .../bluge/index/lock/lock_nix.go} | 37 +- .../bluge/index/lock/lock_windows.go | 48 + .../github.com/blugelabs/bluge/index/merge.go | 374 + .../bluge/index}/mergeplan/merge_plan.go | 80 +- .../bluge/index}/mergeplan/sort.go | 4 +- .../bluge/index}/optimize.go | 139 +- .../blugelabs/bluge/index/persister.go | 379 + .../blugelabs/bluge/index/postings.go | 137 + .../blugelabs/bluge/index/postings_all.go | 112 + .../blugelabs/bluge/index/segment.go | 92 + .../blugelabs/bluge/index/segment_plugin.go | 117 + .../github.com/blugelabs/bluge/index/sizes.go | 49 + .../blugelabs/bluge/index/snapshot.go | 775 ++ .../scorch => blugelabs/bluge/index}/stats.go | 63 +- .../bluge/index}/unadorned.go | 73 +- .../blugelabs/bluge/index/writer.go | 556 ++ .../blugelabs/bluge/index/writer_offline.go | 215 + .../github.com/blugelabs/bluge/multisearch.go | 95 + .../v2 => blugelabs/bluge}/numeric/bin.go | 14 + .../v2 => blugelabs/bluge}/numeric/float.go | 4 +- .../bluge/numeric}/geo/README.md | 2 +- .../v2 => blugelabs/bluge/numeric}/geo/geo.go | 19 +- .../bluge/numeric}/geo/geo_dist.go | 34 +- .../bluge/numeric}/geo/geohash.go | 22 +- .../blugelabs/bluge/numeric/geo/parse.go | 196 + .../bluge/numeric}/geo/sloppy.go | 7 +- .../bluge}/numeric/prefix_coded.go | 10 +- vendor/github.com/blugelabs/bluge/query.go | 1476 ++++ vendor/github.com/blugelabs/bluge/reader.go | 100 + vendor/github.com/blugelabs/bluge/search.go | 271 + .../blugelabs/bluge/search/aggregations.go | 145 + .../bluge/search/aggregations/cardinality.go | 67 + .../bluge/search/aggregations/count.go | 35 + .../bluge/search/aggregations/duration.go | 54 + 
.../bluge/search/aggregations/filter.go | 131 + .../bluge/search/aggregations/metric.go | 173 + .../bluge/search/aggregations/percentiles.go | 82 + .../bluge/search/aggregations/range.go | 125 + .../bluge/search/aggregations/range_date.go | 130 + .../bluge/search/aggregations/terms.go | 168 + .../blugelabs/bluge/search/collector.go | 37 + .../blugelabs/bluge/search/collector/all.go | 107 + .../bluge}/search/collector/heap.go | 4 +- .../bluge/search/collector/iterator.go | 42 + .../bluge/search/collector/size.go} | 13 +- .../bluge}/search/collector/slice.go | 4 +- .../blugelabs/bluge/search/collector/topn.go | 264 + .../bluge}/search/explanation.go | 32 +- .../v2 => blugelabs/bluge}/search/pool.go | 23 +- .../blugelabs/bluge/search/search.go | 372 + .../search/searcher/ordered_searchers_list.go | 4 +- .../bluge}/search/searcher/search_boolean.go | 319 +- .../search/searcher/search_conjunction.go | 105 +- .../search/searcher/search_disjunction.go | 43 +- .../searcher/search_disjunction_heap.go | 130 +- .../searcher/search_disjunction_slice.go | 113 +- .../bluge}/search/searcher/search_filter.go | 31 +- .../bluge/search/searcher/search_fuzzy.go | 149 + .../search/searcher/search_geoboundingbox.go | 153 +- .../searcher/search_geopointdistance.go | 47 +- .../search/searcher/search_geopolygon.go | 38 +- .../bluge/search/searcher/search_match_all.go | 112 + .../search/searcher/search_match_none.go | 37 +- .../search/searcher/search_multi_term.go | 97 +- .../search/searcher/search_numeric_range.go | 121 +- .../bluge}/search/searcher/search_phrase.go | 127 +- .../bluge/search/searcher/search_regexp.go} | 40 +- .../bluge/search/searcher/search_term.go | 167 + .../search/searcher/search_term_prefix.go | 19 +- .../search/searcher/search_term_range.go | 67 + .../blugelabs/bluge/search/searcher/size.go | 67 + .../blugelabs/bluge/search/similarity/bm25.go | 137 + .../bluge/search/similarity/composite.go | 43 + .../bluge/search/similarity/constant.go | 34 + 
.../github.com/blugelabs/bluge/search/size.go | 48 + .../github.com/blugelabs/bluge/search/sort.go | 159 + .../blugelabs/bluge/search/source.go | 412 ++ .../v2 => blugelabs/bluge}/search/util.go | 9 +- vendor/github.com/blugelabs/bluge/size.go | 46 + vendor/github.com/blugelabs/bluge/writer.go | 79 + .../blugelabs/bluge/writer_offline.go | 72 + .../bluge_segment_api}/.gitignore | 1 - .../blugelabs/bluge_segment_api/.golangci.yml | 148 + .../blugelabs/bluge_segment_api/AUTHORS | 10 + .../bluge_segment_api/CONTRIBUTING.md | 11 + .../bluge_segment_api}/LICENSE | 0 .../blugelabs/bluge_segment_api/README.md | 9 + .../bluge_segment_api}/automaton.go | 3 +- .../blugelabs/bluge_segment_api/data.go | 129 + .../blugelabs/bluge_segment_api/document.go | 43 + .../bluge_segment_api}/segment.go | 112 +- .../blugelabs/bluge_segment_api/stats.go | 42 + .../zapx/v11 => blugelabs/ice}/.gitignore | 2 +- vendor/github.com/blugelabs/ice/.golangci.yml | 148 + vendor/github.com/blugelabs/ice/AUTHORS | 10 + .../github.com/blugelabs/ice/CONTRIBUTING.md | 11 + .../v2 => blugelabs/ice}/LICENSE | 0 vendor/github.com/blugelabs/ice/README.md | 334 + .../zapx/v14 => blugelabs/ice}/chunk.go | 42 +- .../v12 => blugelabs/ice}/contentcoder.go | 35 +- .../zapx/v12 => blugelabs/ice}/count.go | 30 +- .../zapx/v12 => blugelabs/ice}/dict.go | 51 +- .../zapx/v12 => blugelabs/ice}/docvalues.go | 144 +- .../zapx/v12 => blugelabs/ice}/enumerator.go | 20 +- vendor/github.com/blugelabs/ice/footer.go | 119 + .../query/boost.go => blugelabs/ice/freq.go} | 31 +- .../zapx/v15 => blugelabs/ice}/intcoder.go | 38 +- .../ice/intdecoder.go} | 49 +- vendor/github.com/blugelabs/ice/load.go | 130 + .../zapx/v14 => blugelabs/ice}/memuvarint.go | 41 +- vendor/github.com/blugelabs/ice/merge.go | 860 +++ .../zapx/v13 => blugelabs/ice}/new.go | 664 +- .../zapx/v14 => blugelabs/ice}/posting.go | 273 +- vendor/github.com/blugelabs/ice/read.go | 74 + vendor/github.com/blugelabs/ice/segment.go | 344 + 
vendor/github.com/blugelabs/ice/sizes.go | 55 + vendor/github.com/blugelabs/ice/stats.go | 54 + vendor/github.com/blugelabs/ice/write.go | 248 + .../v12 => blugelabs/query_string}/.gitignore | 3 +- .../blugelabs/query_string/.golangci.yml | 162 + .../query_string}/LICENSE | 0 .../blugelabs/query_string/README.md | 3 + .../blugelabs/query_string/query_string.y | 233 + .../query_string}/query_string.y.go | 345 +- .../query_string}/query_string_lex.go | 76 +- .../query_string/query_string_parser.go | 293 + vendor/github.com/caio/go-tdigest/.gitignore | 2 + .../caio/go-tdigest/CONTRIBUTING.md | 42 + vendor/github.com/caio/go-tdigest/Gopkg.lock | 41 + vendor/github.com/caio/go-tdigest/Gopkg.toml | 21 + vendor/github.com/caio/go-tdigest/LICENSE | 21 + vendor/github.com/caio/go-tdigest/README.md | 94 + vendor/github.com/caio/go-tdigest/options.go | 51 + vendor/github.com/caio/go-tdigest/rng.go | 40 + .../caio/go-tdigest/serialization.go | 202 + vendor/github.com/caio/go-tdigest/summary.go | 206 + vendor/github.com/caio/go-tdigest/tdigest.go | 445 ++ .../github.com/cespare/xxhash/v2/.travis.yml | 8 + .../LICENSE => cespare/xxhash/v2/LICENSE.txt} | 6 +- vendor/github.com/cespare/xxhash/v2/README.md | 67 + vendor/github.com/cespare/xxhash/v2/xxhash.go | 236 + .../cespare/xxhash/v2/xxhash_amd64.go | 13 + .../cespare/xxhash/v2/xxhash_amd64.s | 215 + .../cespare/xxhash/v2/xxhash_other.go | 76 + .../cespare/xxhash/v2/xxhash_safe.go | 15 + .../cespare/xxhash/v2/xxhash_unsafe.go | 46 + .../github.com/dgrijalva/jwt-go/.travis.yml | 13 - .../dgrijalva/jwt-go/MIGRATION_GUIDE.md | 97 - vendor/github.com/dgrijalva/jwt-go/claims.go | 134 - .../github.com/dgrijalva/jwt-go/map_claims.go | 94 - vendor/github.com/dgryski/go-metro/LICENSE | 24 + vendor/github.com/dgryski/go-metro/README | 6 + vendor/github.com/dgryski/go-metro/metro.py | 199 + .../github.com/dgryski/go-metro/metro128.go | 94 + vendor/github.com/dgryski/go-metro/metro64.go | 85 + .../github.com/dgryski/go-metro/metro_amd64.s 
| 372 + .../github.com/dgryski/go-metro/metro_stub.go | 10 + vendor/github.com/dop251/goja/README.md | 15 +- vendor/github.com/dop251/goja/array.go | 124 +- vendor/github.com/dop251/goja/array_sparse.go | 10 +- vendor/github.com/dop251/goja/ast/node.go | 210 +- .../github.com/dop251/goja/builtin_array.go | 53 +- .../github.com/dop251/goja/builtin_error.go | 26 + .../dop251/goja/builtin_function.go | 19 +- vendor/github.com/dop251/goja/builtin_json.go | 16 +- vendor/github.com/dop251/goja/builtin_map.go | 6 +- .../github.com/dop251/goja/builtin_object.go | 4 +- .../github.com/dop251/goja/builtin_promise.go | 597 ++ .../github.com/dop251/goja/builtin_proxy.go | 20 +- .../github.com/dop251/goja/builtin_regexp.go | 8 +- vendor/github.com/dop251/goja/builtin_set.go | 6 +- .../github.com/dop251/goja/builtin_string.go | 2 +- .../github.com/dop251/goja/builtin_symbol.go | 2 - .../dop251/goja/builtin_typedarrays.go | 148 +- vendor/github.com/dop251/goja/compiler.go | 225 +- .../github.com/dop251/goja/compiler_expr.go | 1085 ++- .../github.com/dop251/goja/compiler_stmt.go | 248 +- vendor/github.com/dop251/goja/date.go | 100 +- vendor/github.com/dop251/goja/date_parser.go | 9 +- vendor/github.com/dop251/goja/destruct.go | 277 + vendor/github.com/dop251/goja/file/file.go | 22 +- vendor/github.com/dop251/goja/func.go | 52 +- vendor/github.com/dop251/goja/go.mod | 13 - vendor/github.com/dop251/goja/go.sum | 23 - vendor/github.com/dop251/goja/object.go | 22 +- .../github.com/dop251/goja/object_dynamic.go | 12 +- .../github.com/dop251/goja/object_goslice.go | 2 +- .../dop251/goja/object_goslice_reflect.go | 4 +- .../dop251/goja/parser/expression.go | 590 +- vendor/github.com/dop251/goja/parser/lexer.go | 301 +- .../github.com/dop251/goja/parser/regexp.go | 34 +- .../dop251/goja/parser/statement.go | 129 +- vendor/github.com/dop251/goja/proxy.go | 41 +- vendor/github.com/dop251/goja/runtime.go | 473 +- vendor/github.com/dop251/goja/string.go | 1 + 
vendor/github.com/dop251/goja/string_ascii.go | 4 +- .../github.com/dop251/goja/string_unicode.go | 4 +- .../dop251/goja/token/token_const.go | 6 + vendor/github.com/dop251/goja/typedarrays.go | 140 +- vendor/github.com/dop251/goja/value.go | 32 +- vendor/github.com/dop251/goja/vm.go | 683 +- vendor/github.com/felixge/httpsnoop/go.mod | 3 - .../glycerine/go-unsnap-stream/.gitignore | 22 - .../glycerine/go-unsnap-stream/LICENSE | 21 - .../glycerine/go-unsnap-stream/README.md | 22 - .../glycerine/go-unsnap-stream/binary.dat | Bin 5592 -> 0 bytes .../go-unsnap-stream/binary.dat.snappy | Bin 5610 -> 0 bytes .../glycerine/go-unsnap-stream/rbuf.go | 375 - .../glycerine/go-unsnap-stream/snap.go | 100 - .../glycerine/go-unsnap-stream/unenc.txt | 1 - .../go-unsnap-stream/unenc.txt.snappy | Bin 31 -> 0 bytes .../glycerine/go-unsnap-stream/unsnap.go | 519 -- .../jwt-go => golang-jwt/jwt/v4}/.gitignore | 2 +- .../jwt-go => golang-jwt/jwt/v4}/LICENSE | 1 + .../golang-jwt/jwt/v4/MIGRATION_GUIDE.md | 22 + .../jwt-go => golang-jwt/jwt/v4}/README.md | 52 +- .../jwt/v4}/VERSION_HISTORY.md | 19 +- vendor/github.com/golang-jwt/jwt/v4/claims.go | 267 + .../jwt-go => golang-jwt/jwt/v4}/doc.go | 0 .../jwt-go => golang-jwt/jwt/v4}/ecdsa.go | 30 +- .../jwt/v4}/ecdsa_utils.go | 8 +- .../github.com/golang-jwt/jwt/v4/ed25519.go | 85 + .../golang-jwt/jwt/v4/ed25519_utils.go | 64 + .../jwt-go => golang-jwt/jwt/v4}/errors.go | 6 +- .../jwt-go => golang-jwt/jwt/v4}/hmac.go | 6 +- .../golang-jwt/jwt/v4/map_claims.go | 148 + .../jwt-go => golang-jwt/jwt/v4}/none.go | 2 +- .../jwt-go => golang-jwt/jwt/v4}/parser.go | 12 +- .../jwt-go => golang-jwt/jwt/v4}/rsa.go | 6 +- .../jwt-go => golang-jwt/jwt/v4}/rsa_pss.go | 6 +- .../jwt-go => golang-jwt/jwt/v4}/rsa_utils.go | 16 +- .../jwt/v4}/signing_method.go | 6 +- .../golang-jwt/jwt/v4/staticcheck.conf | 1 + .../jwt-go => golang-jwt/jwt/v4}/token.go | 32 +- vendor/github.com/golang-jwt/jwt/v4/types.go | 125 + vendor/github.com/golang/glog/README | 44 - 
vendor/github.com/golang/glog/README.md | 36 + vendor/github.com/golang/glog/glog.go | 4 +- .../protobuf/ptypes/struct/struct.pb.go | 78 - .../protobuf/ptypes/wrappers/wrappers.pb.go | 71 - vendor/github.com/golang/snappy/go.mod | 1 - vendor/github.com/gorilla/handlers/go.mod | 5 - vendor/github.com/gorilla/handlers/go.sum | 2 - vendor/github.com/gorilla/mux/go.mod | 3 - vendor/github.com/gorilla/websocket/go.mod | 3 - vendor/github.com/gorilla/websocket/go.sum | 0 .../v2/internal/casing/BUILD.bazel | 8 +- .../v2/internal/codegenerator/BUILD.bazel | 26 +- .../v2/internal/descriptor/BUILD.bazel | 54 +- .../internal/descriptor/apiconfig/BUILD.bazel | 8 +- .../descriptor/apiconfig/apiconfig.pb.go | 4 +- .../apiconfig/apiconfig.swagger.json | 9 +- .../descriptor/openapiconfig/BUILD.bazel | 10 +- .../openapiconfig/openapiconfig.pb.go | 4 +- .../openapiconfig/openapiconfig.swagger.json | 9 +- .../v2/internal/descriptor/registry.go | 96 +- .../v2/internal/generator/BUILD.bazel | 10 +- .../v2/internal/httprule/BUILD.bazel | 18 +- .../v2/internal/httprule/compile.go | 4 + .../v2/internal/httprule/parse.go | 4 + .../v2/protoc-gen-grpc-gateway/BUILD.bazel | 20 +- .../internal/gengateway/BUILD.bazel | 34 +- .../internal/gengateway/template.go | 25 +- .../v2/protoc-gen-openapiv2/BUILD.bazel | 20 +- .../v2/protoc-gen-openapiv2/defs.bzl | 28 +- .../internal/genopenapi/BUILD.bazel | 68 +- .../internal/genopenapi/generator.go | 5 + .../internal/genopenapi/naming.go | 110 + .../internal/genopenapi/template.go | 388 +- .../internal/genopenapi/types.go | 16 +- .../v2/protoc-gen-openapiv2/main.go | 25 +- .../protoc-gen-openapiv2/options/BUILD.bazel | 8 +- .../options/annotations.pb.go | 46 +- .../options/annotations.swagger.json | 9 +- .../options/openapiv2.pb.go | 42 +- .../options/openapiv2.swagger.json | 9 +- .../grpc-gateway/v2/runtime/BUILD.bazel | 70 +- .../grpc-gateway/v2/runtime/context.go | 44 +- .../grpc-gateway/v2/runtime/convert.go | 6 +- 
.../grpc-gateway/v2/runtime/errors.go | 37 +- .../grpc-gateway/v2/runtime/fieldmask.go | 15 + .../grpc-gateway/v2/runtime/handler.go | 27 +- .../grpc-gateway/v2/runtime/marshal_jsonpb.go | 37 + .../grpc-gateway/v2/runtime/mux.go | 2 +- .../grpc-gateway/v2/runtime/query.go | 12 +- .../grpc-gateway/v2/utilities/BUILD.bazel | 12 +- .../nakama-common/rtapi/realtime.pb.go | 634 +- .../nakama-common/rtapi/realtime.proto | 5 +- vendor/github.com/jackc/chunkreader/v2/go.mod | 3 - vendor/github.com/jackc/pgconn/CHANGELOG.md | 13 + vendor/github.com/jackc/pgconn/config.go | 22 +- vendor/github.com/jackc/pgconn/defaults.go | 13 + .../jackc/pgconn/defaults_windows.go | 13 + vendor/github.com/jackc/pgconn/errors.go | 42 +- vendor/github.com/jackc/pgconn/go.mod | 15 - vendor/github.com/jackc/pgconn/go.sum | 136 - vendor/github.com/jackc/pgconn/pgconn.go | 57 +- vendor/github.com/jackc/pgerrcode/go.mod | 3 - vendor/github.com/jackc/pgio/go.mod | 3 - vendor/github.com/jackc/pgpassfile/go.mod | 5 - vendor/github.com/jackc/pgpassfile/go.sum | 7 - .../v2/authentication_cleartext_password.go | 13 + .../v2/authentication_md5_password.go | 34 + .../jackc/pgproto3/v2/authentication_ok.go | 13 + .../jackc/pgproto3/v2/authentication_sasl.go | 15 + .../v2/authentication_sasl_continue.go | 33 + .../pgproto3/v2/authentication_sasl_final.go | 33 + .../github.com/jackc/pgproto3/v2/backend.go | 98 +- vendor/github.com/jackc/pgproto3/v2/bind.go | 33 + vendor/github.com/jackc/pgproto3/v2/close.go | 25 + .../jackc/pgproto3/v2/command_complete.go | 18 + .../jackc/pgproto3/v2/copy_both_response.go | 25 + .../github.com/jackc/pgproto3/v2/copy_data.go | 18 + .../jackc/pgproto3/v2/copy_in_response.go | 25 + .../jackc/pgproto3/v2/copy_out_response.go | 25 + .../github.com/jackc/pgproto3/v2/data_row.go | 25 + .../github.com/jackc/pgproto3/v2/describe.go | 24 + .../jackc/pgproto3/v2/error_response.go | 150 +- .../github.com/jackc/pgproto3/v2/frontend.go | 27 +- .../pgproto3/v2/function_call_response.go | 
18 + vendor/github.com/jackc/pgproto3/v2/go.mod | 9 - vendor/github.com/jackc/pgproto3/v2/go.sum | 14 - .../jackc/pgproto3/v2/password_message.go | 3 + .../github.com/jackc/pgproto3/v2/pgproto3.go | 25 +- .../jackc/pgproto3/v2/ready_for_query.go | 21 + .../jackc/pgproto3/v2/row_description.go | 31 + .../pgproto3/v2/sasl_initial_response.go | 25 + .../jackc/pgproto3/v2/sasl_response.go | 18 + vendor/github.com/jackc/pgservicefile/go.mod | 5 - vendor/github.com/jackc/pgservicefile/go.sum | 10 - vendor/github.com/jackc/pgtype/CHANGELOG.md | 11 + .../github.com/jackc/pgtype/composite_type.go | 4 + vendor/github.com/jackc/pgtype/convert.go | 39 +- vendor/github.com/jackc/pgtype/go.mod | 13 - vendor/github.com/jackc/pgtype/go.sum | 183 - vendor/github.com/jackc/pgtype/inet.go | 18 +- vendor/github.com/jackc/pgtype/pgtype.go | 1 + vendor/github.com/jackc/pgx/v4/CHANGELOG.md | 16 + vendor/github.com/jackc/pgx/v4/README.md | 1 + vendor/github.com/jackc/pgx/v4/go.mod | 21 - vendor/github.com/jackc/pgx/v4/go.sum | 484 -- vendor/github.com/jackc/pgx/v4/stdlib/sql.go | 101 +- vendor/github.com/jackc/pgx/v4/tx.go | 5 + vendor/github.com/jackc/pgx/v4/values.go | 2 + .../m3db/prometheus_client_golang/AUTHORS.md | 18 - .../prometheus/collector.go | 75 - .../prometheus/counter.go | 172 - .../prometheus/gauge.go | 140 - .../prometheus/histogram.go | 444 -- .../prometheus/http.go | 490 -- .../prometheus/process_collector.go | 142 - .../prometheus/promhttp/http.go | 201 - .../prometheus/untyped.go | 138 - .../prometheus/vec.go | 404 - .../prometheus_client_model/go/metrics.pb.go | 360 - .../m3db/prometheus_common/expfmt/encode.go | 88 - .../prometheus_common/expfmt/text_create.go | 303 - .../m3db/prometheus_procfs/.travis.yml | 5 - .../m3db/prometheus_procfs/AUTHORS.md | 21 - .../m3db/prometheus_procfs/CONTRIBUTING.md | 18 - .../m3db/prometheus_procfs/Makefile | 6 - .../m3db/prometheus_procfs/README.md | 10 - .../github.com/m3db/prometheus_procfs/fs.go | 33 - 
.../m3db/prometheus_procfs/mdstat.go | 138 - .../github.com/m3db/prometheus_procfs/stat.go | 56 - vendor/github.com/mschoch/smat/go.mod | 3 - vendor/github.com/philhofer/fwd/LICENSE.md | 7 - vendor/github.com/philhofer/fwd/README.md | 315 - vendor/github.com/philhofer/fwd/reader.go | 383 - vendor/github.com/philhofer/fwd/writer.go | 224 - .../philhofer/fwd/writer_appengine.go | 5 - .../github.com/philhofer/fwd/writer_unsafe.go | 18 - .../client_golang}/LICENSE | 0 .../client_golang}/NOTICE | 0 .../client_golang}/prometheus/.gitignore | 0 .../client_golang}/prometheus/README.md | 0 .../client_golang/prometheus/collector.go | 120 + .../client_golang/prometheus/counter.go | 321 + .../client_golang}/prometheus/desc.go | 77 +- .../client_golang}/prometheus/doc.go | 120 +- .../prometheus/expvar_collector.go | 39 +- .../client_golang}/prometheus/fnv.go | 13 + .../client_golang/prometheus/gauge.go | 289 + .../client_golang}/prometheus/go_collector.go | 160 +- .../client_golang/prometheus/histogram.go | 642 ++ .../prometheus/internal/metric.go | 85 + .../client_golang/prometheus/labels.go | 87 + .../client_golang}/prometheus/metric.go | 98 +- .../client_golang/prometheus/observer.go | 64 + .../prometheus/process_collector.go | 166 + .../prometheus/process_collector_other.go | 65 + .../prometheus/process_collector_windows.go | 116 + .../prometheus/promhttp/delegator.go | 368 + .../client_golang/prometheus/promhttp/http.go | 383 + .../prometheus/promhttp/instrument_client.go | 219 + .../prometheus/promhttp/instrument_server.go | 458 ++ .../client_golang}/prometheus/registry.go | 764 +- .../client_golang}/prometheus/summary.go | 338 +- .../client_golang/prometheus/timer.go | 54 + .../client_golang/prometheus/untyped.go | 42 + .../client_golang}/prometheus/value.go | 158 +- .../client_golang/prometheus/vec.go | 556 ++ .../client_golang/prometheus/wrap.go | 214 + .../client_model}/LICENSE | 0 .../client_model}/NOTICE | 0 .../prometheus/client_model/go/metrics.pb.go | 723 ++ 
.../common}/LICENSE | 0 .../common}/NOTICE | 0 .../common}/expfmt/decode.go | 6 +- .../prometheus/common/expfmt/encode.go | 162 + .../common}/expfmt/expfmt.go | 11 +- .../common}/expfmt/fuzz.go | 0 .../common/expfmt/openmetrics_create.go | 527 ++ .../prometheus/common/expfmt/text_create.go | 465 ++ .../common}/expfmt/text_parse.go | 30 +- .../bitbucket.org/ww/goautoneg/README.txt | 0 .../bitbucket.org/ww/goautoneg/autoneg.go | 6 +- .../common}/model/alert.go | 0 .../common}/model/fingerprinting.go | 0 .../common}/model/fnv.go | 2 +- .../common}/model/labels.go | 8 + .../common}/model/labelset.go | 0 .../common}/model/metric.go | 1 - .../common}/model/model.go | 0 .../common}/model/signature.go | 0 .../common}/model/silence.go | 0 .../common}/model/time.go | 153 +- .../common}/model/value.go | 0 .../github.com/prometheus/procfs/.gitignore | 1 + .../prometheus/procfs/.golangci.yml | 4 + .../prometheus/procfs/CODE_OF_CONDUCT.md | 3 + .../prometheus/procfs/CONTRIBUTING.md | 121 + .../procfs}/LICENSE | 0 .../prometheus/procfs/MAINTAINERS.md | 2 + vendor/github.com/prometheus/procfs/Makefile | 29 + .../prometheus/procfs/Makefile.common | 302 + .../procfs}/NOTICE | 0 vendor/github.com/prometheus/procfs/README.md | 61 + .../github.com/prometheus/procfs/SECURITY.md | 6 + vendor/github.com/prometheus/procfs/arp.go | 85 + .../github.com/prometheus/procfs/buddyinfo.go | 85 + .../github.com/prometheus/procfs/cpuinfo.go | 481 ++ .../procfs/cpuinfo_armx.go} | 16 +- .../prometheus/procfs/cpuinfo_mipsx.go | 19 + .../prometheus/procfs/cpuinfo_others.go | 19 + .../prometheus/procfs/cpuinfo_ppcx.go | 19 + .../prometheus/procfs/cpuinfo_riscvx.go | 19 + .../prometheus/procfs/cpuinfo_s390x.go | 18 + .../prometheus/procfs/cpuinfo_x86.go | 19 + vendor/github.com/prometheus/procfs/crypto.go | 153 + .../procfs}/doc.go | 0 .../prometheus/procfs/fixtures.ttar | 6553 +++++++++++++++++ vendor/github.com/prometheus/procfs/fs.go | 43 + .../github.com/prometheus/procfs/fscache.go | 422 ++ 
.../prometheus/procfs/internal/fs/fs.go | 55 + .../prometheus/procfs/internal/util/parse.go | 97 + .../procfs/internal/util/readfile.go | 38 + .../procfs/internal/util/sysreadfile.go | 48 + .../internal/util/sysreadfile_compat.go} | 21 +- .../procfs/internal/util/valueparser.go | 91 + .../procfs}/ipvs.go | 111 +- .../prometheus/procfs/kernel_random.go | 62 + .../github.com/prometheus/procfs/loadavg.go | 62 + vendor/github.com/prometheus/procfs/mdstat.go | 213 + .../github.com/prometheus/procfs/meminfo.go | 277 + .../github.com/prometheus/procfs/mountinfo.go | 180 + .../procfs}/mountstats.go | 154 +- .../prometheus/procfs/net_conntrackstat.go | 153 + .../github.com/prometheus/procfs/net_dev.go | 205 + .../prometheus/procfs/net_ip_socket.go | 220 + .../prometheus/procfs/net_protocols.go | 180 + .../prometheus/procfs/net_sockstat.go | 163 + .../prometheus/procfs/net_softnet.go | 102 + .../github.com/prometheus/procfs/net_tcp.go | 64 + .../github.com/prometheus/procfs/net_udp.go | 64 + .../github.com/prometheus/procfs/net_unix.go | 257 + .../procfs}/proc.go | 141 +- .../prometheus/procfs/proc_cgroup.go | 98 + .../prometheus/procfs/proc_environ.go | 37 + .../prometheus/procfs/proc_fdinfo.go | 133 + .../procfs}/proc_io.go | 34 +- .../procfs}/proc_limits.go | 109 +- .../github.com/prometheus/procfs/proc_maps.go | 209 + .../github.com/prometheus/procfs/proc_ns.go | 68 + .../github.com/prometheus/procfs/proc_psi.go | 100 + .../prometheus/procfs/proc_smaps.go | 165 + .../procfs}/proc_stat.go | 46 +- .../prometheus/procfs/proc_status.go | 170 + .../github.com/prometheus/procfs/schedstat.go | 121 + vendor/github.com/prometheus/procfs/slab.go | 151 + vendor/github.com/prometheus/procfs/stat.go | 244 + vendor/github.com/prometheus/procfs/swaps.go | 89 + vendor/github.com/prometheus/procfs/ttar | 413 ++ vendor/github.com/prometheus/procfs/vm.go | 210 + vendor/github.com/prometheus/procfs/xfrm.go | 186 + .../github.com/prometheus/procfs/zoneinfo.go | 196 + 
vendor/github.com/rubenv/sql-migrate/go.mod | 18 - vendor/github.com/rubenv/sql-migrate/go.sum | 237 - vendor/github.com/steveyen/gtreap/.gitignore | 5 - vendor/github.com/steveyen/gtreap/README.md | 90 - vendor/github.com/steveyen/gtreap/go.mod | 3 - vendor/github.com/steveyen/gtreap/treap.go | 188 - vendor/github.com/tinylib/msgp/LICENSE | 8 - .../tinylib/msgp/msgp/advise_linux.go | 24 - .../tinylib/msgp/msgp/advise_other.go | 17 - .../github.com/tinylib/msgp/msgp/circular.go | 39 - vendor/github.com/tinylib/msgp/msgp/defs.go | 142 - vendor/github.com/tinylib/msgp/msgp/edit.go | 242 - vendor/github.com/tinylib/msgp/msgp/elsize.go | 99 - vendor/github.com/tinylib/msgp/msgp/errors.go | 314 - .../github.com/tinylib/msgp/msgp/extension.go | 549 -- vendor/github.com/tinylib/msgp/msgp/file.go | 92 - .../github.com/tinylib/msgp/msgp/file_port.go | 47 - .../github.com/tinylib/msgp/msgp/integers.go | 174 - vendor/github.com/tinylib/msgp/msgp/json.go | 568 -- .../tinylib/msgp/msgp/json_bytes.go | 363 - vendor/github.com/tinylib/msgp/msgp/number.go | 267 - vendor/github.com/tinylib/msgp/msgp/purego.go | 15 - vendor/github.com/tinylib/msgp/msgp/read.go | 1358 ---- .../tinylib/msgp/msgp/read_bytes.go | 1197 --- vendor/github.com/tinylib/msgp/msgp/size.go | 38 - vendor/github.com/tinylib/msgp/msgp/unsafe.go | 41 - vendor/github.com/tinylib/msgp/msgp/write.go | 845 --- .../tinylib/msgp/msgp/write_bytes.go | 411 -- vendor/github.com/twmb/murmur3/.gitignore | 1 + vendor/github.com/twmb/murmur3/.travis.yml | 10 + vendor/github.com/twmb/murmur3/LICENSE | 49 + vendor/github.com/twmb/murmur3/README.md | 129 + vendor/github.com/twmb/murmur3/murmur.go | 72 + vendor/github.com/twmb/murmur3/murmur128.go | 182 + .../github.com/twmb/murmur3/murmur128_amd64.s | 246 + .../github.com/twmb/murmur3/murmur128_decl.go | 34 + .../github.com/twmb/murmur3/murmur128_gen.go | 135 + vendor/github.com/twmb/murmur3/murmur32.go | 100 + .../github.com/twmb/murmur3/murmur32_gen.go | 69 + 
vendor/github.com/twmb/murmur3/murmur64.go | 70 + vendor/github.com/uber-go/tally/glide.lock | 74 - vendor/github.com/uber-go/tally/glide.yaml | 44 - .../uber-go/tally/{ => v4}/.gitignore | 0 .../uber-go/tally/{ => v4}/.travis.yml | 4 +- .../github.com/uber-go/tally/{ => v4}/LICENSE | 0 .../uber-go/tally/{ => v4}/Makefile | 13 +- .../uber-go/tally/{ => v4}/README.md | 2 +- .../uber-go/tally/{ => v4}/check_license.sh | 0 .../uber-go/tally/{ => v4}/histogram.go | 162 +- .../tally/v4/internal/identity/accumulator.go | 131 + .../uber-go/tally/{ => v4}/key_gen.go | 2 +- .../github.com/uber-go/tally/{ => v4}/pool.go | 2 +- .../tally/{ => v4}/prometheus/README.md | 0 .../tally/{ => v4}/prometheus/config.go | 4 +- .../tally/{ => v4}/prometheus/reporter.go | 13 +- .../tally/{ => v4}/prometheus/sanitize.go | 38 +- .../uber-go/tally/{ => v4}/reporter.go | 2 +- .../uber-go/tally/{ => v4}/sanitize.go | 2 +- .../uber-go/tally/{ => v4}/scope.go | 120 +- .../uber-go/tally/{ => v4}/scope_registry.go | 56 +- .../uber-go/tally/{ => v4}/stats.go | 256 +- .../uber-go/tally/{ => v4}/types.go | 2 +- vendor/github.com/uber-go/tally/v4/version.go | 24 + vendor/github.com/willf/bitset/Makefile | 197 - vendor/go.etcd.io/bbolt/.gitignore | 5 - vendor/go.etcd.io/bbolt/.travis.yml | 17 - vendor/go.etcd.io/bbolt/LICENSE | 20 - vendor/go.etcd.io/bbolt/Makefile | 38 - vendor/go.etcd.io/bbolt/README.md | 957 --- vendor/go.etcd.io/bbolt/bolt_386.go | 7 - vendor/go.etcd.io/bbolt/bolt_amd64.go | 7 - vendor/go.etcd.io/bbolt/bolt_arm.go | 7 - vendor/go.etcd.io/bbolt/bolt_arm64.go | 9 - vendor/go.etcd.io/bbolt/bolt_linux.go | 10 - vendor/go.etcd.io/bbolt/bolt_mips64x.go | 9 - vendor/go.etcd.io/bbolt/bolt_mipsx.go | 9 - vendor/go.etcd.io/bbolt/bolt_openbsd.go | 27 - vendor/go.etcd.io/bbolt/bolt_ppc.go | 9 - vendor/go.etcd.io/bbolt/bolt_ppc64.go | 9 - vendor/go.etcd.io/bbolt/bolt_ppc64le.go | 9 - vendor/go.etcd.io/bbolt/bolt_riscv64.go | 9 - vendor/go.etcd.io/bbolt/bolt_s390x.go | 9 - 
vendor/go.etcd.io/bbolt/bolt_unix.go | 93 - vendor/go.etcd.io/bbolt/bolt_unix_aix.go | 90 - vendor/go.etcd.io/bbolt/bolt_unix_solaris.go | 88 - vendor/go.etcd.io/bbolt/bolt_windows.go | 141 - vendor/go.etcd.io/bbolt/boltsync_unix.go | 8 - vendor/go.etcd.io/bbolt/bucket.go | 777 -- vendor/go.etcd.io/bbolt/cursor.go | 396 - vendor/go.etcd.io/bbolt/db.go | 1174 --- vendor/go.etcd.io/bbolt/doc.go | 44 - vendor/go.etcd.io/bbolt/errors.go | 71 - vendor/go.etcd.io/bbolt/freelist.go | 404 - vendor/go.etcd.io/bbolt/freelist_hmap.go | 178 - vendor/go.etcd.io/bbolt/go.mod | 5 - vendor/go.etcd.io/bbolt/go.sum | 2 - vendor/go.etcd.io/bbolt/node.go | 602 -- vendor/go.etcd.io/bbolt/page.go | 204 - vendor/go.etcd.io/bbolt/tx.go | 724 -- vendor/go.etcd.io/bbolt/unsafe.go | 39 - vendor/go.uber.org/atomic/.gitignore | 3 + vendor/go.uber.org/atomic/.travis.yml | 27 - vendor/go.uber.org/atomic/CHANGELOG.md | 44 +- vendor/go.uber.org/atomic/Makefile | 1 + vendor/go.uber.org/atomic/README.md | 4 +- vendor/go.uber.org/atomic/bool.go | 20 +- vendor/go.uber.org/atomic/bool_ext.go | 2 +- vendor/go.uber.org/atomic/duration.go | 20 +- vendor/go.uber.org/atomic/duration_ext.go | 8 +- vendor/go.uber.org/atomic/error.go | 12 +- vendor/go.uber.org/atomic/float64.go | 19 +- vendor/go.uber.org/atomic/float64_ext.go | 34 +- vendor/go.uber.org/atomic/gen.go | 1 + vendor/go.uber.org/atomic/go.mod | 8 - vendor/go.uber.org/atomic/go.sum | 9 - vendor/go.uber.org/atomic/int32.go | 24 +- vendor/go.uber.org/atomic/int64.go | 24 +- vendor/go.uber.org/atomic/string.go | 12 +- vendor/go.uber.org/atomic/string_ext.go | 2 + vendor/go.uber.org/atomic/time.go | 55 + vendor/go.uber.org/atomic/time_ext.go | 36 + vendor/go.uber.org/atomic/uint32.go | 24 +- vendor/go.uber.org/atomic/uint64.go | 24 +- vendor/go.uber.org/atomic/uintptr.go | 102 + vendor/go.uber.org/atomic/unsafe_pointer.go | 58 + vendor/go.uber.org/multierr/go.mod | 8 - vendor/go.uber.org/multierr/go.sum | 11 - vendor/go.uber.org/zap/.travis.yml | 23 - 
vendor/go.uber.org/zap/CHANGELOG.md | 84 + vendor/go.uber.org/zap/CONTRIBUTING.md | 6 - vendor/go.uber.org/zap/FAQ.md | 8 + vendor/go.uber.org/zap/Makefile | 16 +- vendor/go.uber.org/zap/README.md | 8 +- vendor/go.uber.org/zap/buffer/buffer.go | 18 + vendor/go.uber.org/zap/field.go | 10 + vendor/go.uber.org/zap/go.mod | 13 - vendor/go.uber.org/zap/go.sum | 56 - vendor/go.uber.org/zap/http_handler.go | 99 +- vendor/go.uber.org/zap/logger.go | 18 +- vendor/go.uber.org/zap/options.go | 8 + vendor/go.uber.org/zap/sugar.go | 31 +- .../zap/zapcore/buffered_write_syncer.go | 188 + vendor/go.uber.org/zap/zapcore/clock.go | 50 + .../zap/zapcore/console_encoder.go | 2 +- vendor/go.uber.org/zap/zapcore/entry.go | 10 +- vendor/go.uber.org/zap/zapcore/error.go | 21 +- vendor/go.uber.org/zap/zapcore/field.go | 8 +- .../go.uber.org/zap/zapcore/json_encoder.go | 12 +- vendor/go.uber.org/zap/zapcore/sampler.go | 14 +- .../go.uber.org/zap/zapcore/write_syncer.go | 3 +- .../zap/zaptest/observer/observer.go | 33 +- vendor/golang.org/x/sys/unix/README.md | 6 +- vendor/golang.org/x/sys/unix/aliases.go | 3 +- vendor/golang.org/x/sys/unix/asm_aix_ppc64.s | 1 + .../unix/{asm_freebsd_386.s => asm_bsd_386.s} | 10 +- .../{asm_darwin_amd64.s => asm_bsd_amd64.s} | 8 +- .../unix/{asm_freebsd_arm.s => asm_bsd_arm.s} | 8 +- .../{asm_netbsd_amd64.s => asm_bsd_arm64.s} | 8 +- vendor/golang.org/x/sys/unix/asm_darwin_386.s | 29 - vendor/golang.org/x/sys/unix/asm_darwin_arm.s | 30 - .../golang.org/x/sys/unix/asm_darwin_arm64.s | 30 - .../x/sys/unix/asm_dragonfly_amd64.s | 29 - .../golang.org/x/sys/unix/asm_freebsd_amd64.s | 29 - .../golang.org/x/sys/unix/asm_freebsd_arm64.s | 29 - vendor/golang.org/x/sys/unix/asm_linux_386.s | 1 + .../golang.org/x/sys/unix/asm_linux_amd64.s | 1 + vendor/golang.org/x/sys/unix/asm_linux_arm.s | 1 + .../golang.org/x/sys/unix/asm_linux_arm64.s | 1 + .../golang.org/x/sys/unix/asm_linux_mips64x.s | 1 + .../golang.org/x/sys/unix/asm_linux_mipsx.s | 1 + 
.../golang.org/x/sys/unix/asm_linux_ppc64x.s | 1 + .../golang.org/x/sys/unix/asm_linux_riscv64.s | 4 +- .../golang.org/x/sys/unix/asm_linux_s390x.s | 3 +- vendor/golang.org/x/sys/unix/asm_netbsd_386.s | 29 - vendor/golang.org/x/sys/unix/asm_netbsd_arm.s | 29 - .../golang.org/x/sys/unix/asm_netbsd_arm64.s | 29 - .../golang.org/x/sys/unix/asm_openbsd_386.s | 29 - .../golang.org/x/sys/unix/asm_openbsd_amd64.s | 29 - .../golang.org/x/sys/unix/asm_openbsd_arm.s | 29 - .../golang.org/x/sys/unix/asm_openbsd_arm64.s | 29 - .../x/sys/unix/asm_openbsd_mips64.s | 1 + .../golang.org/x/sys/unix/asm_solaris_amd64.s | 1 + vendor/golang.org/x/sys/unix/asm_zos_s390x.s | 426 ++ vendor/golang.org/x/sys/unix/cap_freebsd.go | 1 + vendor/golang.org/x/sys/unix/constants.go | 3 +- vendor/golang.org/x/sys/unix/dev_aix_ppc.go | 4 +- vendor/golang.org/x/sys/unix/dev_aix_ppc64.go | 4 +- vendor/golang.org/x/sys/unix/dev_zos.go | 29 + vendor/golang.org/x/sys/unix/dirent.go | 1 + vendor/golang.org/x/sys/unix/endian_big.go | 1 + vendor/golang.org/x/sys/unix/endian_little.go | 1 + vendor/golang.org/x/sys/unix/env_unix.go | 3 +- vendor/golang.org/x/sys/unix/epoll_zos.go | 221 + vendor/golang.org/x/sys/unix/fcntl.go | 1 + .../x/sys/unix/fcntl_linux_32bit.go | 3 +- vendor/golang.org/x/sys/unix/fdset.go | 3 +- vendor/golang.org/x/sys/unix/fstatfs_zos.go | 164 + vendor/golang.org/x/sys/unix/gccgo.go | 4 +- .../x/sys/unix/gccgo_linux_amd64.go | 1 + vendor/golang.org/x/sys/unix/ioctl.go | 1 + vendor/golang.org/x/sys/unix/ioctl_linux.go | 196 + vendor/golang.org/x/sys/unix/ioctl_zos.go | 74 + vendor/golang.org/x/sys/unix/mkall.sh | 14 +- vendor/golang.org/x/sys/unix/mkerrors.sh | 38 +- vendor/golang.org/x/sys/unix/pagesize_unix.go | 1 + vendor/golang.org/x/sys/unix/ptrace_darwin.go | 1 + vendor/golang.org/x/sys/unix/ptrace_ios.go | 1 + vendor/golang.org/x/sys/unix/race.go | 1 + vendor/golang.org/x/sys/unix/race0.go | 3 +- .../x/sys/unix/readdirent_getdents.go | 1 + 
.../x/sys/unix/readdirent_getdirentries.go | 1 + vendor/golang.org/x/sys/unix/sockcmsg_unix.go | 3 +- .../x/sys/unix/sockcmsg_unix_other.go | 7 +- vendor/golang.org/x/sys/unix/str.go | 1 + vendor/golang.org/x/sys/unix/syscall.go | 3 +- vendor/golang.org/x/sys/unix/syscall_aix.go | 13 +- .../golang.org/x/sys/unix/syscall_aix_ppc.go | 4 +- .../x/sys/unix/syscall_aix_ppc64.go | 4 +- vendor/golang.org/x/sys/unix/syscall_bsd.go | 9 +- .../x/sys/unix/syscall_darwin.1_12.go | 1 + .../x/sys/unix/syscall_darwin.1_13.go | 5 +- .../golang.org/x/sys/unix/syscall_darwin.go | 30 +- .../x/sys/unix/syscall_darwin_386.go | 50 - .../x/sys/unix/syscall_darwin_amd64.go | 1 + .../x/sys/unix/syscall_darwin_arm.go | 51 - .../x/sys/unix/syscall_darwin_arm64.go | 1 + .../x/sys/unix/syscall_darwin_libSystem.go | 10 +- .../x/sys/unix/syscall_dragonfly.go | 18 +- .../x/sys/unix/syscall_dragonfly_amd64.go | 1 + .../golang.org/x/sys/unix/syscall_freebsd.go | 17 +- .../x/sys/unix/syscall_freebsd_386.go | 1 + .../x/sys/unix/syscall_freebsd_amd64.go | 1 + .../x/sys/unix/syscall_freebsd_arm.go | 1 + .../x/sys/unix/syscall_freebsd_arm64.go | 1 + .../golang.org/x/sys/unix/syscall_illumos.go | 105 +- vendor/golang.org/x/sys/unix/syscall_linux.go | 193 +- .../x/sys/unix/syscall_linux_386.go | 11 +- .../x/sys/unix/syscall_linux_amd64.go | 7 +- .../x/sys/unix/syscall_linux_amd64_gc.go | 4 +- .../x/sys/unix/syscall_linux_arm.go | 15 +- .../x/sys/unix/syscall_linux_arm64.go | 7 +- .../golang.org/x/sys/unix/syscall_linux_gc.go | 1 + .../x/sys/unix/syscall_linux_gc_386.go | 1 + .../x/sys/unix/syscall_linux_gc_arm.go | 1 + .../x/sys/unix/syscall_linux_gccgo_386.go | 1 + .../x/sys/unix/syscall_linux_gccgo_arm.go | 1 + .../x/sys/unix/syscall_linux_mips64x.go | 7 +- .../x/sys/unix/syscall_linux_mipsx.go | 13 +- .../x/sys/unix/syscall_linux_ppc.go | 276 + .../x/sys/unix/syscall_linux_ppc64x.go | 9 +- .../x/sys/unix/syscall_linux_riscv64.go | 7 +- .../x/sys/unix/syscall_linux_s390x.go | 9 +- 
.../x/sys/unix/syscall_linux_sparc64.go | 9 +- .../golang.org/x/sys/unix/syscall_netbsd.go | 21 +- .../x/sys/unix/syscall_netbsd_386.go | 1 + .../x/sys/unix/syscall_netbsd_amd64.go | 1 + .../x/sys/unix/syscall_netbsd_arm.go | 1 + .../x/sys/unix/syscall_netbsd_arm64.go | 1 + .../golang.org/x/sys/unix/syscall_openbsd.go | 4 +- .../x/sys/unix/syscall_openbsd_386.go | 1 + .../x/sys/unix/syscall_openbsd_amd64.go | 1 + .../x/sys/unix/syscall_openbsd_arm.go | 1 + .../x/sys/unix/syscall_openbsd_arm64.go | 1 + .../golang.org/x/sys/unix/syscall_solaris.go | 10 +- .../x/sys/unix/syscall_solaris_amd64.go | 1 + vendor/golang.org/x/sys/unix/syscall_unix.go | 1 + .../golang.org/x/sys/unix/syscall_unix_gc.go | 5 +- .../x/sys/unix/syscall_unix_gc_ppc64x.go | 1 + .../x/sys/unix/syscall_zos_s390x.go | 1829 +++++ vendor/golang.org/x/sys/unix/timestruct.go | 3 +- vendor/golang.org/x/sys/unix/xattr_bsd.go | 1 + .../golang.org/x/sys/unix/zerrors_aix_ppc.go | 1 + .../x/sys/unix/zerrors_aix_ppc64.go | 1 + .../x/sys/unix/zerrors_darwin_386.go | 1788 ----- .../x/sys/unix/zerrors_darwin_amd64.go | 88 +- .../x/sys/unix/zerrors_darwin_arm.go | 1788 ----- .../x/sys/unix/zerrors_darwin_arm64.go | 88 +- .../x/sys/unix/zerrors_dragonfly_amd64.go | 1 + .../x/sys/unix/zerrors_freebsd_386.go | 7 + .../x/sys/unix/zerrors_freebsd_amd64.go | 7 + .../x/sys/unix/zerrors_freebsd_arm.go | 16 + .../x/sys/unix/zerrors_freebsd_arm64.go | 7 + vendor/golang.org/x/sys/unix/zerrors_linux.go | 141 +- .../x/sys/unix/zerrors_linux_386.go | 30 + .../x/sys/unix/zerrors_linux_amd64.go | 30 + .../x/sys/unix/zerrors_linux_arm.go | 30 + .../x/sys/unix/zerrors_linux_arm64.go | 31 + .../x/sys/unix/zerrors_linux_mips.go | 30 + .../x/sys/unix/zerrors_linux_mips64.go | 30 + .../x/sys/unix/zerrors_linux_mips64le.go | 30 + .../x/sys/unix/zerrors_linux_mipsle.go | 30 + .../x/sys/unix/zerrors_linux_ppc.go | 879 +++ .../x/sys/unix/zerrors_linux_ppc64.go | 30 + .../x/sys/unix/zerrors_linux_ppc64le.go | 30 + 
.../x/sys/unix/zerrors_linux_riscv64.go | 30 + .../x/sys/unix/zerrors_linux_s390x.go | 32 + .../x/sys/unix/zerrors_linux_sparc64.go | 30 + .../x/sys/unix/zerrors_netbsd_386.go | 1 + .../x/sys/unix/zerrors_netbsd_amd64.go | 1 + .../x/sys/unix/zerrors_netbsd_arm.go | 1 + .../x/sys/unix/zerrors_netbsd_arm64.go | 1 + .../x/sys/unix/zerrors_openbsd_386.go | 1 + .../x/sys/unix/zerrors_openbsd_amd64.go | 1 + .../x/sys/unix/zerrors_openbsd_arm.go | 1 + .../x/sys/unix/zerrors_openbsd_arm64.go | 1 + .../x/sys/unix/zerrors_openbsd_mips64.go | 1 + .../x/sys/unix/zerrors_solaris_amd64.go | 4 + .../x/sys/unix/zerrors_zos_s390x.go | 860 +++ .../x/sys/unix/zptrace_armnn_linux.go | 1 + .../x/sys/unix/zptrace_mipsnn_linux.go | 1 + .../x/sys/unix/zptrace_mipsnnle_linux.go | 1 + .../x/sys/unix/zptrace_x86_linux.go | 1 + .../golang.org/x/sys/unix/zsyscall_aix_ppc.go | 1 + .../x/sys/unix/zsyscall_aix_ppc64.go | 1 + .../x/sys/unix/zsyscall_aix_ppc64_gc.go | 4 +- .../x/sys/unix/zsyscall_aix_ppc64_gccgo.go | 4 +- .../x/sys/unix/zsyscall_darwin_386.1_13.go | 39 - .../x/sys/unix/zsyscall_darwin_386.1_13.s | 12 - .../x/sys/unix/zsyscall_darwin_386.go | 2430 ------ .../x/sys/unix/zsyscall_darwin_386.s | 290 - .../x/sys/unix/zsyscall_darwin_amd64.1_13.go | 9 +- .../x/sys/unix/zsyscall_darwin_amd64.1_13.s | 19 +- .../x/sys/unix/zsyscall_darwin_amd64.go | 573 +- .../x/sys/unix/zsyscall_darwin_amd64.s | 853 ++- .../x/sys/unix/zsyscall_darwin_arm.1_13.go | 39 - .../x/sys/unix/zsyscall_darwin_arm.1_13.s | 12 - .../x/sys/unix/zsyscall_darwin_arm.go | 2416 ------ .../x/sys/unix/zsyscall_darwin_arm.s | 288 - .../x/sys/unix/zsyscall_darwin_arm64.1_13.go | 9 +- .../x/sys/unix/zsyscall_darwin_arm64.1_13.s | 19 +- .../x/sys/unix/zsyscall_darwin_arm64.go | 573 +- .../x/sys/unix/zsyscall_darwin_arm64.s | 853 ++- .../x/sys/unix/zsyscall_dragonfly_amd64.go | 7 +- .../x/sys/unix/zsyscall_freebsd_386.go | 1 + .../x/sys/unix/zsyscall_freebsd_amd64.go | 1 + .../x/sys/unix/zsyscall_freebsd_arm.go | 1 + 
.../x/sys/unix/zsyscall_freebsd_arm64.go | 1 + .../x/sys/unix/zsyscall_illumos_amd64.go | 29 +- .../golang.org/x/sys/unix/zsyscall_linux.go | 11 + .../x/sys/unix/zsyscall_linux_386.go | 1 + .../x/sys/unix/zsyscall_linux_amd64.go | 1 + .../x/sys/unix/zsyscall_linux_arm.go | 1 + .../x/sys/unix/zsyscall_linux_arm64.go | 1 + .../x/sys/unix/zsyscall_linux_mips.go | 1 + .../x/sys/unix/zsyscall_linux_mips64.go | 1 + .../x/sys/unix/zsyscall_linux_mips64le.go | 1 + .../x/sys/unix/zsyscall_linux_mipsle.go | 1 + .../x/sys/unix/zsyscall_linux_ppc.go | 762 ++ .../x/sys/unix/zsyscall_linux_ppc64.go | 1 + .../x/sys/unix/zsyscall_linux_ppc64le.go | 1 + .../x/sys/unix/zsyscall_linux_riscv64.go | 1 + .../x/sys/unix/zsyscall_linux_s390x.go | 1 + .../x/sys/unix/zsyscall_linux_sparc64.go | 1 + .../x/sys/unix/zsyscall_netbsd_386.go | 11 + .../x/sys/unix/zsyscall_netbsd_amd64.go | 11 + .../x/sys/unix/zsyscall_netbsd_arm.go | 11 + .../x/sys/unix/zsyscall_netbsd_arm64.go | 11 + .../x/sys/unix/zsyscall_openbsd_386.go | 1 + .../x/sys/unix/zsyscall_openbsd_amd64.go | 1 + .../x/sys/unix/zsyscall_openbsd_arm.go | 1 + .../x/sys/unix/zsyscall_openbsd_arm64.go | 1 + .../x/sys/unix/zsyscall_openbsd_mips64.go | 1 + .../x/sys/unix/zsyscall_solaris_amd64.go | 20 +- .../x/sys/unix/zsyscall_zos_s390x.go | 1255 ++++ .../x/sys/unix/zsysctl_openbsd_386.go | 1 + .../x/sys/unix/zsysctl_openbsd_amd64.go | 1 + .../x/sys/unix/zsysctl_openbsd_arm.go | 1 + .../x/sys/unix/zsysctl_openbsd_arm64.go | 1 + .../x/sys/unix/zsysctl_openbsd_mips64.go | 1 + .../x/sys/unix/zsysnum_darwin_386.go | 437 -- .../x/sys/unix/zsysnum_darwin_amd64.go | 1 + .../x/sys/unix/zsysnum_darwin_arm.go | 437 -- .../x/sys/unix/zsysnum_darwin_arm64.go | 1 + .../x/sys/unix/zsysnum_dragonfly_amd64.go | 1 + .../x/sys/unix/zsysnum_freebsd_386.go | 1 + .../x/sys/unix/zsysnum_freebsd_amd64.go | 1 + .../x/sys/unix/zsysnum_freebsd_arm.go | 1 + .../x/sys/unix/zsysnum_freebsd_arm64.go | 1 + .../x/sys/unix/zsysnum_linux_386.go | 3 + 
.../x/sys/unix/zsysnum_linux_amd64.go | 3 + .../x/sys/unix/zsysnum_linux_arm.go | 3 + .../x/sys/unix/zsysnum_linux_arm64.go | 3 + .../x/sys/unix/zsysnum_linux_mips.go | 3 + .../x/sys/unix/zsysnum_linux_mips64.go | 3 + .../x/sys/unix/zsysnum_linux_mips64le.go | 3 + .../x/sys/unix/zsysnum_linux_mipsle.go | 3 + .../x/sys/unix/zsysnum_linux_ppc.go | 434 ++ .../x/sys/unix/zsysnum_linux_ppc64.go | 3 + .../x/sys/unix/zsysnum_linux_ppc64le.go | 3 + .../x/sys/unix/zsysnum_linux_riscv64.go | 3 + .../x/sys/unix/zsysnum_linux_s390x.go | 3 + .../x/sys/unix/zsysnum_linux_sparc64.go | 3 + .../x/sys/unix/zsysnum_netbsd_386.go | 1 + .../x/sys/unix/zsysnum_netbsd_amd64.go | 1 + .../x/sys/unix/zsysnum_netbsd_arm.go | 1 + .../x/sys/unix/zsysnum_netbsd_arm64.go | 1 + .../x/sys/unix/zsysnum_openbsd_386.go | 1 + .../x/sys/unix/zsysnum_openbsd_amd64.go | 1 + .../x/sys/unix/zsysnum_openbsd_arm.go | 1 + .../x/sys/unix/zsysnum_openbsd_arm64.go | 1 + .../x/sys/unix/zsysnum_openbsd_mips64.go | 1 + .../x/sys/unix/zsysnum_zos_s390x.go | 2670 +++++++ .../golang.org/x/sys/unix/ztypes_aix_ppc.go | 1 + .../golang.org/x/sys/unix/ztypes_aix_ppc64.go | 1 + .../x/sys/unix/ztypes_darwin_386.go | 516 -- .../x/sys/unix/ztypes_darwin_amd64.go | 16 + .../x/sys/unix/ztypes_darwin_arm.go | 516 -- .../x/sys/unix/ztypes_darwin_arm64.go | 16 + .../x/sys/unix/ztypes_dragonfly_amd64.go | 4 + .../x/sys/unix/ztypes_freebsd_386.go | 15 +- .../x/sys/unix/ztypes_freebsd_amd64.go | 15 +- .../x/sys/unix/ztypes_freebsd_arm.go | 15 +- .../x/sys/unix/ztypes_freebsd_arm64.go | 15 +- .../x/sys/unix/ztypes_illumos_amd64.go | 40 + vendor/golang.org/x/sys/unix/ztypes_linux.go | 237 +- .../golang.org/x/sys/unix/ztypes_linux_386.go | 19 +- .../x/sys/unix/ztypes_linux_amd64.go | 19 +- .../golang.org/x/sys/unix/ztypes_linux_arm.go | 19 +- .../x/sys/unix/ztypes_linux_arm64.go | 19 +- .../x/sys/unix/ztypes_linux_mips.go | 19 +- .../x/sys/unix/ztypes_linux_mips64.go | 19 +- .../x/sys/unix/ztypes_linux_mips64le.go | 19 +- 
.../x/sys/unix/ztypes_linux_mipsle.go | 19 +- .../golang.org/x/sys/unix/ztypes_linux_ppc.go | 639 ++ .../x/sys/unix/ztypes_linux_ppc64.go | 19 +- .../x/sys/unix/ztypes_linux_ppc64le.go | 19 +- .../x/sys/unix/ztypes_linux_riscv64.go | 19 +- .../x/sys/unix/ztypes_linux_s390x.go | 19 +- .../x/sys/unix/ztypes_linux_sparc64.go | 19 +- .../x/sys/unix/ztypes_netbsd_386.go | 5 +- .../x/sys/unix/ztypes_netbsd_amd64.go | 5 +- .../x/sys/unix/ztypes_netbsd_arm.go | 5 +- .../x/sys/unix/ztypes_netbsd_arm64.go | 5 +- .../x/sys/unix/ztypes_openbsd_386.go | 5 +- .../x/sys/unix/ztypes_openbsd_amd64.go | 5 +- .../x/sys/unix/ztypes_openbsd_arm.go | 5 +- .../x/sys/unix/ztypes_openbsd_arm64.go | 5 +- .../x/sys/unix/ztypes_openbsd_mips64.go | 5 +- .../x/sys/unix/ztypes_solaris_amd64.go | 1 + .../golang.org/x/sys/unix/ztypes_zos_s390x.go | 406 + vendor/golang.org/x/sys/windows/empty.s | 1 + .../golang.org/x/sys/windows/exec_windows.go | 98 + vendor/golang.org/x/sys/windows/mkerrors.bash | 7 + .../x/sys/windows/security_windows.go | 29 +- .../x/sys/windows/syscall_windows.go | 191 +- .../golang.org/x/sys/windows/types_windows.go | 682 ++ .../x/sys/windows/types_windows_arm64.go | 34 + .../x/sys/windows/zerrors_windows.go | 2619 ++++++- .../x/sys/windows/zsyscall_windows.go | 350 +- .../x/text/internal/language/language.go | 43 +- .../x/text/internal/language/parse.go | 7 + vendor/golang.org/x/text/language/parse.go | 22 + .../api/annotations/annotations.pb.go | 11 +- .../googleapis/api/annotations/client.pb.go | 11 +- .../api/annotations/field_behavior.pb.go | 50 +- .../googleapis/api/annotations/http.pb.go | 11 +- .../googleapis/api/annotations/resource.pb.go | 11 +- .../googleapis/api/httpbody/httpbody.pb.go | 11 +- .../googleapis/rpc/status/status.pb.go | 9 +- vendor/google.golang.org/grpc/.travis.yml | 42 - vendor/google.golang.org/grpc/MAINTAINERS.md | 5 +- vendor/google.golang.org/grpc/Makefile | 2 - vendor/google.golang.org/grpc/NOTICE.txt | 13 + 
vendor/google.golang.org/grpc/README.md | 2 +- .../grpc/balancer/balancer.go | 68 +- .../grpc/balancer/base/balancer.go | 15 +- .../grpc/balancer/roundrobin/roundrobin.go | 4 +- .../grpc/balancer_conn_wrappers.go | 116 +- vendor/google.golang.org/grpc/clientconn.go | 355 +- .../grpc/cmd/protoc-gen-go-grpc/go.mod | 5 - .../grpc/cmd/protoc-gen-go-grpc/go.sum | 18 - .../grpc/connectivity/connectivity.go | 35 +- .../grpc/credentials/credentials.go | 24 +- .../grpc/credentials/go12.go | 30 - .../google.golang.org/grpc/credentials/tls.go | 3 + vendor/google.golang.org/grpc/dialoptions.go | 17 +- vendor/google.golang.org/grpc/go.mod | 17 - vendor/google.golang.org/grpc/go.sum | 99 - vendor/google.golang.org/grpc/install_gae.sh | 6 - .../grpc/internal/binarylog/sink.go | 41 +- .../grpc/internal/channelz/funcs.go | 2 +- .../grpc/internal/channelz/types_linux.go | 2 - .../grpc/internal/channelz/types_nonlinux.go | 5 +- .../grpc/internal/channelz/util_linux.go | 2 - .../grpc/internal/channelz/util_nonlinux.go | 3 +- .../grpc/internal/credentials/credentials.go | 49 + .../grpc/internal/credentials/spiffe.go | 2 - .../internal/credentials/spiffe_appengine.go | 31 - .../grpc/internal/credentials/syscallconn.go | 2 - .../credentials/syscallconn_appengine.go | 30 - .../grpc/internal/credentials/util.go | 4 +- .../grpc/internal/envconfig/envconfig.go | 6 +- .../grpc/internal/grpcrand/grpcrand.go | 29 +- .../grpc/internal/internal.go | 11 +- .../grpc/internal/resolver/config_selector.go | 7 +- .../internal/resolver/dns/dns_resolver.go | 52 +- .../grpc/internal/resolver/dns/go113.go | 33 - .../internal/serviceconfig/serviceconfig.go | 20 +- .../grpc/internal/status/status.go | 14 +- .../grpc/internal/syscall/syscall_linux.go | 2 - .../grpc/internal/syscall/syscall_nonlinux.go | 21 +- .../grpc/internal/transport/controlbuf.go | 56 +- .../grpc/internal/transport/handler_server.go | 3 +- .../grpc/internal/transport/http2_client.go | 283 +- .../grpc/internal/transport/http2_server.go | 
232 +- .../grpc/internal/transport/http_util.go | 224 +- .../transport/networktype/networktype.go | 2 +- .../grpc/internal/transport/transport.go | 19 +- .../grpc/internal/xds/env/env.go | 95 + .../grpc/internal/xds_handshake_cluster.go | 40 + .../grpc/metadata/metadata.go | 100 +- .../google.golang.org/grpc/picker_wrapper.go | 2 +- vendor/google.golang.org/grpc/pickfirst.go | 21 +- vendor/google.golang.org/grpc/regenerate.sh | 10 - .../grpc/resolver/resolver.go | 2 +- .../grpc/resolver_conn_wrapper.go | 71 +- vendor/google.golang.org/grpc/rpc_util.go | 50 +- vendor/google.golang.org/grpc/server.go | 218 +- vendor/google.golang.org/grpc/stats/stats.go | 11 +- vendor/google.golang.org/grpc/stream.go | 192 +- vendor/google.golang.org/grpc/tap/tap.go | 16 +- vendor/google.golang.org/grpc/version.go | 2 +- vendor/google.golang.org/grpc/vet.sh | 56 +- vendor/gopkg.in/yaml.v2/go.mod | 5 - vendor/gopkg.in/yaml.v3/go.mod | 5 - vendor/modules.txt | 272 +- 1438 files changed, 77199 insertions(+), 98926 deletions(-) delete mode 100644 gtreap_compact/iterator.go delete mode 100644 gtreap_compact/reader.go delete mode 100644 gtreap_compact/store.go delete mode 100644 gtreap_compact/writer.go create mode 100644 server/match_common.go delete mode 100644 vendor/github.com/RoaringBitmap/roaring/arraycontainer_gen.go delete mode 100644 vendor/github.com/RoaringBitmap/roaring/bitmapcontainer_gen.go delete mode 100644 vendor/github.com/RoaringBitmap/roaring/byte_input.go delete mode 100644 vendor/github.com/RoaringBitmap/roaring/go.mod delete mode 100644 vendor/github.com/RoaringBitmap/roaring/go.sum create mode 100644 vendor/github.com/RoaringBitmap/roaring/internal/byte_input.go create mode 100644 vendor/github.com/RoaringBitmap/roaring/internal/pools.go delete mode 100644 vendor/github.com/RoaringBitmap/roaring/roaringarray_gen.go delete mode 100644 vendor/github.com/RoaringBitmap/roaring/runcontainer_gen.go create mode 100644 
vendor/github.com/RoaringBitmap/roaring/setutil_arm64.go create mode 100644 vendor/github.com/RoaringBitmap/roaring/setutil_arm64.s create mode 100644 vendor/github.com/RoaringBitmap/roaring/setutil_generic.go create mode 100644 vendor/github.com/axiomhq/hyperloglog/.gitignore create mode 100644 vendor/github.com/axiomhq/hyperloglog/LICENSE create mode 100644 vendor/github.com/axiomhq/hyperloglog/README.md create mode 100644 vendor/github.com/axiomhq/hyperloglog/compressed.go create mode 100644 vendor/github.com/axiomhq/hyperloglog/hyperloglog.go create mode 100644 vendor/github.com/axiomhq/hyperloglog/registers.go create mode 100644 vendor/github.com/axiomhq/hyperloglog/sparse.go create mode 100644 vendor/github.com/axiomhq/hyperloglog/utils.go rename vendor/github.com/{willf => bits-and-blooms}/bitset/.gitignore (100%) rename vendor/github.com/{willf => bits-and-blooms}/bitset/.travis.yml (100%) rename vendor/github.com/{willf => bits-and-blooms}/bitset/LICENSE (100%) rename vendor/github.com/{willf => bits-and-blooms}/bitset/README.md (70%) create mode 100644 vendor/github.com/bits-and-blooms/bitset/azure-pipelines.yml rename vendor/github.com/{willf => bits-and-blooms}/bitset/bitset.go (86%) rename vendor/github.com/{willf => bits-and-blooms}/bitset/popcnt.go (100%) rename vendor/github.com/{willf => bits-and-blooms}/bitset/popcnt_19.go (100%) rename vendor/github.com/{willf => bits-and-blooms}/bitset/popcnt_amd64.go (100%) rename vendor/github.com/{willf => bits-and-blooms}/bitset/popcnt_amd64.s (100%) rename vendor/github.com/{willf => bits-and-blooms}/bitset/popcnt_generic.go (100%) rename vendor/github.com/{willf => bits-and-blooms}/bitset/trailing_zeros_18.go (100%) rename vendor/github.com/{willf => bits-and-blooms}/bitset/trailing_zeros_19.go (100%) delete mode 100644 vendor/github.com/blevesearch/bleve/v2/.travis.yml delete mode 100644 vendor/github.com/blevesearch/bleve/v2/CONTRIBUTING.md delete mode 100644 
vendor/github.com/blevesearch/bleve/v2/README.md delete mode 100644 vendor/github.com/blevesearch/bleve/v2/analysis/analyzer/standard/standard.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/analysis/datetime/flexible/flexible.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/analysis/datetime/optional/optional.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/analysis/freq.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/analysis/lang/en/analyzer_en.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/analysis/lang/en/stop_filter_en.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/builder.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/config.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/doc.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/document/document.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/document/field.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/document/field_boolean.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/document/field_composite.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/document/field_datetime.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/document/field_geopoint.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/document/field_numeric.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/document/field_text.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/error.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/geo/parse.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/go.mod delete mode 100644 vendor/github.com/blevesearch/bleve/v2/go.sum delete mode 100644 vendor/github.com/blevesearch/bleve/v2/index.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/index/scorch/README.md delete mode 100644 vendor/github.com/blevesearch/bleve/v2/index/scorch/builder.go delete mode 100644 
vendor/github.com/blevesearch/bleve/v2/index/scorch/event.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/index/scorch/int.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/index/scorch/introducer.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/index/scorch/merge.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/index/scorch/persister.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/index/scorch/rollback.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/index/scorch/scorch.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/index/scorch/segment_plugin.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/index/scorch/snapshot_index.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/index/scorch/snapshot_index_doc.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/index/scorch/snapshot_index_tfr.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/index/scorch/snapshot_segment.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/index/upsidedown/analysis.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/index/upsidedown/benchmark_all.sh delete mode 100644 vendor/github.com/blevesearch/bleve/v2/index/upsidedown/dump.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/index/upsidedown/field_cache.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/index/upsidedown/field_dict.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/index/upsidedown/index_reader.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/index/upsidedown/reader.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/index/upsidedown/row.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/index/upsidedown/row_merge.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/index/upsidedown/stats.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/index/upsidedown/store/boltdb/iterator.go delete mode 100644 
vendor/github.com/blevesearch/bleve/v2/index/upsidedown/store/boltdb/reader.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/index/upsidedown/store/boltdb/store.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/index/upsidedown/store/boltdb/writer.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/index/upsidedown/store/gtreap/iterator.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/index/upsidedown/store/gtreap/reader.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/index/upsidedown/store/gtreap/store.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/index/upsidedown/store/gtreap/writer.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/index/upsidedown/upsidedown.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/index/upsidedown/upsidedown.pb.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/index/upsidedown/upsidedown.proto delete mode 100644 vendor/github.com/blevesearch/bleve/v2/index_alias.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/index_alias_impl.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/index_impl.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/index_meta.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/index_stats.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/mapping.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/mapping/analysis.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/mapping/document.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/mapping/field.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/mapping/index.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/mapping/mapping.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/mapping/reflect.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/query.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/registry/analyzer.go delete mode 
100644 vendor/github.com/blevesearch/bleve/v2/registry/cache.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/registry/char_filter.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/registry/datetime_parser.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/registry/fragment_formatter.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/registry/fragmenter.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/registry/highlighter.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/registry/index_type.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/registry/registry.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/registry/store.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/registry/token_filter.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/registry/token_maps.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/registry/tokenizer.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/search.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/search/collector.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/search/collector/list.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/search/collector/topn.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/search/facet/benchmark_data.txt delete mode 100644 vendor/github.com/blevesearch/bleve/v2/search/facet/facet_builder_datetime.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/search/facet/facet_builder_numeric.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/search/facet/facet_builder_terms.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/search/facets_builder.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/search/highlight/format/html/html.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/search/highlight/fragmenter/simple/simple.go delete mode 100644 
vendor/github.com/blevesearch/bleve/v2/search/highlight/highlighter.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/search/highlight/highlighter/html/html.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/search/highlight/highlighter/simple/fragment_scorer_simple.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/search/highlight/highlighter/simple/highlighter_simple.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/search/highlight/term_locations.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/search/levenshtein.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/search/query/bool_field.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/search/query/boolean.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/search/query/conjunction.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/search/query/date_range.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/search/query/disjunction.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/search/query/docid.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/search/query/fuzzy.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/search/query/geo_boundingbox.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/search/query/geo_boundingpolygon.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/search/query/geo_distance.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/search/query/match.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/search/query/match_all.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/search/query/match_none.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/search/query/match_phrase.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/search/query/multi_phrase.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/search/query/numeric_range.go delete mode 100644 
vendor/github.com/blevesearch/bleve/v2/search/query/phrase.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/search/query/prefix.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/search/query/query.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/search/query/query_string.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/search/query/query_string.y delete mode 100644 vendor/github.com/blevesearch/bleve/v2/search/query/query_string_parser.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/search/query/regexp.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/search/query/term.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/search/query/term_range.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/search/query/wildcard.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/search/scorer/scorer_conjunction.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/search/scorer/scorer_constant.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/search/scorer/scorer_disjunction.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/search/scorer/scorer_term.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/search/search.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/search/searcher/search_docid.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/search/searcher/search_fuzzy.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/search/searcher/search_match_all.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/search/searcher/search_regexp.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/search/searcher/search_term.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/search/searcher/search_term_range.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/search/sort.go delete mode 100644 vendor/github.com/blevesearch/bleve/v2/size/sizes.go delete mode 100644 
vendor/github.com/blevesearch/bleve_index_api/.golangci.yml delete mode 100644 vendor/github.com/blevesearch/bleve_index_api/README.md delete mode 100644 vendor/github.com/blevesearch/bleve_index_api/analysis.go delete mode 100644 vendor/github.com/blevesearch/bleve_index_api/batch.go delete mode 100644 vendor/github.com/blevesearch/bleve_index_api/document.go delete mode 100644 vendor/github.com/blevesearch/bleve_index_api/freq.go delete mode 100644 vendor/github.com/blevesearch/bleve_index_api/go.mod delete mode 100644 vendor/github.com/blevesearch/bleve_index_api/go.sum delete mode 100644 vendor/github.com/blevesearch/bleve_index_api/index.go delete mode 100644 vendor/github.com/blevesearch/bleve_index_api/indexing_options.go delete mode 100644 vendor/github.com/blevesearch/bleve_index_api/optimize.go delete mode 100644 vendor/github.com/blevesearch/go-porterstemmer/go.mod delete mode 100644 vendor/github.com/blevesearch/mmap-go/go.mod delete mode 100644 vendor/github.com/blevesearch/mmap-go/go.sum delete mode 100644 vendor/github.com/blevesearch/scorch_segment_api/v2/.golangci.yml delete mode 100644 vendor/github.com/blevesearch/scorch_segment_api/v2/README.md delete mode 100644 vendor/github.com/blevesearch/scorch_segment_api/v2/go.mod delete mode 100644 vendor/github.com/blevesearch/scorch_segment_api/v2/go.sum delete mode 100644 vendor/github.com/blevesearch/segment/go.mod delete mode 100644 vendor/github.com/blevesearch/snowballstem/go.mod delete mode 100644 vendor/github.com/blevesearch/upsidedown_store_api/README.md delete mode 100644 vendor/github.com/blevesearch/upsidedown_store_api/batch.go delete mode 100644 vendor/github.com/blevesearch/upsidedown_store_api/go.mod delete mode 100644 vendor/github.com/blevesearch/upsidedown_store_api/kvstore.go delete mode 100644 vendor/github.com/blevesearch/upsidedown_store_api/merge.go delete mode 100644 vendor/github.com/blevesearch/upsidedown_store_api/multiget.go delete mode 100644 
vendor/github.com/blevesearch/vellum/go.mod delete mode 100644 vendor/github.com/blevesearch/vellum/go.sum delete mode 100644 vendor/github.com/blevesearch/zapx/v11/.golangci.yml delete mode 100644 vendor/github.com/blevesearch/zapx/v11/LICENSE delete mode 100644 vendor/github.com/blevesearch/zapx/v11/README.md delete mode 100644 vendor/github.com/blevesearch/zapx/v11/build.go delete mode 100644 vendor/github.com/blevesearch/zapx/v11/contentcoder.go delete mode 100644 vendor/github.com/blevesearch/zapx/v11/count.go delete mode 100644 vendor/github.com/blevesearch/zapx/v11/dict.go delete mode 100644 vendor/github.com/blevesearch/zapx/v11/docvalues.go delete mode 100644 vendor/github.com/blevesearch/zapx/v11/enumerator.go delete mode 100644 vendor/github.com/blevesearch/zapx/v11/go.mod delete mode 100644 vendor/github.com/blevesearch/zapx/v11/go.sum delete mode 100644 vendor/github.com/blevesearch/zapx/v11/intcoder.go delete mode 100644 vendor/github.com/blevesearch/zapx/v11/memuvarint.go delete mode 100644 vendor/github.com/blevesearch/zapx/v11/merge.go delete mode 100644 vendor/github.com/blevesearch/zapx/v11/new.go delete mode 100644 vendor/github.com/blevesearch/zapx/v11/posting.go delete mode 100644 vendor/github.com/blevesearch/zapx/v11/read.go delete mode 100644 vendor/github.com/blevesearch/zapx/v11/segment.go delete mode 100644 vendor/github.com/blevesearch/zapx/v11/sizes.go delete mode 100644 vendor/github.com/blevesearch/zapx/v11/write.go delete mode 100644 vendor/github.com/blevesearch/zapx/v11/zap.md delete mode 100644 vendor/github.com/blevesearch/zapx/v12/.golangci.yml delete mode 100644 vendor/github.com/blevesearch/zapx/v12/LICENSE delete mode 100644 vendor/github.com/blevesearch/zapx/v12/README.md delete mode 100644 vendor/github.com/blevesearch/zapx/v12/build.go delete mode 100644 vendor/github.com/blevesearch/zapx/v12/chunk.go delete mode 100644 vendor/github.com/blevesearch/zapx/v12/go.mod delete mode 100644 
vendor/github.com/blevesearch/zapx/v12/go.sum delete mode 100644 vendor/github.com/blevesearch/zapx/v12/intDecoder.go delete mode 100644 vendor/github.com/blevesearch/zapx/v12/intcoder.go delete mode 100644 vendor/github.com/blevesearch/zapx/v12/memuvarint.go delete mode 100644 vendor/github.com/blevesearch/zapx/v12/merge.go delete mode 100644 vendor/github.com/blevesearch/zapx/v12/new.go delete mode 100644 vendor/github.com/blevesearch/zapx/v12/posting.go delete mode 100644 vendor/github.com/blevesearch/zapx/v12/read.go delete mode 100644 vendor/github.com/blevesearch/zapx/v12/segment.go delete mode 100644 vendor/github.com/blevesearch/zapx/v12/sizes.go delete mode 100644 vendor/github.com/blevesearch/zapx/v12/write.go delete mode 100644 vendor/github.com/blevesearch/zapx/v12/zap.md delete mode 100644 vendor/github.com/blevesearch/zapx/v13/.gitignore delete mode 100644 vendor/github.com/blevesearch/zapx/v13/.golangci.yml delete mode 100644 vendor/github.com/blevesearch/zapx/v13/LICENSE delete mode 100644 vendor/github.com/blevesearch/zapx/v13/README.md delete mode 100644 vendor/github.com/blevesearch/zapx/v13/build.go delete mode 100644 vendor/github.com/blevesearch/zapx/v13/chunk.go delete mode 100644 vendor/github.com/blevesearch/zapx/v13/contentcoder.go delete mode 100644 vendor/github.com/blevesearch/zapx/v13/count.go delete mode 100644 vendor/github.com/blevesearch/zapx/v13/dict.go delete mode 100644 vendor/github.com/blevesearch/zapx/v13/docvalues.go delete mode 100644 vendor/github.com/blevesearch/zapx/v13/enumerator.go delete mode 100644 vendor/github.com/blevesearch/zapx/v13/go.mod delete mode 100644 vendor/github.com/blevesearch/zapx/v13/go.sum delete mode 100644 vendor/github.com/blevesearch/zapx/v13/intDecoder.go delete mode 100644 vendor/github.com/blevesearch/zapx/v13/intcoder.go delete mode 100644 vendor/github.com/blevesearch/zapx/v13/memuvarint.go delete mode 100644 vendor/github.com/blevesearch/zapx/v13/merge.go delete mode 100644 
vendor/github.com/blevesearch/zapx/v13/posting.go delete mode 100644 vendor/github.com/blevesearch/zapx/v13/read.go delete mode 100644 vendor/github.com/blevesearch/zapx/v13/segment.go delete mode 100644 vendor/github.com/blevesearch/zapx/v13/sizes.go delete mode 100644 vendor/github.com/blevesearch/zapx/v13/write.go delete mode 100644 vendor/github.com/blevesearch/zapx/v13/zap.md delete mode 100644 vendor/github.com/blevesearch/zapx/v14/.golangci.yml delete mode 100644 vendor/github.com/blevesearch/zapx/v14/LICENSE delete mode 100644 vendor/github.com/blevesearch/zapx/v14/README.md delete mode 100644 vendor/github.com/blevesearch/zapx/v14/build.go delete mode 100644 vendor/github.com/blevesearch/zapx/v14/contentcoder.go delete mode 100644 vendor/github.com/blevesearch/zapx/v14/count.go delete mode 100644 vendor/github.com/blevesearch/zapx/v14/dict.go delete mode 100644 vendor/github.com/blevesearch/zapx/v14/docvalues.go delete mode 100644 vendor/github.com/blevesearch/zapx/v14/enumerator.go delete mode 100644 vendor/github.com/blevesearch/zapx/v14/go.mod delete mode 100644 vendor/github.com/blevesearch/zapx/v14/go.sum delete mode 100644 vendor/github.com/blevesearch/zapx/v14/intDecoder.go delete mode 100644 vendor/github.com/blevesearch/zapx/v14/intcoder.go delete mode 100644 vendor/github.com/blevesearch/zapx/v14/merge.go delete mode 100644 vendor/github.com/blevesearch/zapx/v14/new.go delete mode 100644 vendor/github.com/blevesearch/zapx/v14/read.go delete mode 100644 vendor/github.com/blevesearch/zapx/v14/segment.go delete mode 100644 vendor/github.com/blevesearch/zapx/v14/sizes.go delete mode 100644 vendor/github.com/blevesearch/zapx/v14/write.go delete mode 100644 vendor/github.com/blevesearch/zapx/v14/zap.md delete mode 100644 vendor/github.com/blevesearch/zapx/v15/.gitignore delete mode 100644 vendor/github.com/blevesearch/zapx/v15/.golangci.yml delete mode 100644 vendor/github.com/blevesearch/zapx/v15/LICENSE delete mode 100644 
vendor/github.com/blevesearch/zapx/v15/README.md delete mode 100644 vendor/github.com/blevesearch/zapx/v15/build.go delete mode 100644 vendor/github.com/blevesearch/zapx/v15/chunk.go delete mode 100644 vendor/github.com/blevesearch/zapx/v15/contentcoder.go delete mode 100644 vendor/github.com/blevesearch/zapx/v15/count.go delete mode 100644 vendor/github.com/blevesearch/zapx/v15/dict.go delete mode 100644 vendor/github.com/blevesearch/zapx/v15/docvalues.go delete mode 100644 vendor/github.com/blevesearch/zapx/v15/enumerator.go delete mode 100644 vendor/github.com/blevesearch/zapx/v15/go.mod delete mode 100644 vendor/github.com/blevesearch/zapx/v15/go.sum delete mode 100644 vendor/github.com/blevesearch/zapx/v15/memuvarint.go delete mode 100644 vendor/github.com/blevesearch/zapx/v15/merge.go delete mode 100644 vendor/github.com/blevesearch/zapx/v15/new.go delete mode 100644 vendor/github.com/blevesearch/zapx/v15/plugin.go delete mode 100644 vendor/github.com/blevesearch/zapx/v15/posting.go delete mode 100644 vendor/github.com/blevesearch/zapx/v15/read.go delete mode 100644 vendor/github.com/blevesearch/zapx/v15/segment.go delete mode 100644 vendor/github.com/blevesearch/zapx/v15/sizes.go delete mode 100644 vendor/github.com/blevesearch/zapx/v15/write.go delete mode 100644 vendor/github.com/blevesearch/zapx/v15/zap.md rename vendor/github.com/{blevesearch/bleve/v2 => blugelabs/bluge}/.gitignore (93%) create mode 100644 vendor/github.com/blugelabs/bluge/.golangci.yml create mode 100644 vendor/github.com/blugelabs/bluge/AUTHORS create mode 100644 vendor/github.com/blugelabs/bluge/CONTRIBUTING.md rename vendor/github.com/{blevesearch/bleve/v2 => blugelabs/bluge}/LICENSE (100%) create mode 100644 vendor/github.com/blugelabs/bluge/README.md rename vendor/github.com/{blevesearch/zapx/v14/plugin.go => blugelabs/bluge/analysis/analyzer/keyword.go} (71%) rename vendor/github.com/{blevesearch/zapx/v11/plugin.go => blugelabs/bluge/analysis/analyzer/simple.go} (62%) create mode 
100644 vendor/github.com/blugelabs/bluge/analysis/analyzer/standard.go create mode 100644 vendor/github.com/blugelabs/bluge/analysis/analyzer/web.go create mode 100644 vendor/github.com/blugelabs/bluge/analysis/freq.go create mode 100644 vendor/github.com/blugelabs/bluge/analysis/lang/en/analyzer_en.go rename vendor/github.com/{blevesearch/bleve/v2 => blugelabs/bluge}/analysis/lang/en/possessive_filter_en.go (73%) rename vendor/github.com/{blevesearch/bleve/v2 => blugelabs/bluge}/analysis/lang/en/stemmer_en_snowball.go (68%) rename vendor/github.com/{blevesearch/bleve/v2/index/upsidedown/store/boltdb/stats.go => blugelabs/bluge/analysis/lang/en/stop_filter_en.go} (73%) rename vendor/github.com/{blevesearch/bleve/v2 => blugelabs/bluge}/analysis/lang/en/stop_words_en.go (90%) create mode 100644 vendor/github.com/blugelabs/bluge/analysis/size.go rename vendor/github.com/{blevesearch/bleve/v2 => blugelabs/bluge}/analysis/test_words.txt (100%) create mode 100644 vendor/github.com/blugelabs/bluge/analysis/token/apostrophe.go create mode 100644 vendor/github.com/blugelabs/bluge/analysis/token/camelcase.go create mode 100644 vendor/github.com/blugelabs/bluge/analysis/token/camelcase_parser.go create mode 100644 vendor/github.com/blugelabs/bluge/analysis/token/camelcase_states.go create mode 100644 vendor/github.com/blugelabs/bluge/analysis/token/dict.go create mode 100644 vendor/github.com/blugelabs/bluge/analysis/token/edgengram.go create mode 100644 vendor/github.com/blugelabs/bluge/analysis/token/elision.go create mode 100644 vendor/github.com/blugelabs/bluge/analysis/token/keyword.go create mode 100644 vendor/github.com/blugelabs/bluge/analysis/token/length.go rename vendor/github.com/{blevesearch/bleve/v2/analysis/token/lowercase => blugelabs/bluge/analysis/token}/lowercase.go (80%) create mode 100644 vendor/github.com/blugelabs/bluge/analysis/token/ngram.go rename vendor/github.com/{blevesearch/bleve/v2/analysis/token/porter => 
blugelabs/bluge/analysis/token}/porter.go (71%) create mode 100644 vendor/github.com/blugelabs/bluge/analysis/token/reverse.go create mode 100644 vendor/github.com/blugelabs/bluge/analysis/token/shingle.go rename vendor/github.com/{blevesearch/bleve/v2/analysis/token/stop => blugelabs/bluge/analysis/token}/stop.go (63%) create mode 100644 vendor/github.com/blugelabs/bluge/analysis/token/truncate.go create mode 100644 vendor/github.com/blugelabs/bluge/analysis/token/unicodenorm.go create mode 100644 vendor/github.com/blugelabs/bluge/analysis/token/unique.go create mode 100644 vendor/github.com/blugelabs/bluge/analysis/tokenizer/character.go create mode 100644 vendor/github.com/blugelabs/bluge/analysis/tokenizer/exception.go rename vendor/github.com/{blevesearch/zapx/v13/plugin.go => blugelabs/bluge/analysis/tokenizer/letter.go} (72%) create mode 100644 vendor/github.com/blugelabs/bluge/analysis/tokenizer/regexp.go rename vendor/github.com/{blevesearch/bleve/v2/analysis/tokenizer/single => blugelabs/bluge/analysis/tokenizer}/single.go (57%) rename vendor/github.com/{blevesearch/bleve/v2/analysis/tokenizer/unicode => blugelabs/bluge/analysis/tokenizer}/unicode.go (80%) create mode 100644 vendor/github.com/blugelabs/bluge/analysis/tokenizer/web.go rename vendor/github.com/{blevesearch/zapx/v12/plugin.go => blugelabs/bluge/analysis/tokenizer/whitespace.go} (72%) rename vendor/github.com/{blevesearch/bleve/v2 => blugelabs/bluge}/analysis/tokenmap.go (91%) rename vendor/github.com/{blevesearch/bleve/v2 => blugelabs/bluge}/analysis/type.go (71%) rename vendor/github.com/{blevesearch/bleve/v2 => blugelabs/bluge}/analysis/util.go (100%) create mode 100644 vendor/github.com/blugelabs/bluge/batch.go create mode 100644 vendor/github.com/blugelabs/bluge/config.go create mode 100644 vendor/github.com/blugelabs/bluge/doc.go create mode 100644 vendor/github.com/blugelabs/bluge/document.go create mode 100644 vendor/github.com/blugelabs/bluge/field.go create mode 100644 
vendor/github.com/blugelabs/bluge/index/batch.go create mode 100644 vendor/github.com/blugelabs/bluge/index/communication.go create mode 100644 vendor/github.com/blugelabs/bluge/index/config.go create mode 100644 vendor/github.com/blugelabs/bluge/index/count.go create mode 100644 vendor/github.com/blugelabs/bluge/index/deletion.go rename vendor/github.com/{blevesearch/bleve/v2/index/scorch/snapshot_index_dict.go => blugelabs/bluge/index/dictionary.go} (59%) create mode 100644 vendor/github.com/blugelabs/bluge/index/directory.go create mode 100644 vendor/github.com/blugelabs/bluge/index/directory_fs.go create mode 100644 vendor/github.com/blugelabs/bluge/index/directory_fs_nix.go rename vendor/github.com/{blevesearch/bleve/v2/search/scorer/sqrt_cache.go => blugelabs/bluge/index/directory_fs_windows.go} (69%) create mode 100644 vendor/github.com/blugelabs/bluge/index/directory_mem.go rename vendor/github.com/{blevesearch/bleve/v2/index/scorch => blugelabs/bluge/index}/empty.go (78%) create mode 100644 vendor/github.com/blugelabs/bluge/index/event.go create mode 100644 vendor/github.com/blugelabs/bluge/index/introducer.go create mode 100644 vendor/github.com/blugelabs/bluge/index/lock/lock.go rename vendor/github.com/{blevesearch/bleve/v2/analysis/analyzer/keyword/keyword.go => blugelabs/bluge/index/lock/lock_nix.go} (51%) create mode 100644 vendor/github.com/blugelabs/bluge/index/lock/lock_windows.go create mode 100644 vendor/github.com/blugelabs/bluge/index/merge.go rename vendor/github.com/{blevesearch/bleve/v2/index/scorch => blugelabs/bluge/index}/mergeplan/merge_plan.go (88%) rename vendor/github.com/{blevesearch/bleve/v2/index/scorch => blugelabs/bluge/index}/mergeplan/sort.go (92%) rename vendor/github.com/{blevesearch/bleve/v2/index/scorch => blugelabs/bluge/index}/optimize.go (68%) create mode 100644 vendor/github.com/blugelabs/bluge/index/persister.go create mode 100644 vendor/github.com/blugelabs/bluge/index/postings.go create mode 100644 
vendor/github.com/blugelabs/bluge/index/postings_all.go create mode 100644 vendor/github.com/blugelabs/bluge/index/segment.go create mode 100644 vendor/github.com/blugelabs/bluge/index/segment_plugin.go create mode 100644 vendor/github.com/blugelabs/bluge/index/sizes.go create mode 100644 vendor/github.com/blugelabs/bluge/index/snapshot.go rename vendor/github.com/{blevesearch/bleve/v2/index/scorch => blugelabs/bluge/index}/stats.go (77%) rename vendor/github.com/{blevesearch/bleve/v2/index/scorch => blugelabs/bluge/index}/unadorned.go (76%) create mode 100644 vendor/github.com/blugelabs/bluge/index/writer.go create mode 100644 vendor/github.com/blugelabs/bluge/index/writer_offline.go create mode 100644 vendor/github.com/blugelabs/bluge/multisearch.go rename vendor/github.com/{blevesearch/bleve/v2 => blugelabs/bluge}/numeric/bin.go (72%) rename vendor/github.com/{blevesearch/bleve/v2 => blugelabs/bluge}/numeric/float.go (91%) rename vendor/github.com/{blevesearch/bleve/v2 => blugelabs/bluge/numeric}/geo/README.md (96%) rename vendor/github.com/{blevesearch/bleve/v2 => blugelabs/bluge/numeric}/geo/geo.go (91%) rename vendor/github.com/{blevesearch/bleve/v2 => blugelabs/bluge/numeric}/geo/geo_dist.go (72%) rename vendor/github.com/{blevesearch/bleve/v2 => blugelabs/bluge/numeric}/geo/geohash.go (84%) create mode 100644 vendor/github.com/blugelabs/bluge/numeric/geo/parse.go rename vendor/github.com/{blevesearch/bleve/v2 => blugelabs/bluge/numeric}/geo/sloppy.go (97%) rename vendor/github.com/{blevesearch/bleve/v2 => blugelabs/bluge}/numeric/prefix_coded.go (91%) create mode 100644 vendor/github.com/blugelabs/bluge/query.go create mode 100644 vendor/github.com/blugelabs/bluge/reader.go create mode 100644 vendor/github.com/blugelabs/bluge/search.go create mode 100644 vendor/github.com/blugelabs/bluge/search/aggregations.go create mode 100644 vendor/github.com/blugelabs/bluge/search/aggregations/cardinality.go create mode 100644 
vendor/github.com/blugelabs/bluge/search/aggregations/count.go create mode 100644 vendor/github.com/blugelabs/bluge/search/aggregations/duration.go create mode 100644 vendor/github.com/blugelabs/bluge/search/aggregations/filter.go create mode 100644 vendor/github.com/blugelabs/bluge/search/aggregations/metric.go create mode 100644 vendor/github.com/blugelabs/bluge/search/aggregations/percentiles.go create mode 100644 vendor/github.com/blugelabs/bluge/search/aggregations/range.go create mode 100644 vendor/github.com/blugelabs/bluge/search/aggregations/range_date.go create mode 100644 vendor/github.com/blugelabs/bluge/search/aggregations/terms.go create mode 100644 vendor/github.com/blugelabs/bluge/search/collector.go create mode 100644 vendor/github.com/blugelabs/bluge/search/collector/all.go rename vendor/github.com/{blevesearch/bleve/v2 => blugelabs/bluge}/search/collector/heap.go (96%) create mode 100644 vendor/github.com/blugelabs/bluge/search/collector/iterator.go rename vendor/github.com/{blevesearch/bleve_index_api/sizes.go => blugelabs/bluge/search/collector/size.go} (77%) rename vendor/github.com/{blevesearch/bleve/v2 => blugelabs/bluge}/search/collector/slice.go (95%) create mode 100644 vendor/github.com/blugelabs/bluge/search/collector/topn.go rename vendor/github.com/{blevesearch/bleve/v2 => blugelabs/bluge}/search/explanation.go (66%) rename vendor/github.com/{blevesearch/bleve/v2 => blugelabs/bluge}/search/pool.go (86%) create mode 100644 vendor/github.com/blugelabs/bluge/search/search.go rename vendor/github.com/{blevesearch/bleve/v2 => blugelabs/bluge}/search/searcher/ordered_searchers_list.go (91%) rename vendor/github.com/{blevesearch/bleve/v2 => blugelabs/bluge}/search/searcher/search_boolean.go (52%) rename vendor/github.com/{blevesearch/bleve/v2 => blugelabs/bluge}/search/searcher/search_conjunction.go (64%) rename vendor/github.com/{blevesearch/bleve/v2 => blugelabs/bluge}/search/searcher/search_disjunction.go (66%) rename 
vendor/github.com/{blevesearch/bleve/v2 => blugelabs/bluge}/search/searcher/search_disjunction_heap.go (65%) rename vendor/github.com/{blevesearch/bleve/v2 => blugelabs/bluge}/search/searcher/search_disjunction_slice.go (67%) rename vendor/github.com/{blevesearch/bleve/v2 => blugelabs/bluge}/search/searcher/search_filter.go (69%) create mode 100644 vendor/github.com/blugelabs/bluge/search/searcher/search_fuzzy.go rename vendor/github.com/{blevesearch/bleve/v2 => blugelabs/bluge}/search/searcher/search_geoboundingbox.go (67%) rename vendor/github.com/{blevesearch/bleve/v2 => blugelabs/bluge}/search/searcher/search_geopointdistance.go (65%) rename vendor/github.com/{blevesearch/bleve/v2 => blugelabs/bluge}/search/searcher/search_geopolygon.go (74%) create mode 100644 vendor/github.com/blugelabs/bluge/search/searcher/search_match_all.go rename vendor/github.com/{blevesearch/bleve/v2 => blugelabs/bluge}/search/searcher/search_match_none.go (51%) rename vendor/github.com/{blevesearch/bleve/v2 => blugelabs/bluge}/search/searcher/search_multi_term.go (57%) rename vendor/github.com/{blevesearch/bleve/v2 => blugelabs/bluge}/search/searcher/search_numeric_range.go (63%) rename vendor/github.com/{blevesearch/bleve/v2 => blugelabs/bluge}/search/searcher/search_phrase.go (72%) rename vendor/github.com/{blevesearch/bleve/v2/index/scorch/regexp.go => blugelabs/bluge/search/searcher/search_regexp.go} (60%) create mode 100644 vendor/github.com/blugelabs/bluge/search/searcher/search_term.go rename vendor/github.com/{blevesearch/bleve/v2 => blugelabs/bluge}/search/searcher/search_term_prefix.go (64%) create mode 100644 vendor/github.com/blugelabs/bluge/search/searcher/search_term_range.go create mode 100644 vendor/github.com/blugelabs/bluge/search/searcher/size.go create mode 100644 vendor/github.com/blugelabs/bluge/search/similarity/bm25.go create mode 100644 vendor/github.com/blugelabs/bluge/search/similarity/composite.go create mode 100644 
vendor/github.com/blugelabs/bluge/search/similarity/constant.go create mode 100644 vendor/github.com/blugelabs/bluge/search/size.go create mode 100644 vendor/github.com/blugelabs/bluge/search/sort.go create mode 100644 vendor/github.com/blugelabs/bluge/search/source.go rename vendor/github.com/{blevesearch/bleve/v2 => blugelabs/bluge}/search/util.go (88%) create mode 100644 vendor/github.com/blugelabs/bluge/size.go create mode 100644 vendor/github.com/blugelabs/bluge/writer.go create mode 100644 vendor/github.com/blugelabs/bluge/writer_offline.go rename vendor/github.com/{blevesearch/zapx/v14 => blugelabs/bluge_segment_api}/.gitignore (86%) create mode 100644 vendor/github.com/blugelabs/bluge_segment_api/.golangci.yml create mode 100644 vendor/github.com/blugelabs/bluge_segment_api/AUTHORS create mode 100644 vendor/github.com/blugelabs/bluge_segment_api/CONTRIBUTING.md rename vendor/github.com/{blevesearch/bleve_index_api => blugelabs/bluge_segment_api}/LICENSE (100%) create mode 100644 vendor/github.com/blugelabs/bluge_segment_api/README.md rename vendor/github.com/{blevesearch/scorch_segment_api/v2 => blugelabs/bluge_segment_api}/automaton.go (90%) create mode 100644 vendor/github.com/blugelabs/bluge_segment_api/data.go create mode 100644 vendor/github.com/blugelabs/bluge_segment_api/document.go rename vendor/github.com/{blevesearch/scorch_segment_api/v2 => blugelabs/bluge_segment_api}/segment.go (53%) create mode 100644 vendor/github.com/blugelabs/bluge_segment_api/stats.go rename vendor/github.com/{blevesearch/zapx/v11 => blugelabs/ice}/.gitignore (86%) create mode 100644 vendor/github.com/blugelabs/ice/.golangci.yml create mode 100644 vendor/github.com/blugelabs/ice/AUTHORS create mode 100644 vendor/github.com/blugelabs/ice/CONTRIBUTING.md rename vendor/github.com/{blevesearch/scorch_segment_api/v2 => blugelabs/ice}/LICENSE (100%) create mode 100644 vendor/github.com/blugelabs/ice/README.md rename vendor/github.com/{blevesearch/zapx/v14 => 
blugelabs/ice}/chunk.go (52%) rename vendor/github.com/{blevesearch/zapx/v12 => blugelabs/ice}/contentcoder.go (87%) rename vendor/github.com/{blevesearch/zapx/v12 => blugelabs/ice}/count.go (59%) rename vendor/github.com/{blevesearch/zapx/v12 => blugelabs/ice}/dict.go (81%) rename vendor/github.com/{blevesearch/zapx/v12 => blugelabs/ice}/docvalues.go (66%) rename vendor/github.com/{blevesearch/zapx/v12 => blugelabs/ice}/enumerator.go (91%) create mode 100644 vendor/github.com/blugelabs/ice/footer.go rename vendor/github.com/{blevesearch/bleve/v2/search/query/boost.go => blugelabs/ice/freq.go} (62%) rename vendor/github.com/{blevesearch/zapx/v15 => blugelabs/ice}/intcoder.go (85%) rename vendor/github.com/{blevesearch/zapx/v15/intDecoder.go => blugelabs/ice/intdecoder.go} (72%) create mode 100644 vendor/github.com/blugelabs/ice/load.go rename vendor/github.com/{blevesearch/zapx/v14 => blugelabs/ice}/memuvarint.go (66%) create mode 100644 vendor/github.com/blugelabs/ice/merge.go rename vendor/github.com/{blevesearch/zapx/v13 => blugelabs/ice}/new.go (54%) rename vendor/github.com/{blevesearch/zapx/v14 => blugelabs/ice}/posting.go (73%) create mode 100644 vendor/github.com/blugelabs/ice/read.go create mode 100644 vendor/github.com/blugelabs/ice/segment.go create mode 100644 vendor/github.com/blugelabs/ice/sizes.go create mode 100644 vendor/github.com/blugelabs/ice/stats.go create mode 100644 vendor/github.com/blugelabs/ice/write.go rename vendor/github.com/{blevesearch/zapx/v12 => blugelabs/query_string}/.gitignore (78%) create mode 100644 vendor/github.com/blugelabs/query_string/.golangci.yml rename vendor/github.com/{blevesearch/upsidedown_store_api => blugelabs/query_string}/LICENSE (100%) create mode 100644 vendor/github.com/blugelabs/query_string/README.md create mode 100644 vendor/github.com/blugelabs/query_string/query_string.y rename vendor/github.com/{blevesearch/bleve/v2/search/query => blugelabs/query_string}/query_string.y.go (56%) rename 
vendor/github.com/{blevesearch/bleve/v2/search/query => blugelabs/query_string}/query_string_lex.go (83%) create mode 100644 vendor/github.com/blugelabs/query_string/query_string_parser.go create mode 100644 vendor/github.com/caio/go-tdigest/.gitignore create mode 100644 vendor/github.com/caio/go-tdigest/CONTRIBUTING.md create mode 100644 vendor/github.com/caio/go-tdigest/Gopkg.lock create mode 100644 vendor/github.com/caio/go-tdigest/Gopkg.toml create mode 100644 vendor/github.com/caio/go-tdigest/LICENSE create mode 100644 vendor/github.com/caio/go-tdigest/README.md create mode 100644 vendor/github.com/caio/go-tdigest/options.go create mode 100644 vendor/github.com/caio/go-tdigest/rng.go create mode 100644 vendor/github.com/caio/go-tdigest/serialization.go create mode 100644 vendor/github.com/caio/go-tdigest/summary.go create mode 100644 vendor/github.com/caio/go-tdigest/tdigest.go create mode 100644 vendor/github.com/cespare/xxhash/v2/.travis.yml rename vendor/github.com/{steveyen/gtreap/LICENSE => cespare/xxhash/v2/LICENSE.txt} (89%) create mode 100644 vendor/github.com/cespare/xxhash/v2/README.md create mode 100644 vendor/github.com/cespare/xxhash/v2/xxhash.go create mode 100644 vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go create mode 100644 vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s create mode 100644 vendor/github.com/cespare/xxhash/v2/xxhash_other.go create mode 100644 vendor/github.com/cespare/xxhash/v2/xxhash_safe.go create mode 100644 vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go delete mode 100644 vendor/github.com/dgrijalva/jwt-go/.travis.yml delete mode 100644 vendor/github.com/dgrijalva/jwt-go/MIGRATION_GUIDE.md delete mode 100644 vendor/github.com/dgrijalva/jwt-go/claims.go delete mode 100644 vendor/github.com/dgrijalva/jwt-go/map_claims.go create mode 100644 vendor/github.com/dgryski/go-metro/LICENSE create mode 100644 vendor/github.com/dgryski/go-metro/README create mode 100644 vendor/github.com/dgryski/go-metro/metro.py 
create mode 100644 vendor/github.com/dgryski/go-metro/metro128.go create mode 100644 vendor/github.com/dgryski/go-metro/metro64.go create mode 100644 vendor/github.com/dgryski/go-metro/metro_amd64.s create mode 100644 vendor/github.com/dgryski/go-metro/metro_stub.go create mode 100644 vendor/github.com/dop251/goja/builtin_promise.go create mode 100644 vendor/github.com/dop251/goja/destruct.go delete mode 100644 vendor/github.com/dop251/goja/go.mod delete mode 100644 vendor/github.com/dop251/goja/go.sum delete mode 100644 vendor/github.com/felixge/httpsnoop/go.mod delete mode 100644 vendor/github.com/glycerine/go-unsnap-stream/.gitignore delete mode 100644 vendor/github.com/glycerine/go-unsnap-stream/LICENSE delete mode 100644 vendor/github.com/glycerine/go-unsnap-stream/README.md delete mode 100644 vendor/github.com/glycerine/go-unsnap-stream/binary.dat delete mode 100644 vendor/github.com/glycerine/go-unsnap-stream/binary.dat.snappy delete mode 100644 vendor/github.com/glycerine/go-unsnap-stream/rbuf.go delete mode 100644 vendor/github.com/glycerine/go-unsnap-stream/snap.go delete mode 100644 vendor/github.com/glycerine/go-unsnap-stream/unenc.txt delete mode 100644 vendor/github.com/glycerine/go-unsnap-stream/unenc.txt.snappy delete mode 100644 vendor/github.com/glycerine/go-unsnap-stream/unsnap.go rename vendor/github.com/{dgrijalva/jwt-go => golang-jwt/jwt/v4}/.gitignore (68%) rename vendor/github.com/{dgrijalva/jwt-go => golang-jwt/jwt/v4}/LICENSE (96%) create mode 100644 vendor/github.com/golang-jwt/jwt/v4/MIGRATION_GUIDE.md rename vendor/github.com/{dgrijalva/jwt-go => golang-jwt/jwt/v4}/README.md (60%) rename vendor/github.com/{dgrijalva/jwt-go => golang-jwt/jwt/v4}/VERSION_HISTORY.md (84%) create mode 100644 vendor/github.com/golang-jwt/jwt/v4/claims.go rename vendor/github.com/{dgrijalva/jwt-go => golang-jwt/jwt/v4}/doc.go (100%) rename vendor/github.com/{dgrijalva/jwt-go => golang-jwt/jwt/v4}/ecdsa.go (79%) rename vendor/github.com/{dgrijalva/jwt-go => 
golang-jwt/jwt/v4}/ecdsa_utils.go (81%) create mode 100644 vendor/github.com/golang-jwt/jwt/v4/ed25519.go create mode 100644 vendor/github.com/golang-jwt/jwt/v4/ed25519_utils.go rename vendor/github.com/{dgrijalva/jwt-go => golang-jwt/jwt/v4}/errors.go (88%) rename vendor/github.com/{dgrijalva/jwt-go => golang-jwt/jwt/v4}/hmac.go (90%) create mode 100644 vendor/github.com/golang-jwt/jwt/v4/map_claims.go rename vendor/github.com/{dgrijalva/jwt-go => golang-jwt/jwt/v4}/none.go (94%) rename vendor/github.com/{dgrijalva/jwt-go => golang-jwt/jwt/v4}/parser.go (95%) rename vendor/github.com/{dgrijalva/jwt-go => golang-jwt/jwt/v4}/rsa.go (92%) rename vendor/github.com/{dgrijalva/jwt-go => golang-jwt/jwt/v4}/rsa_pss.go (94%) rename vendor/github.com/{dgrijalva/jwt-go => golang-jwt/jwt/v4}/rsa_utils.go (72%) rename vendor/github.com/{dgrijalva/jwt-go => golang-jwt/jwt/v4}/signing_method.go (79%) create mode 100644 vendor/github.com/golang-jwt/jwt/v4/staticcheck.conf rename vendor/github.com/{dgrijalva/jwt-go => golang-jwt/jwt/v4}/token.go (74%) create mode 100644 vendor/github.com/golang-jwt/jwt/v4/types.go delete mode 100644 vendor/github.com/golang/glog/README create mode 100644 vendor/github.com/golang/glog/README.md delete mode 100644 vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go delete mode 100644 vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go delete mode 100644 vendor/github.com/golang/snappy/go.mod delete mode 100644 vendor/github.com/gorilla/handlers/go.mod delete mode 100644 vendor/github.com/gorilla/handlers/go.sum delete mode 100644 vendor/github.com/gorilla/mux/go.mod delete mode 100644 vendor/github.com/gorilla/websocket/go.mod delete mode 100644 vendor/github.com/gorilla/websocket/go.sum create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/internal/genopenapi/naming.go delete mode 100644 vendor/github.com/jackc/chunkreader/v2/go.mod delete mode 100644 vendor/github.com/jackc/pgconn/go.mod 
delete mode 100644 vendor/github.com/jackc/pgconn/go.sum delete mode 100644 vendor/github.com/jackc/pgerrcode/go.mod delete mode 100644 vendor/github.com/jackc/pgio/go.mod delete mode 100644 vendor/github.com/jackc/pgpassfile/go.mod delete mode 100644 vendor/github.com/jackc/pgpassfile/go.sum delete mode 100644 vendor/github.com/jackc/pgproto3/v2/go.mod delete mode 100644 vendor/github.com/jackc/pgproto3/v2/go.sum delete mode 100644 vendor/github.com/jackc/pgservicefile/go.mod delete mode 100644 vendor/github.com/jackc/pgservicefile/go.sum delete mode 100644 vendor/github.com/jackc/pgtype/go.mod delete mode 100644 vendor/github.com/jackc/pgtype/go.sum delete mode 100644 vendor/github.com/jackc/pgx/v4/go.mod delete mode 100644 vendor/github.com/jackc/pgx/v4/go.sum delete mode 100644 vendor/github.com/m3db/prometheus_client_golang/AUTHORS.md delete mode 100644 vendor/github.com/m3db/prometheus_client_golang/prometheus/collector.go delete mode 100644 vendor/github.com/m3db/prometheus_client_golang/prometheus/counter.go delete mode 100644 vendor/github.com/m3db/prometheus_client_golang/prometheus/gauge.go delete mode 100644 vendor/github.com/m3db/prometheus_client_golang/prometheus/histogram.go delete mode 100644 vendor/github.com/m3db/prometheus_client_golang/prometheus/http.go delete mode 100644 vendor/github.com/m3db/prometheus_client_golang/prometheus/process_collector.go delete mode 100644 vendor/github.com/m3db/prometheus_client_golang/prometheus/promhttp/http.go delete mode 100644 vendor/github.com/m3db/prometheus_client_golang/prometheus/untyped.go delete mode 100644 vendor/github.com/m3db/prometheus_client_golang/prometheus/vec.go delete mode 100644 vendor/github.com/m3db/prometheus_client_model/go/metrics.pb.go delete mode 100644 vendor/github.com/m3db/prometheus_common/expfmt/encode.go delete mode 100644 vendor/github.com/m3db/prometheus_common/expfmt/text_create.go delete mode 100644 vendor/github.com/m3db/prometheus_procfs/.travis.yml delete mode 100644 
vendor/github.com/m3db/prometheus_procfs/AUTHORS.md delete mode 100644 vendor/github.com/m3db/prometheus_procfs/CONTRIBUTING.md delete mode 100644 vendor/github.com/m3db/prometheus_procfs/Makefile delete mode 100644 vendor/github.com/m3db/prometheus_procfs/README.md delete mode 100644 vendor/github.com/m3db/prometheus_procfs/fs.go delete mode 100644 vendor/github.com/m3db/prometheus_procfs/mdstat.go delete mode 100644 vendor/github.com/m3db/prometheus_procfs/stat.go delete mode 100644 vendor/github.com/mschoch/smat/go.mod delete mode 100644 vendor/github.com/philhofer/fwd/LICENSE.md delete mode 100644 vendor/github.com/philhofer/fwd/README.md delete mode 100644 vendor/github.com/philhofer/fwd/reader.go delete mode 100644 vendor/github.com/philhofer/fwd/writer.go delete mode 100644 vendor/github.com/philhofer/fwd/writer_appengine.go delete mode 100644 vendor/github.com/philhofer/fwd/writer_unsafe.go rename vendor/github.com/{m3db/prometheus_client_golang => prometheus/client_golang}/LICENSE (100%) rename vendor/github.com/{m3db/prometheus_client_golang => prometheus/client_golang}/NOTICE (100%) rename vendor/github.com/{m3db/prometheus_client_golang => prometheus/client_golang}/prometheus/.gitignore (100%) rename vendor/github.com/{m3db/prometheus_client_golang => prometheus/client_golang}/prometheus/README.md (100%) create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/collector.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/counter.go rename vendor/github.com/{m3db/prometheus_client_golang => prometheus/client_golang}/prometheus/desc.go (76%) rename vendor/github.com/{m3db/prometheus_client_golang => prometheus/client_golang}/prometheus/doc.go (59%) rename vendor/github.com/{m3db/prometheus_client_golang => prometheus/client_golang}/prometheus/expvar_collector.go (50%) rename vendor/github.com/{m3db/prometheus_client_golang => prometheus/client_golang}/prometheus/fnv.go (50%) create mode 100644 
vendor/github.com/prometheus/client_golang/prometheus/gauge.go rename vendor/github.com/{m3db/prometheus_client_golang => prometheus/client_golang}/prometheus/go_collector.go (59%) create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/histogram.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/labels.go rename vendor/github.com/{m3db/prometheus_client_golang => prometheus/client_golang}/prometheus/metric.go (64%) create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/observer.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/process_collector.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go rename vendor/github.com/{m3db/prometheus_client_golang => prometheus/client_golang}/prometheus/registry.go (51%) rename vendor/github.com/{m3db/prometheus_client_golang => prometheus/client_golang}/prometheus/summary.go (51%) create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/timer.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/untyped.go rename vendor/github.com/{m3db/prometheus_client_golang => prometheus/client_golang}/prometheus/value.go (61%) create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/vec.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/wrap.go rename 
vendor/github.com/{m3db/prometheus_client_model => prometheus/client_model}/LICENSE (100%) rename vendor/github.com/{m3db/prometheus_client_model => prometheus/client_model}/NOTICE (100%) create mode 100644 vendor/github.com/prometheus/client_model/go/metrics.pb.go rename vendor/github.com/{m3db/prometheus_common => prometheus/common}/LICENSE (100%) rename vendor/github.com/{m3db/prometheus_common => prometheus/common}/NOTICE (100%) rename vendor/github.com/{m3db/prometheus_common => prometheus/common}/expfmt/decode.go (98%) create mode 100644 vendor/github.com/prometheus/common/expfmt/encode.go rename vendor/github.com/{m3db/prometheus_common => prometheus/common}/expfmt/expfmt.go (76%) rename vendor/github.com/{m3db/prometheus_common => prometheus/common}/expfmt/fuzz.go (100%) create mode 100644 vendor/github.com/prometheus/common/expfmt/openmetrics_create.go create mode 100644 vendor/github.com/prometheus/common/expfmt/text_create.go rename vendor/github.com/{m3db/prometheus_common => prometheus/common}/expfmt/text_parse.go (96%) rename vendor/github.com/{m3db/prometheus_common => prometheus/common}/internal/bitbucket.org/ww/goautoneg/README.txt (100%) rename vendor/github.com/{m3db/prometheus_common => prometheus/common}/internal/bitbucket.org/ww/goautoneg/autoneg.go (100%) rename vendor/github.com/{m3db/prometheus_common => prometheus/common}/model/alert.go (100%) rename vendor/github.com/{m3db/prometheus_common => prometheus/common}/model/fingerprinting.go (100%) rename vendor/github.com/{m3db/prometheus_common => prometheus/common}/model/fnv.go (95%) rename vendor/github.com/{m3db/prometheus_common => prometheus/common}/model/labels.go (94%) rename vendor/github.com/{m3db/prometheus_common => prometheus/common}/model/labelset.go (100%) rename vendor/github.com/{m3db/prometheus_common => prometheus/common}/model/metric.go (99%) rename vendor/github.com/{m3db/prometheus_common => prometheus/common}/model/model.go (100%) rename 
vendor/github.com/{m3db/prometheus_common => prometheus/common}/model/signature.go (100%) rename vendor/github.com/{m3db/prometheus_common => prometheus/common}/model/silence.go (100%) rename vendor/github.com/{m3db/prometheus_common => prometheus/common}/model/time.go (65%) rename vendor/github.com/{m3db/prometheus_common => prometheus/common}/model/value.go (100%) create mode 100644 vendor/github.com/prometheus/procfs/.gitignore create mode 100644 vendor/github.com/prometheus/procfs/.golangci.yml create mode 100644 vendor/github.com/prometheus/procfs/CODE_OF_CONDUCT.md create mode 100644 vendor/github.com/prometheus/procfs/CONTRIBUTING.md rename vendor/github.com/{m3db/prometheus_procfs => prometheus/procfs}/LICENSE (100%) create mode 100644 vendor/github.com/prometheus/procfs/MAINTAINERS.md create mode 100644 vendor/github.com/prometheus/procfs/Makefile create mode 100644 vendor/github.com/prometheus/procfs/Makefile.common rename vendor/github.com/{m3db/prometheus_procfs => prometheus/procfs}/NOTICE (100%) create mode 100644 vendor/github.com/prometheus/procfs/README.md create mode 100644 vendor/github.com/prometheus/procfs/SECURITY.md create mode 100644 vendor/github.com/prometheus/procfs/arp.go create mode 100644 vendor/github.com/prometheus/procfs/buddyinfo.go create mode 100644 vendor/github.com/prometheus/procfs/cpuinfo.go rename vendor/github.com/{blevesearch/bleve/v2/config_app.go => prometheus/procfs/cpuinfo_armx.go} (64%) create mode 100644 vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go create mode 100644 vendor/github.com/prometheus/procfs/cpuinfo_others.go create mode 100644 vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go create mode 100644 vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go create mode 100644 vendor/github.com/prometheus/procfs/cpuinfo_s390x.go create mode 100644 vendor/github.com/prometheus/procfs/cpuinfo_x86.go create mode 100644 vendor/github.com/prometheus/procfs/crypto.go rename 
vendor/github.com/{m3db/prometheus_procfs => prometheus/procfs}/doc.go (100%) create mode 100644 vendor/github.com/prometheus/procfs/fixtures.ttar create mode 100644 vendor/github.com/prometheus/procfs/fs.go create mode 100644 vendor/github.com/prometheus/procfs/fscache.go create mode 100644 vendor/github.com/prometheus/procfs/internal/fs/fs.go create mode 100644 vendor/github.com/prometheus/procfs/internal/util/parse.go create mode 100644 vendor/github.com/prometheus/procfs/internal/util/readfile.go create mode 100644 vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go rename vendor/github.com/{blevesearch/bleve/v2/config_disk.go => prometheus/procfs/internal/util/sysreadfile_compat.go} (55%) create mode 100644 vendor/github.com/prometheus/procfs/internal/util/valueparser.go rename vendor/github.com/{m3db/prometheus_procfs => prometheus/procfs}/ipvs.go (66%) create mode 100644 vendor/github.com/prometheus/procfs/kernel_random.go create mode 100644 vendor/github.com/prometheus/procfs/loadavg.go create mode 100644 vendor/github.com/prometheus/procfs/mdstat.go create mode 100644 vendor/github.com/prometheus/procfs/meminfo.go create mode 100644 vendor/github.com/prometheus/procfs/mountinfo.go rename vendor/github.com/{m3db/prometheus_procfs => prometheus/procfs}/mountstats.go (77%) create mode 100644 vendor/github.com/prometheus/procfs/net_conntrackstat.go create mode 100644 vendor/github.com/prometheus/procfs/net_dev.go create mode 100644 vendor/github.com/prometheus/procfs/net_ip_socket.go create mode 100644 vendor/github.com/prometheus/procfs/net_protocols.go create mode 100644 vendor/github.com/prometheus/procfs/net_sockstat.go create mode 100644 vendor/github.com/prometheus/procfs/net_softnet.go create mode 100644 vendor/github.com/prometheus/procfs/net_tcp.go create mode 100644 vendor/github.com/prometheus/procfs/net_udp.go create mode 100644 vendor/github.com/prometheus/procfs/net_unix.go rename vendor/github.com/{m3db/prometheus_procfs => 
prometheus/procfs}/proc.go (55%) create mode 100644 vendor/github.com/prometheus/procfs/proc_cgroup.go create mode 100644 vendor/github.com/prometheus/procfs/proc_environ.go create mode 100644 vendor/github.com/prometheus/procfs/proc_fdinfo.go rename vendor/github.com/{m3db/prometheus_procfs => prometheus/procfs}/proc_io.go (50%) rename vendor/github.com/{m3db/prometheus_procfs => prometheus/procfs}/proc_limits.go (51%) create mode 100644 vendor/github.com/prometheus/procfs/proc_maps.go create mode 100644 vendor/github.com/prometheus/procfs/proc_ns.go create mode 100644 vendor/github.com/prometheus/procfs/proc_psi.go create mode 100644 vendor/github.com/prometheus/procfs/proc_smaps.go rename vendor/github.com/{m3db/prometheus_procfs => prometheus/procfs}/proc_stat.go (79%) create mode 100644 vendor/github.com/prometheus/procfs/proc_status.go create mode 100644 vendor/github.com/prometheus/procfs/schedstat.go create mode 100644 vendor/github.com/prometheus/procfs/slab.go create mode 100644 vendor/github.com/prometheus/procfs/stat.go create mode 100644 vendor/github.com/prometheus/procfs/swaps.go create mode 100644 vendor/github.com/prometheus/procfs/ttar create mode 100644 vendor/github.com/prometheus/procfs/vm.go create mode 100644 vendor/github.com/prometheus/procfs/xfrm.go create mode 100644 vendor/github.com/prometheus/procfs/zoneinfo.go delete mode 100644 vendor/github.com/rubenv/sql-migrate/go.mod delete mode 100644 vendor/github.com/rubenv/sql-migrate/go.sum delete mode 100644 vendor/github.com/steveyen/gtreap/.gitignore delete mode 100644 vendor/github.com/steveyen/gtreap/README.md delete mode 100644 vendor/github.com/steveyen/gtreap/go.mod delete mode 100644 vendor/github.com/steveyen/gtreap/treap.go delete mode 100644 vendor/github.com/tinylib/msgp/LICENSE delete mode 100644 vendor/github.com/tinylib/msgp/msgp/advise_linux.go delete mode 100644 vendor/github.com/tinylib/msgp/msgp/advise_other.go delete mode 100644 
vendor/github.com/tinylib/msgp/msgp/circular.go delete mode 100644 vendor/github.com/tinylib/msgp/msgp/defs.go delete mode 100644 vendor/github.com/tinylib/msgp/msgp/edit.go delete mode 100644 vendor/github.com/tinylib/msgp/msgp/elsize.go delete mode 100644 vendor/github.com/tinylib/msgp/msgp/errors.go delete mode 100644 vendor/github.com/tinylib/msgp/msgp/extension.go delete mode 100644 vendor/github.com/tinylib/msgp/msgp/file.go delete mode 100644 vendor/github.com/tinylib/msgp/msgp/file_port.go delete mode 100644 vendor/github.com/tinylib/msgp/msgp/integers.go delete mode 100644 vendor/github.com/tinylib/msgp/msgp/json.go delete mode 100644 vendor/github.com/tinylib/msgp/msgp/json_bytes.go delete mode 100644 vendor/github.com/tinylib/msgp/msgp/number.go delete mode 100644 vendor/github.com/tinylib/msgp/msgp/purego.go delete mode 100644 vendor/github.com/tinylib/msgp/msgp/read.go delete mode 100644 vendor/github.com/tinylib/msgp/msgp/read_bytes.go delete mode 100644 vendor/github.com/tinylib/msgp/msgp/size.go delete mode 100644 vendor/github.com/tinylib/msgp/msgp/unsafe.go delete mode 100644 vendor/github.com/tinylib/msgp/msgp/write.go delete mode 100644 vendor/github.com/tinylib/msgp/msgp/write_bytes.go create mode 100644 vendor/github.com/twmb/murmur3/.gitignore create mode 100644 vendor/github.com/twmb/murmur3/.travis.yml create mode 100644 vendor/github.com/twmb/murmur3/LICENSE create mode 100644 vendor/github.com/twmb/murmur3/README.md create mode 100644 vendor/github.com/twmb/murmur3/murmur.go create mode 100644 vendor/github.com/twmb/murmur3/murmur128.go create mode 100644 vendor/github.com/twmb/murmur3/murmur128_amd64.s create mode 100644 vendor/github.com/twmb/murmur3/murmur128_decl.go create mode 100644 vendor/github.com/twmb/murmur3/murmur128_gen.go create mode 100644 vendor/github.com/twmb/murmur3/murmur32.go create mode 100644 vendor/github.com/twmb/murmur3/murmur32_gen.go create mode 100644 vendor/github.com/twmb/murmur3/murmur64.go delete mode 
100644 vendor/github.com/uber-go/tally/glide.lock delete mode 100644 vendor/github.com/uber-go/tally/glide.yaml rename vendor/github.com/uber-go/tally/{ => v4}/.gitignore (100%) rename vendor/github.com/uber-go/tally/{ => v4}/.travis.yml (91%) rename vendor/github.com/uber-go/tally/{ => v4}/LICENSE (100%) rename vendor/github.com/uber-go/tally/{ => v4}/Makefile (86%) rename vendor/github.com/uber-go/tally/{ => v4}/README.md (99%) rename vendor/github.com/uber-go/tally/{ => v4}/check_license.sh (100%) rename vendor/github.com/uber-go/tally/{ => v4}/histogram.go (74%) create mode 100644 vendor/github.com/uber-go/tally/v4/internal/identity/accumulator.go rename vendor/github.com/uber-go/tally/{ => v4}/key_gen.go (98%) rename vendor/github.com/uber-go/tally/{ => v4}/pool.go (97%) rename vendor/github.com/uber-go/tally/{ => v4}/prometheus/README.md (100%) rename vendor/github.com/uber-go/tally/{ => v4}/prometheus/config.go (98%) rename vendor/github.com/uber-go/tally/{ => v4}/prometheus/reporter.go (98%) rename vendor/github.com/uber-go/tally/{ => v4}/prometheus/sanitize.go (62%) rename vendor/github.com/uber-go/tally/{ => v4}/reporter.go (98%) rename vendor/github.com/uber-go/tally/{ => v4}/sanitize.go (99%) rename vendor/github.com/uber-go/tally/{ => v4}/scope.go (90%) rename vendor/github.com/uber-go/tally/{ => v4}/scope_registry.go (74%) rename vendor/github.com/uber-go/tally/{ => v4}/stats.go (68%) rename vendor/github.com/uber-go/tally/{ => v4}/types.go (99%) create mode 100644 vendor/github.com/uber-go/tally/v4/version.go delete mode 100644 vendor/github.com/willf/bitset/Makefile delete mode 100644 vendor/go.etcd.io/bbolt/.gitignore delete mode 100644 vendor/go.etcd.io/bbolt/.travis.yml delete mode 100644 vendor/go.etcd.io/bbolt/LICENSE delete mode 100644 vendor/go.etcd.io/bbolt/Makefile delete mode 100644 vendor/go.etcd.io/bbolt/README.md delete mode 100644 vendor/go.etcd.io/bbolt/bolt_386.go delete mode 100644 vendor/go.etcd.io/bbolt/bolt_amd64.go delete mode 
100644 vendor/go.etcd.io/bbolt/bolt_arm.go delete mode 100644 vendor/go.etcd.io/bbolt/bolt_arm64.go delete mode 100644 vendor/go.etcd.io/bbolt/bolt_linux.go delete mode 100644 vendor/go.etcd.io/bbolt/bolt_mips64x.go delete mode 100644 vendor/go.etcd.io/bbolt/bolt_mipsx.go delete mode 100644 vendor/go.etcd.io/bbolt/bolt_openbsd.go delete mode 100644 vendor/go.etcd.io/bbolt/bolt_ppc.go delete mode 100644 vendor/go.etcd.io/bbolt/bolt_ppc64.go delete mode 100644 vendor/go.etcd.io/bbolt/bolt_ppc64le.go delete mode 100644 vendor/go.etcd.io/bbolt/bolt_riscv64.go delete mode 100644 vendor/go.etcd.io/bbolt/bolt_s390x.go delete mode 100644 vendor/go.etcd.io/bbolt/bolt_unix.go delete mode 100644 vendor/go.etcd.io/bbolt/bolt_unix_aix.go delete mode 100644 vendor/go.etcd.io/bbolt/bolt_unix_solaris.go delete mode 100644 vendor/go.etcd.io/bbolt/bolt_windows.go delete mode 100644 vendor/go.etcd.io/bbolt/boltsync_unix.go delete mode 100644 vendor/go.etcd.io/bbolt/bucket.go delete mode 100644 vendor/go.etcd.io/bbolt/cursor.go delete mode 100644 vendor/go.etcd.io/bbolt/db.go delete mode 100644 vendor/go.etcd.io/bbolt/doc.go delete mode 100644 vendor/go.etcd.io/bbolt/errors.go delete mode 100644 vendor/go.etcd.io/bbolt/freelist.go delete mode 100644 vendor/go.etcd.io/bbolt/freelist_hmap.go delete mode 100644 vendor/go.etcd.io/bbolt/go.mod delete mode 100644 vendor/go.etcd.io/bbolt/go.sum delete mode 100644 vendor/go.etcd.io/bbolt/node.go delete mode 100644 vendor/go.etcd.io/bbolt/page.go delete mode 100644 vendor/go.etcd.io/bbolt/tx.go delete mode 100644 vendor/go.etcd.io/bbolt/unsafe.go delete mode 100644 vendor/go.uber.org/atomic/.travis.yml delete mode 100644 vendor/go.uber.org/atomic/go.mod delete mode 100644 vendor/go.uber.org/atomic/go.sum create mode 100644 vendor/go.uber.org/atomic/time.go create mode 100644 vendor/go.uber.org/atomic/time_ext.go create mode 100644 vendor/go.uber.org/atomic/uintptr.go create mode 100644 vendor/go.uber.org/atomic/unsafe_pointer.go delete mode 
100644 vendor/go.uber.org/multierr/go.mod delete mode 100644 vendor/go.uber.org/multierr/go.sum delete mode 100644 vendor/go.uber.org/zap/.travis.yml delete mode 100644 vendor/go.uber.org/zap/go.mod delete mode 100644 vendor/go.uber.org/zap/go.sum create mode 100644 vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go create mode 100644 vendor/go.uber.org/zap/zapcore/clock.go rename vendor/golang.org/x/sys/unix/{asm_freebsd_386.s => asm_bsd_386.s} (72%) rename vendor/golang.org/x/sys/unix/{asm_darwin_amd64.s => asm_bsd_amd64.s} (72%) rename vendor/golang.org/x/sys/unix/{asm_freebsd_arm.s => asm_bsd_arm.s} (76%) rename vendor/golang.org/x/sys/unix/{asm_netbsd_amd64.s => asm_bsd_arm64.s} (75%) delete mode 100644 vendor/golang.org/x/sys/unix/asm_darwin_386.s delete mode 100644 vendor/golang.org/x/sys/unix/asm_darwin_arm.s delete mode 100644 vendor/golang.org/x/sys/unix/asm_darwin_arm64.s delete mode 100644 vendor/golang.org/x/sys/unix/asm_dragonfly_amd64.s delete mode 100644 vendor/golang.org/x/sys/unix/asm_freebsd_amd64.s delete mode 100644 vendor/golang.org/x/sys/unix/asm_freebsd_arm64.s delete mode 100644 vendor/golang.org/x/sys/unix/asm_netbsd_386.s delete mode 100644 vendor/golang.org/x/sys/unix/asm_netbsd_arm.s delete mode 100644 vendor/golang.org/x/sys/unix/asm_netbsd_arm64.s delete mode 100644 vendor/golang.org/x/sys/unix/asm_openbsd_386.s delete mode 100644 vendor/golang.org/x/sys/unix/asm_openbsd_amd64.s delete mode 100644 vendor/golang.org/x/sys/unix/asm_openbsd_arm.s delete mode 100644 vendor/golang.org/x/sys/unix/asm_openbsd_arm64.s create mode 100644 vendor/golang.org/x/sys/unix/asm_zos_s390x.s create mode 100644 vendor/golang.org/x/sys/unix/dev_zos.go create mode 100644 vendor/golang.org/x/sys/unix/epoll_zos.go create mode 100644 vendor/golang.org/x/sys/unix/fstatfs_zos.go create mode 100644 vendor/golang.org/x/sys/unix/ioctl_linux.go create mode 100644 vendor/golang.org/x/sys/unix/ioctl_zos.go delete mode 100644 
vendor/golang.org/x/sys/unix/syscall_darwin_386.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin_arm.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_ppc.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_zos_s390x.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_darwin_386.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_darwin_arm.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_13.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_13.s delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_386.s delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_13.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_13.s delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.s create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_darwin_386.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_darwin_arm.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_zos_s390x.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_darwin_386.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_illumos_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go create mode 100644 vendor/golang.org/x/sys/windows/types_windows_arm64.go delete mode 100644 
vendor/google.golang.org/grpc/.travis.yml create mode 100644 vendor/google.golang.org/grpc/NOTICE.txt delete mode 100644 vendor/google.golang.org/grpc/cmd/protoc-gen-go-grpc/go.mod delete mode 100644 vendor/google.golang.org/grpc/cmd/protoc-gen-go-grpc/go.sum delete mode 100644 vendor/google.golang.org/grpc/credentials/go12.go delete mode 100644 vendor/google.golang.org/grpc/go.mod delete mode 100644 vendor/google.golang.org/grpc/go.sum delete mode 100644 vendor/google.golang.org/grpc/install_gae.sh create mode 100644 vendor/google.golang.org/grpc/internal/credentials/credentials.go delete mode 100644 vendor/google.golang.org/grpc/internal/credentials/spiffe_appengine.go delete mode 100644 vendor/google.golang.org/grpc/internal/credentials/syscallconn_appengine.go delete mode 100644 vendor/google.golang.org/grpc/internal/resolver/dns/go113.go create mode 100644 vendor/google.golang.org/grpc/internal/xds/env/env.go create mode 100644 vendor/google.golang.org/grpc/internal/xds_handshake_cluster.go delete mode 100644 vendor/gopkg.in/yaml.v2/go.mod delete mode 100644 vendor/gopkg.in/yaml.v3/go.mod diff --git a/CHANGELOG.md b/CHANGELOG.md index a0254d8f0..384ca2df0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,8 +5,13 @@ The format is based on [keep a changelog](http://keepachangelog.com) and this pr ## [Unreleased] ### Added -- Periodically check database hostname for underlying address changes. -- Expose Nakama errors to the runtime. +- Allow creation of relayed matches with a name. Names will be mapped to match identifiers. +- Expose Nakama errors to the server runtime. +- The wallet ledger view in the Nakama Console now supports pagination. + +### Changed +- Periodically check database hostname for underlying address changes more frequently. +- Upgrade GRPC, GRPC-Gateway, Protobuf, PGX, and other dependencies. ### Fixed - Fix optimistic email imports when linking social profiles. 
diff --git a/go.mod b/go.mod index 46db99f33..834f9205a 100644 --- a/go.mod +++ b/go.mod @@ -1,41 +1,78 @@ module github.com/heroiclabs/nakama/v3 -go 1.16 +go 1.17 require ( - github.com/blevesearch/bleve/v2 v2.0.3 - github.com/blevesearch/upsidedown_store_api v1.0.1 - github.com/dgrijalva/jwt-go v3.2.1-0.20200107013213-dc14462fd587+incompatible - github.com/dop251/goja v0.0.0-20210406175830-1b11a6af686d - github.com/glycerine/go-unsnap-stream v0.0.0-20190901134440-81cf024a9e0a // indirect + github.com/blugelabs/bluge v0.1.7 + github.com/blugelabs/query_string v0.2.0 + github.com/dop251/goja v0.0.0-20211022113120-dc8c55024d06 github.com/gofrs/uuid v4.0.0+incompatible - github.com/golang/protobuf v1.5.2 // indirect + github.com/golang-jwt/jwt/v4 v4.1.0 github.com/gorilla/handlers v1.5.1 github.com/gorilla/mux v1.8.0 github.com/gorilla/websocket v1.4.2 - github.com/grpc-ecosystem/grpc-gateway/v2 v2.3.0 - github.com/heroiclabs/nakama-common v1.19.1-0.20211028165853-d67f8b2631f6 - github.com/jackc/pgconn v1.8.1 + github.com/grpc-ecosystem/grpc-gateway/v2 v2.6.0 + github.com/heroiclabs/nakama-common v0.0.0-20211029002510-769d7938e21f + github.com/jackc/pgconn v1.10.0 github.com/jackc/pgerrcode v0.0.0-20201024163028-a0d42d470451 - github.com/jackc/pgtype v1.7.0 - github.com/jackc/pgx/v4 v4.11.0 - github.com/m3db/prometheus_client_golang v0.8.1 // indirect - github.com/m3db/prometheus_client_model v0.1.0 // indirect - github.com/m3db/prometheus_common v0.1.0 // indirect - github.com/m3db/prometheus_procfs v0.8.1 // indirect + github.com/jackc/pgtype v1.8.1 + github.com/jackc/pgx/v4 v4.13.0 github.com/rubenv/sql-migrate v0.0.0-20210408115534-a32ed26c37ea - github.com/steveyen/gtreap v0.1.0 github.com/stretchr/testify v1.7.0 - github.com/tinylib/msgp v1.1.2 // indirect - github.com/uber-go/tally v3.3.17+incompatible - github.com/ziutek/mymysql v1.5.4 // indirect - go.uber.org/atomic v1.7.0 - go.uber.org/zap v1.16.0 - golang.org/x/crypto v0.0.0-20210506145944-38f3c27a63bf - 
google.golang.org/genproto v0.0.0-20210224155714-063164c882e6 - google.golang.org/grpc v1.37.0 + github.com/uber-go/tally/v4 v4.0.1 + go.uber.org/atomic v1.9.0 + go.uber.org/zap v1.19.1 + golang.org/x/crypto v0.0.0-20210921155107-089bfa567519 + google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83 + google.golang.org/grpc v1.41.0 google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0 google.golang.org/protobuf v1.27.1 gopkg.in/natefinch/lumberjack.v2 v2.0.0-20190411184413-94d9e492cc53 gopkg.in/yaml.v2 v2.4.0 ) + +require ( + github.com/RoaringBitmap/roaring v0.9.4 // indirect + github.com/axiomhq/hyperloglog v0.0.0-20191112132149-a4c4c47bc57f // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/bits-and-blooms/bitset v1.2.0 // indirect + github.com/blevesearch/go-porterstemmer v1.0.3 // indirect + github.com/blevesearch/mmap-go v1.0.3 // indirect + github.com/blevesearch/segment v0.9.0 // indirect + github.com/blevesearch/snowballstem v0.9.0 // indirect + github.com/blevesearch/vellum v1.0.7 // indirect + github.com/blugelabs/bluge_segment_api v0.2.0 // indirect + github.com/blugelabs/ice v0.2.0 // indirect + github.com/caio/go-tdigest v3.1.0+incompatible // indirect + github.com/cespare/xxhash/v2 v2.1.1 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/dgryski/go-metro v0.0.0-20180109044635-280f6062b5bc // indirect + github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91 // indirect + github.com/felixge/httpsnoop v1.0.1 // indirect + github.com/ghodss/yaml v1.0.0 // indirect + github.com/go-sourcemap/sourcemap v2.1.3+incompatible // indirect + github.com/golang/glog v1.0.0 // indirect + github.com/golang/protobuf v1.5.2 // indirect + github.com/golang/snappy v0.0.1 // indirect + github.com/jackc/chunkreader/v2 v2.0.1 // indirect + github.com/jackc/pgio v1.0.0 // indirect + github.com/jackc/pgpassfile v1.0.0 // indirect + github.com/jackc/pgproto3/v2 v2.1.1 // indirect + github.com/jackc/pgservicefile 
v0.0.0-20200714003250-2b9c44734f2b // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect + github.com/mschoch/smat v0.2.0 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/prometheus/client_golang v1.11.0 // indirect + github.com/prometheus/client_model v0.2.0 // indirect + github.com/prometheus/common v0.26.0 // indirect + github.com/prometheus/procfs v0.6.0 // indirect + github.com/twmb/murmur3 v1.1.6 // indirect + github.com/ziutek/mymysql v1.5.4 // indirect + go.uber.org/multierr v1.6.0 // indirect + golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4 // indirect + golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1 // indirect + golang.org/x/text v0.3.7 // indirect + gopkg.in/gorp.v1 v1.7.2 // indirect + gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect +) diff --git a/go.sum b/go.sum index 174eb9848..7215d7991 100644 --- a/go.sum +++ b/go.sum @@ -21,7 +21,6 @@ cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4g cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= @@ -35,7 +34,6 @@ dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7 github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= 
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= @@ -43,94 +41,77 @@ github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030I github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= github.com/Masterminds/sprig v2.22.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/RoaringBitmap/roaring v0.4.23 h1:gpyfd12QohbqhFO4NVDUdoPOCXsyahYRQhINmlHxKeo= -github.com/RoaringBitmap/roaring v0.4.23/go.mod h1:D0gp8kJQgE1A4LQ5wFLggQEyvDi06Mq5mKs52e1TwOo= -github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= -github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= -github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= -github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= +github.com/RoaringBitmap/gocroaring v0.4.0/go.mod h1:NieMwz7ZqwU2DD73/vvYwv7r4eWBKuPVSXZIpsaMwCI= +github.com/RoaringBitmap/real-roaring-datasets v0.0.0-20190726190000-eb7c87156f76/go.mod h1:oM0MHmQ3nDsq609SS36p+oYbRi16+oVvU2Bw4Ipv0SE= +github.com/RoaringBitmap/roaring v0.9.1/go.mod h1:h1B7iIUOmnAeb5ytYMvnHJwxMc6LUrwBnzXWRuqTQUc= +github.com/RoaringBitmap/roaring v0.9.4 h1:ckvZSX5gwCRaJYBNe7syNawCU5oruY9gQmjXlp4riwo= +github.com/RoaringBitmap/roaring v0.9.4/go.mod 
h1:icnadbWcNyfEHlYdr+tDlOTih1Bf/h+rzPpv4sbomAA= +github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= -github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= -github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= -github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= -github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= -github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= +github.com/axiomhq/hyperloglog v0.0.0-20191112132149-a4c4c47bc57f 
h1:y06x6vGnFYfXUoVMbrcP1Uzpj4JG01eB5vRps9G8agM= +github.com/axiomhq/hyperloglog v0.0.0-20191112132149-a4c4c47bc57f/go.mod h1:2stgcRjl6QmW+gU2h5E7BQXg4HU0gzxKWDuT5HviN9s= +github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= +github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d/go.mod h1:6QX/PXZ00z/TKoufEY6K/a0k6AhaJrQKdFe6OfVXsa4= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= -github.com/blevesearch/bleve/v2 v2.0.3 h1:mDrwrsRIA4PDYkfUNjoh5zGECvquuJIA3MJU5ivaO8E= -github.com/blevesearch/bleve/v2 v2.0.3/go.mod h1:ip+4iafiEq2gCY5rJXe87bT6LkF/OJMCjQEYIfTBfW8= -github.com/blevesearch/bleve_index_api v1.0.0 h1:Ds3XeuTxjXCkG6pgIwWDRyooJKNIuOKemnN0N0IkhTU= -github.com/blevesearch/bleve_index_api v1.0.0/go.mod h1:fiwKS0xLEm+gBRgv5mumf0dhgFr2mDgZah1pqv1c1M4= +github.com/bits-and-blooms/bitset v1.2.0 h1:Kn4yilvwNtMACtf1eYDlG8H77R07mZSPbMjLyS07ChA= +github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= github.com/blevesearch/go-porterstemmer v1.0.3 h1:GtmsqID0aZdCSNiY8SkuPJ12pD4jI+DdXTAn4YRcHCo= github.com/blevesearch/go-porterstemmer v1.0.3/go.mod h1:angGc5Ht+k2xhJdZi511LtmxuEf0OVpvUUNrwmM1P7M= -github.com/blevesearch/mmap-go v1.0.2 h1:JtMHb+FgQCTTYIhtMvimw15dJwu1Y5lrZDMOFXVWPk0= github.com/blevesearch/mmap-go v1.0.2/go.mod h1:ol2qBqYaOUsGdm7aRMRrYGgPvnwLe6Y+7LMvAB5IbSA= 
-github.com/blevesearch/scorch_segment_api/v2 v2.0.1 h1:fd+hPtZ8GsbqPK1HslGp7Vhoik4arZteA/IsCEgOisw= -github.com/blevesearch/scorch_segment_api/v2 v2.0.1/go.mod h1:lq7yK2jQy1yQjtjTfU931aVqz7pYxEudHaDwOt1tXfU= +github.com/blevesearch/mmap-go v1.0.3 h1:7QkALgFNooSq3a46AE+pWeKASAZc9SiNFJhDGF1NDx4= +github.com/blevesearch/mmap-go v1.0.3/go.mod h1:pYvKl/grLQrBxuaRYgoTssa4rVujYYeenDp++2E+yvs= github.com/blevesearch/segment v0.9.0 h1:5lG7yBCx98or7gK2cHMKPukPZ/31Kag7nONpoBt22Ac= github.com/blevesearch/segment v0.9.0/go.mod h1:9PfHYUdQCgHktBgvtUOF4x+pc4/l8rdH0u5spnW85UQ= github.com/blevesearch/snowballstem v0.9.0 h1:lMQ189YspGP6sXvZQ4WZ+MLawfV8wOmPoD/iWeNXm8s= github.com/blevesearch/snowballstem v0.9.0/go.mod h1:PivSj3JMc8WuaFkTSRDW2SlrulNWPl4ABg1tC/hlgLs= -github.com/blevesearch/upsidedown_store_api v1.0.1 h1:1SYRwyoFLwG3sj0ed89RLtM15amfX2pXlYbFOnF8zNU= -github.com/blevesearch/upsidedown_store_api v1.0.1/go.mod h1:MQDVGpHZrpe3Uy26zJBf/a8h0FZY6xJbthIMm8myH2Q= -github.com/blevesearch/vellum v1.0.3 h1:U86G41A7CtXNzzpIJHM8lSTUqz1Mp8U870TkcdCzZc8= -github.com/blevesearch/vellum v1.0.3/go.mod h1:2u5ax02KeDuNWu4/C+hVQMD6uLN4txH1JbtpaDNLJRo= -github.com/blevesearch/zapx/v11 v11.2.0 h1:GBkCJYsyj3eIU4+aiLPxoMz1PYvDbQZl/oXHIBZIP60= -github.com/blevesearch/zapx/v11 v11.2.0/go.mod h1:gN/a0alGw1FZt/YGTo1G6Z6XpDkeOfujX5exY9sCQQM= -github.com/blevesearch/zapx/v12 v12.2.0 h1:dyRcSoZVO1jktL4UpGkCEF1AYa3xhKPirh4/N+Va+Ww= -github.com/blevesearch/zapx/v12 v12.2.0/go.mod h1:fdjwvCwWWwJW/EYTYGtAp3gBA0geCYGLcVTtJEZnY6A= -github.com/blevesearch/zapx/v13 v13.2.0 h1:mUqbaqQABp8nBE4t4q2qMyHCCq4sykoV8r7aJk4ih3s= -github.com/blevesearch/zapx/v13 v13.2.0/go.mod h1:o5rAy/lRS5JpAbITdrOHBS/TugWYbkcYZTz6VfEinAQ= -github.com/blevesearch/zapx/v14 v14.2.0 h1:UsfRqvM9RJxKNKrkR1U7aYc1cv9MWx719fsAjbF6joI= -github.com/blevesearch/zapx/v14 v14.2.0/go.mod h1:GNgZusc1p4ot040cBQMRGEZobvwjCquiEKYh1xLFK9g= -github.com/blevesearch/zapx/v15 v15.2.0 h1:ZpibwcrrOaeslkOw3sJ7npP7KDgRHI/DkACjKTqFwyM= 
-github.com/blevesearch/zapx/v15 v15.2.0/go.mod h1:MmQceLpWfME4n1WrBFIwplhWmaQbQqLQARpaKUEOs/A= -github.com/bufbuild/buf v0.37.0/go.mod h1:lQ1m2HkIaGOFba6w/aC3KYBHhKEOESP3gaAEpS3dAFM= -github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= -github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= +github.com/blevesearch/vellum v1.0.5/go.mod h1:atE0EH3fvk43zzS7t1YNdNC7DbmcC3uz+eMD5xZ2OyQ= +github.com/blevesearch/vellum v1.0.7 h1:+vn8rfyCRHxKVRgDLeR0FAXej2+6mEb5Q15aQE/XESQ= +github.com/blevesearch/vellum v1.0.7/go.mod h1:doBZpmRhwTsASB4QdUZANlJvqVAUdUyX0ZK7QJCTeBE= +github.com/blugelabs/bluge v0.1.7 h1:CIP3OlzWZ46GbqyIdxXIeLgcSRkCopFIcn81I4T+QG8= +github.com/blugelabs/bluge v0.1.7/go.mod h1:5d7LktUkQgvbh5Bmi6tPWtvo4+6uRTm6gAwP+5z6FqQ= +github.com/blugelabs/bluge_segment_api v0.2.0 h1:cCX1Y2y8v0LZ7+EEJ6gH7dW6TtVTW4RhG0vp3R+N2Lo= +github.com/blugelabs/bluge_segment_api v0.2.0/go.mod h1:95XA+ZXfRj/IXADm7gZ+iTcWOJPg5jQTY1EReIzl3LA= +github.com/blugelabs/ice v0.2.0 h1:9N/TRBqAr43emheD1ptk9mohuT6xAVq83gesgE60Qqk= +github.com/blugelabs/ice v0.2.0/go.mod h1:7foiDf4V83FIYYnGh2LOoRWsbNoCqAAMNgKn879Iyu0= +github.com/blugelabs/query_string v0.2.0 h1:ITgD9zF7HQiXstJgRZ+W4kWYUUKJNjhwwRXUtwX6WZs= +github.com/blugelabs/query_string v0.2.0/go.mod h1:H0YFWhYAf8/xcv1zoswoUC8kM/fE9L/KEfsgySsnhfs= +github.com/cactus/go-statsd-client v3.1.1+incompatible/go.mod h1:cMRcwZDklk7hXp+Law83urTHUiHMzCev/r4JMYr/zU0= +github.com/caio/go-tdigest v3.1.0+incompatible h1:uoVMJ3Q5lXmVLCCqaMGHLBWnbGoN6Lpu7OAUPR60cds= +github.com/caio/go-tdigest v3.1.0+incompatible/go.mod h1:sHQM/ubZStBUmF1WbB8FAm8q9GjDajLC5T7ydxE3JHI= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 
v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I= github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= -github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= -github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= github.com/coreos/go-semver v0.2.0/go.mod 
h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/couchbase/ghistogram v0.1.0/go.mod h1:s1Jhy76zqfEecpNWJfWUiKZookAFaiGOEoyzgHt9i7k= -github.com/couchbase/moss v0.1.0/go.mod h1:9MaHIaRuy9pvLPUJxB8sh8OrLfyDczECVL37grCIubs= github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= -github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= @@ -139,51 +120,39 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/denisenkom/go-mssqldb v0.9.0/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dgrijalva/jwt-go v3.2.1-0.20200107013213-dc14462fd587+incompatible h1:CiQ/hJK0Lsc/2Gm9uMSIe7cFE+h0sbTwHuTGQkIZpio= -github.com/dgrijalva/jwt-go v3.2.1-0.20200107013213-dc14462fd587+incompatible/go.mod 
h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-metro v0.0.0-20180109044635-280f6062b5bc h1:8WFBn63wegobsYAX0YjD+8suexZDga5CctH4CCTx2+8= +github.com/dgryski/go-metro v0.0.0-20180109044635-280f6062b5bc/go.mod h1:c9O8+fpSOX1DM8cPNSkX/qsBWdkD4yd2dpciOWQjpBw= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91 h1:Izz0+t1Z5nI16/II7vuEo/nHjodOg0p7+OiDpjX5t1E= github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc= -github.com/dop251/goja v0.0.0-20210406175830-1b11a6af686d h1:eyoriwRl4YlfXy64RCAiMyo3oX/UtA3eeje+qJk+fQA= -github.com/dop251/goja v0.0.0-20210406175830-1b11a6af686d/go.mod h1:R9ET47fwRVRPZnOGvHxxhuZcbrMCuiqOz3Rlrh4KSnk= +github.com/dop251/goja v0.0.0-20211022113120-dc8c55024d06 h1:XqC5eocqw7r3+HOhKYqaYH07XBiBDp9WE3NQK8XHSn4= +github.com/dop251/goja v0.0.0-20211022113120-dc8c55024d06/go.mod h1:R9ET47fwRVRPZnOGvHxxhuZcbrMCuiqOz3Rlrh4KSnk= github.com/dop251/goja_nodejs v0.0.0-20210225215109-d91c329300e7/go.mod h1:hn7BA7c8pLvoGndExHudxTDKZ84Pyvv+90pbBjbTz0Y= -github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= -github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= -github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= -github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= -github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod 
h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= +github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/felixge/httpsnoop v1.0.1 h1:lvB5Jl89CsZtGIWuTcDM1E/vkVs49/Ml7JJe07l8SPQ= github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= -github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= +github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/glycerine/go-unsnap-stream v0.0.0-20181221182339-f9677308dec2/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE= -github.com/glycerine/go-unsnap-stream v0.0.0-20190901134440-81cf024a9e0a h1:FQqoVvjbiUioBBFUL5up+h+GdCa/AnJsL/1bIs/veSI= -github.com/glycerine/go-unsnap-stream v0.0.0-20190901134440-81cf024a9e0a/go.mod 
h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE= -github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31 h1:gclg6gY70GLy3PbkQ1AERPfmLMMagS60DKF78eWwLn8= -github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-sourcemap/sourcemap v2.1.3+incompatible h1:W1iEw64niKVGogNgBN3ePyLFfuisuzeidWPMPWmECqU= github.com/go-sourcemap/sourcemap v2.1.3+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg= -github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs= github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= @@ -194,18 +163,17 @@ github.com/gobuffalo/packd v1.0.0/go.mod h1:6VTc4htmJRFB7u1m/4LeMTWjFoYrUiBkU9Fd github.com/gobuffalo/packr/v2 v2.8.1 
h1:tkQpju6i3EtMXJ9uoF5GT6kB+LMTimDWD8Xvbz6zDVA= github.com/gobuffalo/packr/v2 v2.8.1/go.mod h1:c/PLlOuTU+p3SybaJATW3H6lX/iK7xEz5OeMf+NnJpg= github.com/godror/godror v0.24.2/go.mod h1:wZv/9vPiUib6tkoDl+AZ/QLf5YZgMravZ7jxH2eQWAE= -github.com/gofrs/flock v0.8.0/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= -github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gofrs/uuid v4.0.0+incompatible h1:1SD/1F5pU8p29ybwgQSwpQk+mwdRrXCYuPhW6m+TnJw= github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= -github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/golang-jwt/jwt/v4 v4.1.0 h1:XUgk2Ex5veyVFVeLm0xhusUTQybEbexJXrvPNOKkSY0= +github.com/golang-jwt/jwt/v4 v4.1.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= +github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ= +github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= 
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -231,11 +199,9 @@ github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvq github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.0 h1:LUVKkCeviFUMKqHa4tXIIij/lbhnMbP7Fn5wKdKkRh4= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= @@ -247,10 +213,10 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= github.com/google/go-cmp 
v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= @@ -262,67 +228,35 @@ github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gopherjs/gopherjs v0.0.0-20190910122728-9d188e94fb99 h1:twflg0XRTjwKpxb/jFExr4HGq6on2dEOmnL6FV+fgPw= -github.com/gopherjs/gopherjs v0.0.0-20190910122728-9d188e94fb99/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gordonklaus/ineffassign v0.0.0-20200309095847-7953dde2c7bf/go.mod h1:cuNKsD1zp2v6XfE/orVX2QE1LC+i254ceGcVeDT3pTU= -github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/handlers v1.5.1 h1:9lRY6j8DEeeBT10CvO9hGW0gmky0BprnvDI5vfhUHH4= github.com/gorilla/handlers v1.5.1/go.mod 
h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q= -github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= -github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway v1.9.5 h1:UImYN5qQ8tuGpGE16ZmjvcTtTw24zw1QAp/SlnNrZhI= -github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.3.0 h1:IvO4FbbQL6n3v3M1rQNobZ61SGL0gJLdvKA5KETM7Xs= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.3.0/go.mod h1:d2gYTOTUQklu06xp0AJYYmRdTVU1VKrqhkYfYag2L08= -github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= -github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= -github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= -github.com/hashicorp/consul/sdk v0.3.0/go.mod 
h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.6.0 h1:rgxjzoDmDXw5q8HONgyHhBas4to0/XWRo/gPpJhsUNQ= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.6.0/go.mod h1:qrJPVzv9YlhsrxJc3P/Q85nr0w1lIRikTl4JlhdDH5w= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= -github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= -github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= -github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= -github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= -github.com/hashicorp/mdns v1.0.0/go.mod 
h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= -github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= -github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= -github.com/heroiclabs/nakama-common v1.19.0 h1:0VbaqexBVV+c3dUPn8NVGasmmg4Tyil9rnkGaRnKqvY= -github.com/heroiclabs/nakama-common v1.19.0/go.mod h1:jzIGV5bI45ALRQFzHPkJn4Z0tV+xhtho1+pZhOXVAsk= -github.com/heroiclabs/nakama-common v1.19.1-0.20211028165853-d67f8b2631f6 h1:NB0/C2vSySjmW601c01IjfewPJ1Av2LzTeGIqTXPHww= -github.com/heroiclabs/nakama-common v1.19.1-0.20211028165853-d67f8b2631f6/go.mod h1:jzIGV5bI45ALRQFzHPkJn4Z0tV+xhtho1+pZhOXVAsk= -github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/heroiclabs/nakama-common v0.0.0-20211029002510-769d7938e21f h1:7FHkwxUw1EpkwOzr18iB4KHuJm9PSEC9ccou/VJ8KzM= +github.com/heroiclabs/nakama-common v0.0.0-20211029002510-769d7938e21f/go.mod h1:jzIGV5bI45ALRQFzHPkJn4Z0tV+xhtho1+pZhOXVAsk= github.com/huandu/xstrings v1.3.2/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= -github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= +github.com/influxdata/influxdb v1.7.6/go.mod h1:qZna6X/4elxqT3yI9iZYdZrWWdeFOOprn86kgg4+IzY= github.com/jackc/chunkreader v1.0.0 h1:4s39bBR8ByfqH+DKm8rQA3E1LHZWB9XWcrz8fqaZbe0= github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo= github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= @@ 
-331,17 +265,19 @@ github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgO github.com/jackc/pgconn v0.0.0-20190420214824-7e0022ef6ba3/go.mod h1:jkELnwuX+w9qN5YIfX0fl88Ehu4XC3keFuOJJk9pcnA= github.com/jackc/pgconn v0.0.0-20190824142844-760dd75542eb/go.mod h1:lLjNuW/+OfW9/pnVKPazfWOgNfH2aPem8YQ7ilXGvJE= github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsUgOEh9hBm+xYTstcNHg7UPMVJqRfQxq4s= -github.com/jackc/pgconn v1.4.0/go.mod h1:Y2O3ZDF0q4mMacyWV3AstPJpeHXWGEetiFttmq5lahk= -github.com/jackc/pgconn v1.5.0/go.mod h1:QeD3lBfpTFe8WUnPZWN5KY/mB8FGMIYRdd8P8Jr0fAI= -github.com/jackc/pgconn v1.5.1-0.20200601181101-fa742c524853/go.mod h1:QeD3lBfpTFe8WUnPZWN5KY/mB8FGMIYRdd8P8Jr0fAI= -github.com/jackc/pgconn v1.8.1 h1:ySBX7Q87vOMqKU2bbmKbUvtYhauDFclYbNDYIE1/h6s= -github.com/jackc/pgconn v1.8.1/go.mod h1:JV6m6b6jhjdmzchES0drzCcYcAHS1OPD5xu3OZ/lE2g= +github.com/jackc/pgconn v1.8.0/go.mod h1:1C2Pb36bGIP9QHGBYCjnyhqu7Rv3sGshaQUvmfGIB/o= +github.com/jackc/pgconn v1.9.0/go.mod h1:YctiPyvzfU11JFxoXokUOOKQXQmDMoJL9vJzHH8/2JY= +github.com/jackc/pgconn v1.9.1-0.20210724152538-d89c8390a530/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI= +github.com/jackc/pgconn v1.10.0 h1:4EYhlDVEMsJ30nNj0mmgwIUXoq7e9sMJrVC2ED6QlCU= +github.com/jackc/pgconn v1.10.0/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI= github.com/jackc/pgerrcode v0.0.0-20201024163028-a0d42d470451 h1:WAvSpGf7MsFuzAtK4Vk7R4EVe+liW4x83r4oWu0WHKw= github.com/jackc/pgerrcode v0.0.0-20201024163028-a0d42d470451/go.mod h1:a/s9Lp5W7n/DD0VrVoyJ00FbP2ytTPDVOivvn2bMlds= github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE= github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8= -github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2 h1:JVX6jT/XfzNqIjye4717ITLaNwV9mWbJx0dLCpcRzdA= github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE= +github.com/jackc/pgmock 
v0.0.0-20201204152224-4fe30f7445fd/go.mod h1:hrBW0Enj2AZTNpt/7Y5rr2xe/9Mn757Wtb2xeBzPv2c= +github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65 h1:DadwsjnMwFjfWc9y5Wi/+Zz7xoE5ALHsRQlOctkOiHc= +github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65/go.mod h1:5R2h2EEX+qri8jOWMbJCtaPWkrrNc7OHwsp2TCqp7ak= github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= github.com/jackc/pgproto3 v1.1.0 h1:FYYE4yRw+AgI8wXIinMlNjBbp/UitDJwfj5LqqewP1A= @@ -350,54 +286,44 @@ github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190420180111-c116219b62db/go.mod github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190609003834-432c2951c711/go.mod h1:uH0AWtUmuShn0bcesswc4aBTWGvw0cAxIJp+6OB//Wg= github.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= -github.com/jackc/pgproto3/v2 v2.0.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= -github.com/jackc/pgproto3/v2 v2.0.6 h1:b1105ZGEMFe7aCvrT1Cca3VoVb4ZFMaFJLJcg/3zD+8= github.com/jackc/pgproto3/v2 v2.0.6/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= -github.com/jackc/pgservicefile v0.0.0-20200307190119-3430c5407db8/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E= +github.com/jackc/pgproto3/v2 v2.1.1 h1:7PQ/4gLoqnl87ZxL7xjO0DR5gYuviDCZxQJsUlFW1eI= +github.com/jackc/pgproto3/v2 v2.1.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b h1:C8S2+VttkHFdOOCXJe+YGfa4vHYwlt4Zx+IVXQ97jYg= github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E= github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg= github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod 
h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc= github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw= -github.com/jackc/pgtype v1.2.0/go.mod h1:5m2OfMh1wTK7x+Fk952IDmI4nw3nPrvtQdM0ZT4WpC0= -github.com/jackc/pgtype v1.3.1-0.20200510190516-8cd94a14c75a/go.mod h1:vaogEUkALtxZMCH411K+tKzNpwzCKU+AnPzBKZ+I+Po= -github.com/jackc/pgtype v1.3.1-0.20200606141011-f6355165a91c/go.mod h1:cvk9Bgu/VzJ9/lxTO5R5sf80p0DiucVtN7ZxvaC4GmQ= -github.com/jackc/pgtype v1.7.0 h1:6f4kVsW01QftE38ufBYxKciO6gyioXSC0ABIRLcZrGs= -github.com/jackc/pgtype v1.7.0/go.mod h1:ZnHF+rMePVqDKaOfJVI4Q8IVvAQMryDlDkZnKOI75BE= +github.com/jackc/pgtype v1.8.1-0.20210724151600-32e20a603178/go.mod h1:C516IlIV9NKqfsMCXTdChteoXmwgUceqaLfjg2e3NlM= +github.com/jackc/pgtype v1.8.1 h1:9k0IXtdJXHJbyAWQgbWr1lU+MEhPXZz6RIXxfR5oxXs= +github.com/jackc/pgtype v1.8.1/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4= github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y= github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM= github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc= -github.com/jackc/pgx/v4 v4.5.0/go.mod h1:EpAKPLdnTorwmPUUsqrPxy5fphV18j9q3wrfRXgo+kA= -github.com/jackc/pgx/v4 v4.6.1-0.20200510190926-94ba730bb1e9/go.mod h1:t3/cdRQl6fOLDxqtlyhe9UWgfIi9R8+8v8GKV5TRA/o= -github.com/jackc/pgx/v4 v4.6.1-0.20200606145419-4e5062306904/go.mod h1:ZDaNWkt9sW1JMiNn0kdYBaLelIhw7Pg4qd+Vk6tw7Hg= -github.com/jackc/pgx/v4 v4.11.0 h1:J86tSWd3Y7nKjwT/43xZBvpi04keQWx8gNC2YkdJhZI= -github.com/jackc/pgx/v4 v4.11.0/go.mod h1:i62xJgdrtVDsnL3U8ekyrQXEwGNTRoG7/8r+CIdYfcc= +github.com/jackc/pgx/v4 v4.12.1-0.20210724153913-640aa07df17c/go.mod h1:1QD0+tgSXP7iUjYm9C1NxKhny7lq6ee99u/z+IHFcgs= +github.com/jackc/pgx/v4 v4.13.0 h1:JCjhT5vmhMAf/YwBHLvrBn4OGdIQBiFG6ym8Zmdx570= +github.com/jackc/pgx/v4 
v4.13.0/go.mod h1:9P4X524sErlaxj0XSGZk7s+LD0eOyu1ZDUrrpznYDF0= github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= -github.com/jackc/puddle v1.1.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= -github.com/jackc/puddle v1.1.1/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= -github.com/jhump/protoreflect v1.8.1/go.mod h1:7GcYQDdMU/O/BBrl/cX6PNHpXh6cenjd8pneu5yW7Tg= -github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= -github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= -github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/julienschmidt/httprouter v1.3.0/go.mod 
h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= github.com/karrick/godirwalk v1.15.8 h1:7+rWAZPn9zuRxaIqqT8Ohs2Q2Ac0msBqwRdxNCr2VVs= github.com/karrick/godirwalk v1.15.8/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.11.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= -github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= -github.com/kljensen/snowball v0.6.0/go.mod h1:27N7E8fVU5H68RlUmnWwZCfxgt4POBJfENGMvNRhldw= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kortschak/utter v1.0.1/go.mod h1:vSmSjbyrlKjjsL71193LmzBOKgwePk9DH6uFaWHIInc= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= @@ -408,25 +334,15 @@ github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= 
+github.com/leesper/go_rng v0.0.0-20190531154944-a612b043e353 h1:X/79QL0b4YJVO5+OsPH9rF2u428CIrGL/jLmPsoOQQ4= +github.com/leesper/go_rng v0.0.0-20190531154944-a612b043e353/go.mod h1:N0SVk0uhy+E1PZ3C9ctsPRlvOPAFPkCNlcPBDkt0N3U= github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lib/pq v1.10.0 h1:Zx5DJFEYQXio93kgXnQ09fXNiUKsqv4OUEu2UtGcB1E= github.com/lib/pq v1.10.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= -github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= -github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= -github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= -github.com/m3db/prometheus_client_golang v0.8.1 h1:t7w/tcFws81JL1j5sqmpqcOyQOpH4RDOmIe3A3fdN3w= -github.com/m3db/prometheus_client_golang v0.8.1/go.mod h1:8R/f1xYhXWq59KD/mbRqoBulXejss7vYtYzWmruNUwI= -github.com/m3db/prometheus_client_model v0.1.0 h1:cg1+DiuyT6x8h9voibtarkH1KT6CmsewBSaBhe8wzLo= -github.com/m3db/prometheus_client_model v0.1.0/go.mod h1:Qfsxn+LypxzF+lNhak7cF7k0zxK7uB/ynGYoj80zcD4= -github.com/m3db/prometheus_common v0.1.0 h1:YJu6eCIV6MQlcwND24cRG/aRkZDX1jvYbsNNs1ZYr0w= -github.com/m3db/prometheus_common v0.1.0/go.mod h1:EBmDQaMAy4B8i+qsg1wMXAelLNVbp49i/JOeVszQ/rs= -github.com/m3db/prometheus_procfs v0.8.1 h1:LsxWzVELhDU9sLsZTaFLCeAwCn7bC7qecZcK4zobs/g= -github.com/m3db/prometheus_procfs v0.8.1/go.mod h1:N8lv8fLh3U3koZx1Bnisj60GYUMDpWb09x1R+dmMOJo= +github.com/lib/pq v1.10.2 h1:AqzbZs4ZoCBp+GtejcpCpcxM3zlSMx29dXbUSeVtJb8= +github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= 
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/markbates/errx v1.1.0 h1:QDFeR+UP95dO12JgW+tgi2UVfo0V8YBHiUIOaeBPiEI= github.com/markbates/errx v1.1.0/go.mod h1:PLa46Oex9KNbVDZhKel8v1OT7hD5JZ2eI7AHhA0wswc= github.com/markbates/oncer v1.0.0 h1:E83IaVAHygyndzPimgUYJjbshhDTALZyXxvk9FOlQRY= @@ -435,108 +351,64 @@ github.com/markbates/safe v1.0.1 h1:yjZkbvRM6IzKj9tlu/zMJLS0n/V351OZWRnF3QfaUxI= github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= -github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-oci8 v0.1.1/go.mod h1:wjDx6Xm9q7dFtHJvIlrI99JytznLw5wQ4R+9mNXJwGI= -github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-sqlite3 v1.14.6 h1:dNPt6NO46WmLVt2DLNpwczCmdV5boIZ6g/tlDrlRUbg= 
github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/cli v1.1.2/go.mod h1:6iaV0fGdElS6dPBx0EApTxHrcWvmJphyh2n8YBLPPZ4= github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= -github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= -github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= -github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod 
h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg= github.com/mschoch/smat v0.2.0 h1:8imxQsjDm8yFEAVBe7azKmKSgzSkZXDuKkSq9374khM= github.com/mschoch/smat v0.2.0/go.mod h1:kc9mz7DoBKqDyiRL7VZN8KvXQMWeTaVnttLRXOlotKw= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= -github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= -github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k= -github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= -github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= -github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= -github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= -github.com/nishanths/predeclared v0.0.0-20200524104333-86fad755b4d3/go.mod h1:nt3d53pc1VYcphSCIaYAJtnPYnr3Zyn8fMq2wvPGPso= -github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= -github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/op/go-logging 
v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= -github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= -github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= -github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA= -github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= -github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= -github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= -github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= -github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= -github.com/philhofer/fwd v1.0.0 h1:UbZqGr5Y38ApvM/V/jEljVxwocdweyH+vmYvRPBnbqQ= -github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= -github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= -github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= 
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= -github.com/pkg/profile v1.5.0/go.mod h1:qBsxPvzyUincmltOk6iyRVxHYg4adc0OFOv72ZdLa18= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ= +github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.1.0/go.mod 
h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.26.0 h1:iMAkS2TDoNWnKM+Kopnx/8tnEStIfpYA0ur0xQzzhMQ= +github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= -github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod 
h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= -github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= @@ -548,41 +420,26 @@ github.com/rubenv/sql-migrate v0.0.0-20210408115534-a32ed26c37ea h1:Yiqmu2rZoPdj github.com/rubenv/sql-migrate v0.0.0-20210408115534-a32ed26c37ea/go.mod h1:HFLT6i9iR4QBOF5rdCyjddC9t59ArqWJV2xx+jwcCMo= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= -github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= -github.com/shopspring/decimal v0.0.0-20200227202807-02e2044944cc h1:jUIKcSPO9MoMJBbEoyE/RJoE8vz7Mb8AjvifMMwSyvY= -github.com/shopspring/decimal v0.0.0-20200227202807-02e2044944cc/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ= +github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus 
v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= -github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= github.com/spf13/cobra v0.0.6/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= -github.com/spf13/cobra v1.0.1-0.20201006035406-b97b5ead31f7/go.mod h1:yk5b0mALVusDL5fMM6Rd1wgnoO5jUPhwsQ6LQAJTidQ= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= -github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= 
github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= -github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= -github.com/steveyen/gtreap v0.1.0 h1:CjhzTa274PyJLJuMZwIzCO1PfC00oRa8d1Kc78bFXJM= -github.com/steveyen/gtreap v0.1.0/go.mod h1:kl/5J7XbrOmlIbYIXdRHDDE5QxHqpk0cmkT7Z4dM9/Y= -github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= -github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= -github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= @@ -593,49 +450,39 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5 github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= -github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ= -github.com/tinylib/msgp v1.1.0/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= -github.com/tinylib/msgp v1.1.2 h1:gWmO7n0Ys2RBEb7GPYB9Ujq8Mk5p2U08lRnmMcGy6BQ= -github.com/tinylib/msgp v1.1.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= -github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod 
h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/twitchtv/twirp v7.1.0+incompatible/go.mod h1:RRJoFSAmTEh2weEqWtpPE3vFK5YBhA6bqp2l1kfCC5A= -github.com/uber-go/tally v3.3.17+incompatible h1:nFHIuW3VQ22wItiE9kPXic8dEgExWOsVOHwpmoIvsMw= -github.com/uber-go/tally v3.3.17+incompatible/go.mod h1:YDTIBxdXyOU/sCWilKB4bgyufu1cEi0jdVnRdxvjnmU= +github.com/twmb/murmur3 v1.1.5/go.mod h1:Qq/R7NUyOfr65zD+6Q5IHKsJLwP7exErjN6lyyq3OSQ= +github.com/twmb/murmur3 v1.1.6 h1:mqrRot1BRxm+Yct+vavLMou2/iJt0tNVTTC0QoIjaZg= +github.com/twmb/murmur3 v1.1.6/go.mod h1:Qq/R7NUyOfr65zD+6Q5IHKsJLwP7exErjN6lyyq3OSQ= +github.com/uber-go/tally/v4 v4.0.1 h1:Gb78H57b/dEn9zkGmSfSaIR1SjLMB4z38N0quvJ5ERo= +github.com/uber-go/tally/v4 v4.0.1/go.mod h1:mcbhHhuBx59QTSR77pXGWYyB0XgxO6OI9JKAgWDOiNY= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= -github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= -github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/willf/bitset v1.1.10 h1:NotGKqX0KwQ72NUzqrjZq5ipPNDQex9lo3WpaS8L2sc= -github.com/willf/bitset v1.1.10/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= 
github.com/ziutek/mymysql v1.5.4 h1:GB0qdRGsTwQSBVYuVShFBKaXSnSnYYC2d9knnE1LHFs= github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wKdgO/C0= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/bbolt v1.3.5 h1:XAzx9gjCb0Rxj7EoqcClPD1d5ZBxZJk0jbuoPHenBt0= -go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= -go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= -go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= -go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.6/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= -go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= +go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/goleak 
v1.1.11-0.20210813005559-691160354723 h1:sHOAIxRGBp443oHZIPB+HsUGaksVCXVQENPxwTfQdH4= +go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= @@ -645,28 +492,29 @@ go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9E go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= -go.uber.org/zap v1.16.0 h1:uFRZXykJGK9lLY4HtgSw44DnIcAM+kRBP7x5m+NpAOM= -go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ= +go.uber.org/zap v1.19.1 h1:ue41HOKd1vGURxrmeKIgELGb3jPW9DMUDGtsinblHwI= +go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto 
v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191122220453-ac88ee75c92c/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.0.0-20210506145944-38f3c27a63bf h1:B2n+Zi5QeYRDAEodEu72OS36gmTWjgpXr2+cWcBW90o= -golang.org/x/crypto v0.0.0-20210506145944-38f3c27a63bf/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= +golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519 h1:7I4JAnoQBe7ZtJcBaYHi5UtiO8tQHbUSXxL+pnGRANg= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp 
v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= @@ -676,6 +524,7 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -687,8 +536,8 @@ golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod 
h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= @@ -696,17 +545,13 @@ golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net 
v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -733,16 +578,15 @@ golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/ golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110 h1:qWPm9rbaAMKs8Bq/9LRpbMqxWRVUAQwMI9fVrssnTfw= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4 h1:4nGaVu0QrbjT/AK2PRLuQfQuh6DJve+pELhqTdAj3x0= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod 
h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -751,14 +595,12 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 
-golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181221143128-b4a75ba826a6/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -773,11 +615,11 @@ golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -791,11 +633,17 @@ golang.org/x/sys 
v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c h1:VwygUrnw9jn88c4u8GD3rZQbqrP/tgas88tPUbBxQrk= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1 h1:SrN+KX8Art/Sf4HNj6Zcz06G7VEz+7w9tdXTPOZ7+l4= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text 
v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -803,22 +651,23 @@ golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod 
h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= @@ -832,7 +681,6 @@ golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -856,13 +704,11 @@ golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWc golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= 
golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200522201501-cb1345f3a375/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200717024301-6ddee64345a6/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200825202427-b303f430e36d h1:W07d4xkoAUSNOkOzdzXCdFGxT7o2rW4q8M34tB2i//k= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -870,7 +716,11 @@ golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= +gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod 
h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= +gonum.org/v1/gonum v0.7.0 h1:Hdks0L0hgznZLG9nzXb8vZ0rRvqNvAcgAp84y7Mwkgw= +gonum.org/v1/gonum v0.7.0/go.mod h1:L02bwd0sqlsvRv41G7wGWFCsVNZFv/k1xzGIxeANHGM= +gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= +gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -888,7 +738,6 @@ google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= @@ -899,7 +748,6 @@ google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRn google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= google.golang.org/genproto 
v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= @@ -918,24 +766,20 @@ google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210207032614-bba0dbe2a9ea/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210224155714-063164c882e6 h1:bXUwz2WkXXrXgiLxww3vWmoSHLOGv4ipdPdTvKymcKw= -google.golang.org/genproto v0.0.0-20210224155714-063164c882e6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/grpc v1.17.0/go.mod 
h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= +google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83 h1:3V2dxSZpz4zozWWUq36vUxXEKnSYitEH2LdsAx+RUmg= +google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= @@ -944,12 +788,11 @@ google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKa google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.35.0-dev.0.20201218190559-666aea1fb34c/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.37.0 
h1:uSZWeQJX5j11bIQ4AJoj+McDBo29cY1MCoC1wO3ts+c= -google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.0.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= +google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.41.0 h1:f+PlOh7QV4iIJkPrx5NQ7qaNGFQ3OTse67yaDHfju4E= +google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0 h1:M1YKkFIboKNieVO5DLUEVzQfGwJD30Nv2jfUgzb5UcE= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= @@ -962,52 +805,45 @@ google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.25.1-0.20200805231151-a709e31e5d12/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.25.1-0.20201208041424-160c7477e0e8/go.mod h1:hFxJC2f0epmp1elRCiEGJTKAWbwxZ2nvqZdHl3FQXCY= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod 
h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= gopkg.in/gorp.v1 v1.7.2 h1:j3DWlAyGVv8whO7AcIWznQ2Yj7yJkn34B8s63GViAAw= gopkg.in/gorp.v1 v1.7.2/go.mod h1:Wo3h+DBQZIxATwftsglhdD/62zRFPhGhTiu5jUJmCaw= gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s= -gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/natefinch/lumberjack.v2 v2.0.0-20190411184413-94d9e492cc53 h1:7D4Fu4wpNSw/l+7Y5tL2ocLV1YC6BSOLs/o5OP1MfQI= gopkg.in/natefinch/lumberjack.v2 v2.0.0-20190411184413-94d9e492cc53/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= +gopkg.in/validator.v2 v2.0.0-20200605151824-2b28d334fa05/go.mod h1:o4V0GXN9/CAmCsvJ0oXYZvrZOe7syiDZSN1GWGZTGzc= gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod 
h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.4 h1:UoveltGrhghAA7ePc+e+QYDHXrBps2PqFZiHkGR/xK8= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= rsc.io/binaryregexp 
v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= -sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= diff --git a/gtreap_compact/iterator.go b/gtreap_compact/iterator.go deleted file mode 100644 index d2ec054e6..000000000 --- a/gtreap_compact/iterator.go +++ /dev/null @@ -1,152 +0,0 @@ -// Copyright (c) 2015 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package gtreap provides an in-memory implementation of the -// KVStore interfaces using the gtreap balanced-binary treap, -// copy-on-write data structure. 
-package gtreap_compact - -import ( - "bytes" - "sync" - - "github.com/steveyen/gtreap" -) - -type Iterator struct { - t *gtreap.Treap - - m sync.Mutex - cancelCh chan struct{} - nextCh chan *Item - curr *Item - currOk bool - - prefix []byte - start []byte - end []byte -} - -func (w *Iterator) Seek(k []byte) { - if w.start != nil && bytes.Compare(k, w.start) < 0 { - k = w.start - } - if w.prefix != nil && !bytes.HasPrefix(k, w.prefix) { - if bytes.Compare(k, w.prefix) < 0 { - k = w.prefix - } else { - var end []byte - for i := len(w.prefix) - 1; i >= 0; i-- { - c := w.prefix[i] - if c < 0xff { - end = make([]byte, i+1) - copy(end, w.prefix) - end[i] = c + 1 - break - } - } - k = end - } - } - w.restart(&Item{k: k}) -} - -func (w *Iterator) restart(start *Item) *Iterator { - cancelCh := make(chan struct{}) - nextCh := make(chan *Item, 1) - - w.m.Lock() - if w.cancelCh != nil { - close(w.cancelCh) - } - w.cancelCh = cancelCh - w.nextCh = nextCh - w.curr = nil - w.currOk = false - w.m.Unlock() - - go func() { - if start != nil { - w.t.VisitAscend(start, func(itm gtreap.Item) bool { - select { - case <-cancelCh: - return false - case nextCh <- itm.(*Item): - return true - } - }) - } - close(nextCh) - }() - - w.Next() - - return w -} - -func (w *Iterator) Next() { - w.m.Lock() - nextCh := w.nextCh - w.m.Unlock() - w.curr, w.currOk = <-nextCh -} - -func (w *Iterator) Current() ([]byte, []byte, bool) { - w.m.Lock() - defer w.m.Unlock() - if !w.currOk || w.curr == nil { - return nil, nil, false - } - if w.prefix != nil && !bytes.HasPrefix(w.curr.k, w.prefix) { - return nil, nil, false - } else if w.end != nil && bytes.Compare(w.curr.k, w.end) >= 0 { - return nil, nil, false - } - return w.curr.k, w.curr.v, w.currOk -} - -func (w *Iterator) Key() []byte { - k, _, ok := w.Current() - if !ok { - return nil - } - return k -} - -func (w *Iterator) Value() []byte { - _, v, ok := w.Current() - if !ok { - return nil - } - return v -} - -func (w *Iterator) Valid() bool { - _, _, ok 
:= w.Current() - return ok -} - -func (w *Iterator) Close() error { - w.m.Lock() - if w.cancelCh != nil { - close(w.cancelCh) - } - w.cancelCh = nil - w.nextCh = nil - w.curr = nil - w.currOk = false - w.m.Unlock() - - return nil -} diff --git a/gtreap_compact/reader.go b/gtreap_compact/reader.go deleted file mode 100644 index 70aedb6fd..000000000 --- a/gtreap_compact/reader.go +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright (c) 2015 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package gtreap provides an in-memory implementation of the -// KVStore interfaces using the gtreap balanced-binary treap, -// copy-on-write data structure. 
-package gtreap_compact - -import ( - "github.com/blevesearch/upsidedown_store_api" - - "github.com/steveyen/gtreap" -) - -type Reader struct { - t *gtreap.Treap -} - -func (r *Reader) Get(k []byte) (v []byte, err error) { - var rv []byte - itm := r.t.Get(&Item{k: k}) - if itm != nil { - rv = make([]byte, len(itm.(*Item).v)) - copy(rv, itm.(*Item).v) - return rv, nil - } - return nil, nil -} - -func (r *Reader) MultiGet(keys [][]byte) ([][]byte, error) { - return store.MultiGet(r, keys) -} - -func (r *Reader) PrefixIterator(k []byte) store.KVIterator { - rv := Iterator{ - t: r.t, - prefix: k, - } - rv.restart(&Item{k: k}) - return &rv -} - -func (r *Reader) RangeIterator(start, end []byte) store.KVIterator { - rv := Iterator{ - t: r.t, - start: start, - end: end, - } - rv.restart(&Item{k: start}) - return &rv -} - -func (r *Reader) Close() error { - return nil -} diff --git a/gtreap_compact/store.go b/gtreap_compact/store.go deleted file mode 100644 index 93ddf8a1d..000000000 --- a/gtreap_compact/store.go +++ /dev/null @@ -1,118 +0,0 @@ -// Copyright (c) 2015 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package gtreap provides an in-memory implementation of the -// KVStore interfaces using the gtreap balanced-binary treap, -// copy-on-write data structure. 
- -package gtreap_compact - -import ( - "bytes" - "fmt" - "os" - "sync" - - "github.com/blevesearch/bleve/v2/registry" - "github.com/blevesearch/upsidedown_store_api" - "github.com/steveyen/gtreap" -) - -const Name = "gtreap_compact" - -type Store struct { - m sync.Mutex - t *gtreap.Treap - mo store.MergeOperator -} - -type Item struct { - k []byte - v []byte -} - -func itemCompare(a, b interface{}) int { - return bytes.Compare(a.(*Item).k, b.(*Item).k) -} - -func New(mo store.MergeOperator, config map[string]interface{}) (store.KVStore, error) { - path, ok := config["path"].(string) - if !ok { - return nil, fmt.Errorf("must specify path") - } - if path != "" { - return nil, os.ErrInvalid - } - - rv := Store{ - t: gtreap.NewTreap(itemCompare), - mo: mo, - } - return &rv, nil -} - -func (s *Store) Close() error { - return nil -} - -func (s *Store) Reader() (store.KVReader, error) { - s.m.Lock() - t := s.t - s.m.Unlock() - return &Reader{t: t}, nil -} - -func (s *Store) Writer() (store.KVWriter, error) { - return &Writer{s: s}, nil -} - -// Compact removes DictionaryTerm entries with a count of zero. -// This is a workaround for github issue #374. -// Code from https://github.com/blevesearch/bleve/pull/1317. 
-func (s *Store) Compact() (err error) { - kvreader, err := s.Reader() - if err != nil { - return err - } - - defer func() { - if cerr := kvreader.Close(); err == nil && cerr != nil { - err = cerr - } - }() - - prefix := []byte("d") - - s.m.Lock() - defer s.m.Unlock() - it := kvreader.PrefixIterator(prefix) - defer func() { - if cerr := it.Close(); err == nil && cerr != nil { - err = cerr - } - }() - - for ; it.Valid(); it.Next() { - k, v, _ := it.Current() - if bytes.Equal(v, []byte{0}) { - s.t = s.t.Delete(&Item{k: k}) - } - } - - return -} - -func init() { - registry.RegisterKVStore(Name, New) -} diff --git a/gtreap_compact/writer.go b/gtreap_compact/writer.go deleted file mode 100644 index 47626d530..000000000 --- a/gtreap_compact/writer.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright (c) 2015 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package gtreap provides an in-memory implementation of the -// KVStore interfaces using the gtreap balanced-binary treap, -// copy-on-write data structure. 
-package gtreap_compact - -import ( - "fmt" - "math/rand" - - "github.com/blevesearch/upsidedown_store_api" -) - -type Writer struct { - s *Store -} - -func (w *Writer) NewBatch() store.KVBatch { - return store.NewEmulatedBatch(w.s.mo) -} - -func (w *Writer) NewBatchEx(options store.KVBatchOptions) ([]byte, store.KVBatch, error) { - return make([]byte, options.TotalBytes), w.NewBatch(), nil -} - -func (w *Writer) ExecuteBatch(batch store.KVBatch) error { - - emulatedBatch, ok := batch.(*store.EmulatedBatch) - if !ok { - return fmt.Errorf("wrong type of batch") - } - - w.s.m.Lock() - for k, mergeOps := range emulatedBatch.Merger.Merges { - kb := []byte(k) - var existingVal []byte - existingItem := w.s.t.Get(&Item{k: kb}) - if existingItem != nil { - existingVal = w.s.t.Get(&Item{k: kb}).(*Item).v - } - mergedVal, fullMergeOk := w.s.mo.FullMerge(kb, existingVal, mergeOps) - if !fullMergeOk { - return fmt.Errorf("merge operator returned failure") - } - w.s.t = w.s.t.Upsert(&Item{k: kb, v: mergedVal}, rand.Int()) - } - - for _, op := range emulatedBatch.Ops { - if op.V != nil { - w.s.t = w.s.t.Upsert(&Item{k: op.K, v: op.V}, rand.Int()) - } else { - w.s.t = w.s.t.Delete(&Item{k: op.K}) - } - } - w.s.m.Unlock() - - return nil -} - -func (w *Writer) Close() error { - w.s.Compact() - w.s = nil - return nil -} diff --git a/iap/iap.go b/iap/iap.go index 93a2e20b0..c275cfdab 100644 --- a/iap/iap.go +++ b/iap/iap.go @@ -30,7 +30,7 @@ import ( "sync" "time" - "github.com/dgrijalva/jwt-go" + jwt "github.com/golang-jwt/jwt/v4" ) const ( diff --git a/server/api.go b/server/api.go index b59e75cc6..12bc7f902 100644 --- a/server/api.go +++ b/server/api.go @@ -30,8 +30,8 @@ import ( "strings" "time" - "github.com/dgrijalva/jwt-go" "github.com/gofrs/uuid" + jwt "github.com/golang-jwt/jwt/v4" "github.com/gorilla/handlers" "github.com/gorilla/mux" grpcgw "github.com/grpc-ecosystem/grpc-gateway/v2/runtime" diff --git a/server/api_authenticate.go b/server/api_authenticate.go index 
3b15ff930..cc7b86913 100644 --- a/server/api_authenticate.go +++ b/server/api_authenticate.go @@ -22,8 +22,8 @@ import ( "strings" "time" - "github.com/dgrijalva/jwt-go" "github.com/gofrs/uuid" + jwt "github.com/golang-jwt/jwt/v4" "github.com/heroiclabs/nakama-common/api" "go.uber.org/zap" "google.golang.org/grpc/codes" diff --git a/server/console.go b/server/console.go index 3cd779ac2..5762f675d 100644 --- a/server/console.go +++ b/server/console.go @@ -28,7 +28,7 @@ import ( "strings" "time" - "github.com/dgrijalva/jwt-go" + jwt "github.com/golang-jwt/jwt/v4" "github.com/gorilla/handlers" "github.com/gorilla/mux" grpcgw "github.com/grpc-ecosystem/grpc-gateway/v2/runtime" diff --git a/server/console_authenticate.go b/server/console_authenticate.go index 98f29856d..2c6f82b1a 100644 --- a/server/console_authenticate.go +++ b/server/console_authenticate.go @@ -22,7 +22,7 @@ import ( "fmt" "time" - "github.com/dgrijalva/jwt-go" + jwt "github.com/golang-jwt/jwt/v4" "github.com/heroiclabs/nakama/v3/console" "github.com/jackc/pgtype" "go.uber.org/zap" diff --git a/server/match_common.go b/server/match_common.go new file mode 100644 index 000000000..5fd89d362 --- /dev/null +++ b/server/match_common.go @@ -0,0 +1,245 @@ +// Copyright 2021 The Nakama Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package server + +import ( + "fmt" + "reflect" + "strings" + "time" + + "github.com/blugelabs/bluge" + "github.com/blugelabs/bluge/analysis/analyzer" + "github.com/blugelabs/bluge/search" + queryStr "github.com/blugelabs/query_string" + "go.uber.org/zap" +) + +type blugeMatch struct { + ID string + Fields map[string]interface{} +} + +type BlugeResult struct { + Hits []*blugeMatch +} + +func IterateBlugeMatches(dmi search.DocumentMatchIterator, loadFields map[string]struct{}, logger *zap.Logger) (*BlugeResult, error) { + rv := &BlugeResult{} + dm, err := dmi.Next() + for dm != nil && err == nil { + var bm blugeMatch + bm.Fields = make(map[string]interface{}) + err = dm.VisitStoredFields(func(field string, value []byte) bool { + if field == "_id" { + bm.ID = string(value) + } + if _, ok := loadFields[field]; ok { + if field == "tick_rate" { + // hard-coded numeric decoding + bm.Fields[field], err = bluge.DecodeNumericFloat64(value) + if err != nil { + logger.Warn("error decoding numeric value: %v", zap.Error(err)) + } + } else { + bm.Fields[field] = string(value) + } + } + + return true + }) + if err != nil { + return nil, fmt.Errorf("error visiting stored field: %v", err.Error()) + } + rv.Hits = append(rv.Hits, &bm) + dm, err = dmi.Next() + } + if err != nil { + return nil, fmt.Errorf("error iterating document matches: %v", err.Error()) + } + + return rv, nil +} + +func BlugeWalkDocument(data interface{}, path []string, doc *bluge.Document) { + val := reflect.ValueOf(data) + if !val.IsValid() { + return + } + + typ := val.Type() + switch typ.Kind() { + case reflect.Map: + if typ.Key().Kind() == reflect.String { + for _, key := range val.MapKeys() { + fieldName := key.String() + fieldVal := val.MapIndex(key).Interface() + blugeProcessProperty(fieldVal, append(path, fieldName), doc) + } + } + case reflect.Struct: + for i := 0; i < val.NumField(); i++ { + field := typ.Field(i) + fieldName := field.Name + // anonymous fields of type struct can elide the type name + 
if field.Anonymous && field.Type.Kind() == reflect.Struct { + fieldName = "" + } + + // if the field has a name under the specified tag, prefer that + tag := field.Tag.Get("json") + tagFieldName := blugeParseTagName(tag) + if tagFieldName == "-" { + continue + } + // allow tag to set field name to empty, only if anonymous + if field.Tag != "" && (tagFieldName != "" || field.Anonymous) { + fieldName = tagFieldName + } + + if val.Field(i).CanInterface() { + fieldVal := val.Field(i).Interface() + newpath := path + if fieldName != "" { + newpath = append(path, fieldName) + } + blugeProcessProperty(fieldVal, newpath, doc) + } + } + case reflect.Slice, reflect.Array: + for i := 0; i < val.Len(); i++ { + if val.Index(i).CanInterface() { + fieldVal := val.Index(i).Interface() + blugeProcessProperty(fieldVal, path, doc) + } + } + case reflect.Ptr: + ptrElem := val.Elem() + if ptrElem.IsValid() && ptrElem.CanInterface() { + blugeProcessProperty(ptrElem.Interface(), path, doc) + } + case reflect.String: + blugeProcessProperty(val.String(), path, doc) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + blugeProcessProperty(float64(val.Int()), path, doc) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + blugeProcessProperty(float64(val.Uint()), path, doc) + case reflect.Float32, reflect.Float64: + blugeProcessProperty(float64(val.Float()), path, doc) + case reflect.Bool: + blugeProcessProperty(val.Bool(), path, doc) + } +} + +func blugeProcessProperty(property interface{}, path []string, doc *bluge.Document) { + pathString := strings.Join(path, ".") + + propertyValue := reflect.ValueOf(property) + if !propertyValue.IsValid() { + // cannot do anything with the zero value + return + } + propertyType := propertyValue.Type() + switch propertyType.Kind() { + case reflect.String: + propertyValueString := propertyValue.String() + + // automatic indexing behavior + // first see if it can be parsed as a date + 
parsedDateTime, err := blugeParseDateTime(propertyValueString) + if err != nil { + // index as text + doc.AddField(bluge.NewKeywordField(pathString, propertyValueString)) + } else { + // index as datetime + doc.AddField(bluge.NewDateTimeField(pathString, parsedDateTime)) + } + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + blugeProcessProperty(float64(propertyValue.Int()), path, doc) + return + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + blugeProcessProperty(float64(propertyValue.Uint()), path, doc) + return + case reflect.Float64, reflect.Float32: + propertyValFloat := propertyValue.Float() + + // automatic indexing behavior + doc.AddField(bluge.NewNumericField(pathString, propertyValFloat)) + + case reflect.Bool: + propertyValBool := propertyValue.Bool() + + // automatic indexing behavior + if propertyValBool { + doc.AddField(bluge.NewKeywordField(pathString, "T")) + } else { + doc.AddField(bluge.NewKeywordField(pathString, "F")) + } + + case reflect.Struct: + switch property := property.(type) { + case time.Time: + // don't descend into the time struct + doc.AddField(bluge.NewDateTimeField(pathString, property)) + + default: + BlugeWalkDocument(property, path, doc) + } + case reflect.Map, reflect.Slice: + BlugeWalkDocument(property, path, doc) + case reflect.Ptr: + if !propertyValue.IsNil() { + BlugeWalkDocument(property, path, doc) + } + default: + BlugeWalkDocument(property, path, doc) + } +} + +func blugeParseTagName(tag string) string { + if idx := strings.Index(tag, ","); idx != -1 { + return tag[:idx] + } + return tag +} + +func blugeParseDateTime(input string) (time.Time, error) { + layouts := []string{ + time.RFC3339Nano, + time.RFC3339, + "2006-01-02T15:04:05", // rfc3339NoTimezone + "2006-01-02 15:04:05", // rfc3339NoTimezoneNoT + "2006-01-02", // rfc3339NoTime + } + for _, layout := range layouts { + rv, err := time.Parse(layout, input) + if err == nil { + return rv, nil + } + } 
+ return time.Time{}, fmt.Errorf("invalid date time") +} + +type ValidatableQuery interface { + Validate() error +} + +var blugeKeywordAnalyzer = analyzer.NewKeywordAnalyzer() + +func ParseQueryString(query string) (bluge.Query, error) { + opt := queryStr.DefaultOptions().WithDefaultAnalyzer(blugeKeywordAnalyzer) + return queryStr.ParseQueryString(query, opt) +} diff --git a/server/match_registry.go b/server/match_registry.go index 96feb803b..622723ba2 100644 --- a/server/match_registry.go +++ b/server/match_registry.go @@ -25,15 +25,12 @@ import ( "sync" "time" - "github.com/blevesearch/bleve/v2" - "github.com/blevesearch/bleve/v2/analysis/analyzer/keyword" - "github.com/blevesearch/bleve/v2/index/upsidedown" - "github.com/blevesearch/bleve/v2/search/query" + "github.com/blugelabs/bluge" + "github.com/blugelabs/bluge/index" "github.com/gofrs/uuid" "github.com/heroiclabs/nakama-common/api" "github.com/heroiclabs/nakama-common/rtapi" "github.com/heroiclabs/nakama-common/runtime" - "github.com/heroiclabs/nakama/v3/gtreap_compact" "go.uber.org/atomic" "go.uber.org/zap" "google.golang.org/protobuf/types/known/wrapperspb" @@ -136,9 +133,9 @@ type LocalMatchRegistry struct { ctx context.Context ctxCancelFn context.CancelFunc - matches *sync.Map - matchCount *atomic.Int64 - index bleve.Index + matches *sync.Map + matchCount *atomic.Int64 + indexWriter *bluge.Writer pendingUpdatesMutex *sync.Mutex pendingUpdates map[string]*MatchIndexEntry @@ -148,10 +145,9 @@ type LocalMatchRegistry struct { } func NewLocalMatchRegistry(logger, startupLogger *zap.Logger, config Config, sessionRegistry SessionRegistry, tracker Tracker, router MessageRouter, metrics Metrics, node string) MatchRegistry { - mapping := bleve.NewIndexMapping() - mapping.DefaultAnalyzer = keyword.Name - index, err := bleve.NewUsing("", mapping, upsidedown.Name, gtreap_compact.Name, nil) + cfg := bluge.InMemoryOnlyConfig() + indexWriter, err := bluge.OpenWriter(cfg) if err != nil { startupLogger.Fatal("Failed to 
create match registry index", zap.Error(err)) } @@ -170,9 +166,9 @@ func NewLocalMatchRegistry(logger, startupLogger *zap.Logger, config Config, ses ctx: ctx, ctxCancelFn: ctxCancelFn, - matches: &sync.Map{}, - matchCount: atomic.NewInt64(0), - index: index, + matches: &sync.Map{}, + matchCount: atomic.NewInt64(0), + indexWriter: indexWriter, pendingUpdatesMutex: &sync.Mutex{}, pendingUpdates: make(map[string]*MatchIndexEntry, 10), @@ -183,7 +179,7 @@ func NewLocalMatchRegistry(logger, startupLogger *zap.Logger, config Config, ses go func() { ticker := time.NewTicker(time.Duration(config.GetMatch().LabelUpdateIntervalMs) * time.Millisecond) - batch := r.index.NewBatch() + batch := bluge.NewBatch() for { select { case <-ctx.Done(): @@ -198,7 +194,7 @@ func NewLocalMatchRegistry(logger, startupLogger *zap.Logger, config Config, ses return r } -func (r *LocalMatchRegistry) processLabelUpdates(batch *bleve.Batch) { +func (r *LocalMatchRegistry) processLabelUpdates(batch *index.Batch) { r.pendingUpdatesMutex.Lock() if len(r.pendingUpdates) == 0 { r.pendingUpdatesMutex.Unlock() @@ -210,15 +206,17 @@ func (r *LocalMatchRegistry) processLabelUpdates(batch *bleve.Batch) { for id, op := range pendingUpdates { if op == nil { - batch.Delete(id) + batch.Delete(bluge.Identifier(id)) continue } - if err := batch.Index(id, op); err != nil { - r.logger.Error("error indexing match label update", zap.Error(err)) + doc, err := MapMatchIndexEntry(id, op, r.logger) + if err != nil { + r.logger.Error("error mapping match index entry to doc: %v", zap.Error(err)) } + batch.Update(bluge.Identifier(id), doc) } - if err := r.index.Batch(batch); err != nil { + if err := r.indexWriter.Batch(batch); err != nil { r.logger.Error("error processing match label updates", zap.Error(err)) } batch.Reset() @@ -371,8 +369,19 @@ func (r *LocalMatchRegistry) ListMatches(ctx context.Context, limit int, authori return make([]*api.Match, 0), nil } + indexReader, err := r.indexWriter.Reader() + if err != nil { 
+ return nil, fmt.Errorf("error accessing index reader: %v", err.Error()) + } + defer func() { + err = indexReader.Close() + if err != nil { + r.logger.Error("error closing index reader", zap.Error(err)) + } + }() + var allowRelayed bool - var labelResults *bleve.SearchResult + var labelResults *BlugeResult if queryString != nil { if authoritative != nil && !authoritative.Value { // A filter on query is requested but authoritative matches are not allowed. @@ -389,20 +398,33 @@ func (r *LocalMatchRegistry) ListMatches(ctx context.Context, limit int, authori } // Apply the query filter to the set of known match labels. - var q query.Query + var q bluge.Query if queryString := queryString.Value; queryString == "" { - q = bleve.NewMatchAllQuery() + q = bluge.NewMatchAllQuery() } else { - q = bleve.NewQueryStringQuery(queryString) + var err error + q, err = ParseQueryString(queryString) + if err != nil { + return nil, fmt.Errorf("error parsing query string: %v", err.Error()) + } } - searchReq := bleve.NewSearchRequestOptions(q, count, 0, false) - searchReq.Fields = []string{"label_string", "tick_rate", "handler_name"} + + searchReq := bluge.NewTopNSearch(count, q) searchReq.SortBy([]string{"-create_time"}) - var err error - labelResults, err = r.index.SearchInContext(ctx, searchReq) + + labelResultsItr, err := indexReader.Search(ctx, searchReq) if err != nil { return nil, fmt.Errorf("error listing matches by query: %v", err.Error()) } + labelResults, err = IterateBlugeMatches(labelResultsItr, + map[string]struct{}{ + "label_string": {}, + "tick_rate": {}, + "handler_name": {}, + }, r.logger) + if err != nil { + return nil, fmt.Errorf("error iterating bluge matches: %v", err.Error()) + } } else if label != nil { if authoritative != nil && !authoritative.Value { // A filter on label is requested but authoritative matches are not allowed. 
@@ -419,16 +441,24 @@ func (r *LocalMatchRegistry) ListMatches(ctx context.Context, limit int, authori } // Apply the label filter to the set of known match labels. - indexQuery := bleve.NewMatchQuery(label.Value) + indexQuery := bluge.NewMatchQuery(label.Value) indexQuery.SetField("label_string") - searchReq := bleve.NewSearchRequestOptions(indexQuery, count, 0, false) - searchReq.Fields = []string{"label_string", "tick_rate", "handler_name"} + searchReq := bluge.NewTopNSearch(count, indexQuery) searchReq.SortBy([]string{"-create_time"}) - var err error - labelResults, err = r.index.SearchInContext(ctx, searchReq) + + labelResultsItr, err := indexReader.Search(ctx, searchReq) if err != nil { return nil, fmt.Errorf("error listing matches by label: %v", err.Error()) } + labelResults, err = IterateBlugeMatches(labelResultsItr, + map[string]struct{}{ + "label_string": {}, + "tick_rate": {}, + "handler_name": {}, + }, r.logger) + if err != nil { + return nil, fmt.Errorf("error iterating bluge matches: %v", err.Error()) + } } else if authoritative == nil || authoritative.Value { // Not using label/query filter but we still need access to the indexed labels to return them // if authoritative matches may be included in the results. 
@@ -440,15 +470,23 @@ func (r *LocalMatchRegistry) ListMatches(ctx context.Context, limit int, authori return make([]*api.Match, 0), nil } - indexQuery := bleve.NewMatchAllQuery() - searchReq := bleve.NewSearchRequestOptions(indexQuery, count, 0, false) - searchReq.Fields = []string{"label_string", "tick_rate", "handler_name"} + indexQuery := bluge.NewMatchAllQuery() + searchReq := bluge.NewTopNSearch(count, indexQuery) searchReq.SortBy([]string{"-create_time"}) - var err error - labelResults, err = r.index.SearchInContext(ctx, searchReq) + + labelResultsItr, err := indexReader.Search(ctx, searchReq) if err != nil { return nil, fmt.Errorf("error listing matches by label: %v", err.Error()) } + labelResults, err = IterateBlugeMatches(labelResultsItr, + map[string]struct{}{ + "label_string": {}, + "tick_rate": {}, + "handler_name": {}, + }, r.logger) + if err != nil { + return nil, fmt.Errorf("error iterating bluge matches: %v", err.Error()) + } if authoritative == nil { // Expect a possible mix of authoritative and relayed matches. @@ -459,7 +497,7 @@ func (r *LocalMatchRegistry) ListMatches(ctx context.Context, limit int, authori allowRelayed = true } - if labelResults != nil && labelResults.Hits.Len() == 0 && authoritative != nil && !authoritative.Value { + if labelResults != nil && len(labelResults.Hits) == 0 && authoritative != nil && !authoritative.Value { // No results based on label/query, no point in further filtering by size. 
return make([]*api.Match, 0), nil } @@ -806,3 +844,19 @@ func (r *LocalMatchRegistry) GetState(ctx context.Context, id uuid.UUID, node st return presences, r.Tick, r.State, nil } } + +func MapMatchIndexEntry(id string, in *MatchIndexEntry, logger *zap.Logger) (*bluge.Document, error) { + rv := bluge.NewDocument(id) + + rv.AddField(bluge.NewKeywordField("node", in.Node)) + rv.AddField(bluge.NewKeywordField("label_string", in.LabelString).StoreValue()) + rv.AddField(bluge.NewNumericField("tick_rate", float64(in.TickRate)).StoreValue()) + rv.AddField(bluge.NewKeywordField("handler_name", in.HandlerName).StoreValue()) + rv.AddField(bluge.NewNumericField("create_time", float64(in.CreateTime)).StoreValue()) + + if in.Label != nil { + BlugeWalkDocument(in.Label, []string{"label"}, rv) + } + + return rv, nil +} diff --git a/server/matchmaker.go b/server/matchmaker.go index 8832be752..3ab7f28ab 100644 --- a/server/matchmaker.go +++ b/server/matchmaker.go @@ -17,17 +17,16 @@ package server import ( "context" "fmt" + "math" "sync" "time" - "github.com/blevesearch/bleve/v2" - "github.com/blevesearch/bleve/v2/analysis/analyzer/keyword" - "github.com/blevesearch/bleve/v2/index/upsidedown" - "github.com/dgrijalva/jwt-go" + "github.com/blugelabs/bluge" + "github.com/blugelabs/bluge/index" "github.com/gofrs/uuid" + jwt "github.com/golang-jwt/jwt/v4" "github.com/heroiclabs/nakama-common/rtapi" "github.com/heroiclabs/nakama-common/runtime" - "github.com/heroiclabs/nakama/v3/gtreap_compact" "go.uber.org/atomic" "go.uber.org/zap" ) @@ -126,8 +125,7 @@ type LocalMatchmaker struct { ctx context.Context ctxCancelFn context.CancelFunc - index bleve.Index - batchPool chan *bleve.Batch + indexWriter *bluge.Writer sessionTickets map[string]map[string]struct{} partyTickets map[string]map[string]struct{} entries map[string][]*MatchmakerEntry @@ -136,10 +134,8 @@ type LocalMatchmaker struct { } func NewLocalMatchmaker(logger, startupLogger *zap.Logger, config Config, router MessageRouter, 
runtime *Runtime) Matchmaker { - mapping := bleve.NewIndexMapping() - mapping.DefaultAnalyzer = keyword.Name - - index, err := bleve.NewUsing("", mapping, upsidedown.Name, gtreap_compact.Name, nil) + cfg := bluge.InMemoryOnlyConfig() + indexWriter, err := bluge.OpenWriter(cfg) if err != nil { startupLogger.Fatal("Failed to create matchmaker index", zap.Error(err)) } @@ -157,8 +153,7 @@ func NewLocalMatchmaker(logger, startupLogger *zap.Logger, config Config, router ctx: ctx, ctxCancelFn: ctxCancelFn, - index: index, - batchPool: make(chan *bleve.Batch, config.GetMatchmaker().BatchPoolSize), + indexWriter: indexWriter, sessionTickets: make(map[string]map[string]struct{}), partyTickets: make(map[string]map[string]struct{}), entries: make(map[string][]*MatchmakerEntry), @@ -166,13 +161,9 @@ func NewLocalMatchmaker(logger, startupLogger *zap.Logger, config Config, router activeIndexes: make(map[string]*MatchmakerIndex), } - for i := 0; i < config.GetMatchmaker().BatchPoolSize; i++ { - m.batchPool <- m.index.NewBatch() - } - go func() { ticker := time.NewTicker(time.Duration(config.GetMatchmaker().IntervalSec) * time.Second) - batch := m.index.NewBatch() + batch := bluge.NewBatch() for { select { case <-ctx.Done(): @@ -191,7 +182,7 @@ func (m *LocalMatchmaker) Stop() { m.ctxCancelFn() } -func (m *LocalMatchmaker) process(batch *bleve.Batch) { +func (m *LocalMatchmaker) process(batch *index.Batch) { matchedEntries := make([][]*MatchmakerEntry, 0, 5) m.Lock() @@ -211,34 +202,65 @@ func (m *LocalMatchmaker) process(batch *bleve.Batch) { delete(m.activeIndexes, ticket) } - indexQuery := bleve.NewBooleanQuery() + indexQuery := bluge.NewBooleanQuery() // Results must match the query string. 
- indexQuery.AddMust(bleve.NewQueryStringQuery(index.Query)) + parsedIndexQuery, err := ParseQueryString(index.Query) + if err != nil { + m.logger.Error("error parsing query string", zap.Error(err)) + continue + } + indexQuery.AddMust(parsedIndexQuery) + // Results must also have compatible min/max ranges, for example 2-4 must not match with 6-8. - indexQuery.AddMust(bleve.NewQueryStringQuery(fmt.Sprintf("+min_count:<=%d +max_count:>=%d", index.MaxCount, index.MinCount))) - // Results must not include the current ticket. - ticketQuery := bleve.NewTermQuery(ticket) - ticketQuery.SetField("ticket") - indexQuery.AddMustNot(ticketQuery) + minCountRange := bluge.NewNumericRangeInclusiveQuery( + float64(index.MinCount), math.Inf(1), true, true). + SetField("min_count") + indexQuery.AddMust(minCountRange) + maxCountRange := bluge.NewNumericRangeInclusiveQuery( + math.Inf(-1), float64(index.MaxCount), true, true). + SetField("max_count") + indexQuery.AddMust(maxCountRange) + // Results must not include the current party, if any. if index.PartyId != "" { - partyIdQuery := bleve.NewTermQuery(index.PartyId) + partyIdQuery := bluge.NewTermQuery(index.PartyId) partyIdQuery.SetField("party_id") indexQuery.AddMustNot(partyIdQuery) } - searchRequest := bleve.NewSearchRequestOptions(indexQuery, len(m.indexes), 0, false) + searchRequest := bluge.NewTopNSearch(len(m.indexes), indexQuery) // Sort indexes to try and select the longest waiting tickets first. 
searchRequest.SortBy([]string{"created_at"}) - result, err := m.index.SearchInContext(m.ctx, searchRequest) + + indexReader, err := m.indexWriter.Reader() + if err != nil { + m.logger.Error("error accessing index reader", zap.Error(err)) + continue + } + + result, err := indexReader.Search(m.ctx, searchRequest) if err != nil { + _ = indexReader.Close() m.logger.Error("error searching index", zap.Error(err)) continue } + blugeMatches, err := IterateBlugeMatches(result, map[string]struct{}{}, m.logger) + if err != nil { + _ = indexReader.Close() + m.logger.Error("error iterating search results", zap.Error(err)) + continue + } + + err = indexReader.Close() + if err != nil { + m.logger.Error("error closing index reader", zap.Error(err)) + continue + } + // Form possible combinations, in case multiple matches might be suitable. entryCombos := make([][]*MatchmakerEntry, 0, 5) - for _, hit := range result.Hits { + for _, hit := range blugeMatches.Hits { if hit.ID == ticket { // Skip the current ticket. continue @@ -339,7 +361,7 @@ func (m *LocalMatchmaker) process(batch *bleve.Batch) { ticketsToDelete := make(map[string]struct{}, len(currentMatchedEntries)) for _, entry := range currentMatchedEntries { if _, ok := ticketsToDelete[entry.Ticket]; !ok { - batch.Delete(entry.Ticket) + batch.Delete(bluge.Identifier(entry.Ticket)) ticketsToDelete[entry.Ticket] = struct{}{} } delete(m.entries, entry.Ticket) @@ -362,7 +384,7 @@ func (m *LocalMatchmaker) process(batch *bleve.Batch) { } } } - if err := m.index.Batch(batch); err != nil { + if err := m.indexWriter.Batch(batch); err != nil { m.logger.Error("error deleting matchmaker process entries batch", zap.Error(err)) } batch.Reset() @@ -426,7 +448,6 @@ func (m *LocalMatchmaker) process(batch *bleve.Batch) { // Set per-recipient fields. outgoing.GetMatchmakerMatched().Self = users[i] outgoing.GetMatchmakerMatched().Ticket = entry.Ticket - // Route outgoing message. 
m.router.SendToPresenceIDs(m.logger, []*PresenceID{{Node: entry.Presence.Node, SessionID: entry.Presence.SessionID}}, outgoing, true) } @@ -439,9 +460,15 @@ func (m *LocalMatchmaker) Add(presences []*MatchmakerPresence, sessionID, partyI return "", runtime.ErrMatchmakerNotAvailable } - if bleve.NewQueryStringQuery(query).Validate() != nil { + parsedQuery, err := ParseQueryString(query) + if err != nil { return "", runtime.ErrMatchmakerQueryInvalid } + if parsedQuery, ok := parsedQuery.(ValidatableQuery); ok { + if parsedQuery.Validate() != nil { + return "", runtime.ErrMatchmakerQueryInvalid + } + } // Merge incoming properties. properties := make(map[string]interface{}, len(stringProperties)+len(numericProperties)) @@ -494,7 +521,14 @@ func (m *LocalMatchmaker) Add(presences []*MatchmakerPresence, sessionID, partyI } } - if err := m.index.Index(ticket, index); err != nil { + matchmakerIndexDoc, err := MapMatchmakerIndex(ticket, index) + if err != nil { + m.Unlock() + m.logger.Error("error mapping matchmaker index document", zap.Error(err)) + return "", runtime.ErrMatchmakerIndex + } + + if err := m.indexWriter.Update(bluge.Identifier(ticket), matchmakerIndexDoc); err != nil { m.Unlock() m.logger.Error("error indexing matchmaker entries", zap.Error(err)) return "", runtime.ErrMatchmakerIndex @@ -570,7 +604,7 @@ func (m *LocalMatchmaker) RemoveSession(sessionID, ticket string) error { delete(m.activeIndexes, ticket) - if err := m.index.Delete(ticket); err != nil { + if err := m.indexWriter.Delete(bluge.Identifier(ticket)); err != nil { m.Unlock() m.logger.Error("error deleting matchmaker entries", zap.Error(err)) return runtime.ErrMatchmakerDelete @@ -581,7 +615,7 @@ func (m *LocalMatchmaker) RemoveSession(sessionID, ticket string) error { } func (m *LocalMatchmaker) RemoveSessionAll(sessionID string) error { - batch := <-m.batchPool + batch := bluge.NewBatch() m.Lock() @@ -589,13 +623,12 @@ func (m *LocalMatchmaker) RemoveSessionAll(sessionID string) error { if !ok 
{ // Session does not have any active matchmaking tickets. m.Unlock() - m.batchPool <- batch return nil } delete(m.sessionTickets, sessionID) for ticket := range sessionTickets { - batch.Delete(ticket) + batch.Delete(bluge.Identifier(ticket)) index, ok := m.indexes[ticket] if !ok { @@ -638,16 +671,8 @@ func (m *LocalMatchmaker) RemoveSessionAll(sessionID string) error { } } - if batch.Size() == 0 { - m.Unlock() - m.batchPool <- batch - return nil - } - - err := m.index.Batch(batch) + err := m.indexWriter.Batch(batch) m.Unlock() - batch.Reset() - m.batchPool <- batch if err != nil { m.logger.Error("error deleting matchmaker entries batch", zap.Error(err)) return runtime.ErrMatchmakerDelete @@ -692,7 +717,7 @@ func (m *LocalMatchmaker) RemoveParty(partyID, ticket string) error { delete(m.activeIndexes, ticket) - if err := m.index.Delete(ticket); err != nil { + if err := m.indexWriter.Delete(bluge.Identifier(ticket)); err != nil { m.Unlock() m.logger.Error("error deleting matchmaker entries", zap.Error(err)) return runtime.ErrMatchmakerDelete @@ -703,7 +728,7 @@ func (m *LocalMatchmaker) RemoveParty(partyID, ticket string) error { } func (m *LocalMatchmaker) RemovePartyAll(partyID string) error { - batch := <-m.batchPool + batch := bluge.NewBatch() m.Lock() @@ -711,13 +736,12 @@ func (m *LocalMatchmaker) RemovePartyAll(partyID string) error { if !ok { // Party does not have any active matchmaking tickets. 
m.Unlock() - m.batchPool <- batch return nil } delete(m.partyTickets, partyID) for ticket := range partyTickets { - batch.Delete(ticket) + batch.Delete(bluge.Identifier(ticket)) _, ok := m.indexes[ticket] if !ok { @@ -746,19 +770,27 @@ func (m *LocalMatchmaker) RemovePartyAll(partyID string) error { } } - if batch.Size() == 0 { - m.Unlock() - m.batchPool <- batch - return nil - } - - err := m.index.Batch(batch) + err := m.indexWriter.Batch(batch) m.Unlock() - batch.Reset() - m.batchPool <- batch if err != nil { m.logger.Error("error deleting matchmaker entries batch", zap.Error(err)) return runtime.ErrMatchmakerDelete } return nil } + +func MapMatchmakerIndex(id string, in *MatchmakerIndex) (*bluge.Document, error) { + rv := bluge.NewDocument(id) + + rv.AddField(bluge.NewKeywordField("ticket", in.Ticket).StoreValue()) + rv.AddField(bluge.NewNumericField("min_count", float64(in.MinCount)).StoreValue()) + rv.AddField(bluge.NewNumericField("max_count", float64(in.MaxCount)).StoreValue()) + rv.AddField(bluge.NewKeywordField("party_id", in.PartyId).StoreValue()) + rv.AddField(bluge.NewNumericField("created_at", float64(in.CreatedAt)).StoreValue()) + + if in.Properties != nil { + BlugeWalkDocument(in.Properties, []string{"properties"}, rv) + } + + return rv, nil +} diff --git a/server/metrics.go b/server/metrics.go index 01c899ee0..4e85b2239 100644 --- a/server/metrics.go +++ b/server/metrics.go @@ -24,8 +24,8 @@ import ( "time" "github.com/gorilla/handlers" - "github.com/uber-go/tally" - "github.com/uber-go/tally/prometheus" + "github.com/uber-go/tally/v4" + "github.com/uber-go/tally/v4/prometheus" "go.uber.org/atomic" "go.uber.org/zap" ) diff --git a/server/pipeline_match.go b/server/pipeline_match.go index b099c903a..cb705dcb9 100644 --- a/server/pipeline_match.go +++ b/server/pipeline_match.go @@ -20,8 +20,8 @@ import ( "strings" "time" - "github.com/dgrijalva/jwt-go" "github.com/gofrs/uuid" + jwt "github.com/golang-jwt/jwt/v4" 
"github.com/heroiclabs/nakama-common/rtapi" "go.uber.org/zap" "google.golang.org/protobuf/types/known/wrapperspb" @@ -35,7 +35,14 @@ type matchDataFilter struct { } func (p *Pipeline) matchCreate(logger *zap.Logger, session Session, envelope *rtapi.Envelope) { - matchID := uuid.Must(uuid.NewV4()) + var matchID uuid.UUID + if name := envelope.GetMatchCreate().Name; name != "" { + // Match being created with a name. Use it to derive a match ID. + matchID = uuid.NewV5(uuid.NamespaceDNS, name) + } else { + // No name specified, fully random match ID. + matchID = uuid.Must(uuid.NewV4()) + } username := session.Username() diff --git a/server/runtime_javascript_init.go b/server/runtime_javascript_init.go index f62131b33..ad80a8ecb 100644 --- a/server/runtime_javascript_init.go +++ b/server/runtime_javascript_init.go @@ -938,10 +938,11 @@ func (im *RuntimeJavascriptInitModule) getInitModuleFn() (*ast.BlockStatement, s if funDecl, ok := dec.(*ast.FunctionDeclaration); ok && funDecl.Function.Name.Name == INIT_MODULE_FN_NAME { fl = funDecl.Function break - } else if varStat, ok := dec.(*ast.VariableStatement); ok && varStat.List[0].Name == INIT_MODULE_FN_NAME { - if funLiteral, ok := varStat.List[0].Initializer.(*ast.FunctionLiteral); ok { - fl = funLiteral - break + } else if varStat, ok := dec.(*ast.VariableStatement); ok { + if id, ok := varStat.List[0].Target.(*ast.Identifier); ok && id.Name == INIT_MODULE_FN_NAME { + if fnLit, ok := varStat.List[0].Initializer.(*ast.FunctionLiteral); ok { + fl = fnLit + } } } } @@ -949,8 +950,11 @@ func (im *RuntimeJavascriptInitModule) getInitModuleFn() (*ast.BlockStatement, s if fl == nil { return nil, "", errors.New("failed to find InitModule function") } + if len(fl.ParameterList.List) < 4 { + return nil, "", errors.New("InitModule function is missing params") + } - initFnName := fl.ParameterList.List[3].Name.String() // Initializer is the 4th argument of InitModule + initFnName := 
fl.ParameterList.List[3].Target.(*ast.Identifier).Name.String() // Initializer is the 4th argument of InitModule return fl.Body, initFnName, nil } @@ -1350,11 +1354,11 @@ func (im *RuntimeJavascriptInitModule) getMatchHookFnIdentifier(r *goja.Runtime, if obj, ok := callExp.ArgumentList[1].(*ast.ObjectLiteral); ok { for _, prop := range obj.Value { - key, _ := prop.Key.(*ast.StringLiteral) + key, _ := prop.(*ast.PropertyKeyed).Key.(*ast.StringLiteral) if key.Literal == string(matchfnId) { - if sl, ok := prop.Value.(*ast.StringLiteral); ok { + if sl, ok := prop.(*ast.PropertyKeyed).Value.(*ast.StringLiteral); ok { return sl.Literal, nil - } else if id, ok := prop.Value.(*ast.Identifier); ok { + } else if id, ok := prop.(*ast.PropertyKeyed).Value.(*ast.Identifier); ok { return id.Name.String(), nil } else { return "", inlinedFunctionError diff --git a/server/runtime_javascript_nakama.go b/server/runtime_javascript_nakama.go index e33125ad7..ab64df923 100644 --- a/server/runtime_javascript_nakama.go +++ b/server/runtime_javascript_nakama.go @@ -40,9 +40,9 @@ import ( "strings" "time" - "github.com/dgrijalva/jwt-go" "github.com/dop251/goja" "github.com/gofrs/uuid" + jwt "github.com/golang-jwt/jwt/v4" "github.com/heroiclabs/nakama-common/api" "github.com/heroiclabs/nakama-common/rtapi" "github.com/heroiclabs/nakama-common/runtime" diff --git a/server/runtime_lua_nakama.go b/server/runtime_lua_nakama.go index cb4e44816..386056cef 100644 --- a/server/runtime_lua_nakama.go +++ b/server/runtime_lua_nakama.go @@ -41,8 +41,8 @@ import ( "sync" "time" - "github.com/dgrijalva/jwt-go" "github.com/gofrs/uuid" + jwt "github.com/golang-jwt/jwt/v4" "github.com/heroiclabs/nakama-common/api" "github.com/heroiclabs/nakama-common/rtapi" "github.com/heroiclabs/nakama-common/runtime" diff --git a/social/social.go b/social/social.go index 432bb315e..84b7157bf 100644 --- a/social/social.go +++ b/social/social.go @@ -35,7 +35,7 @@ import ( "sync" "time" - "github.com/dgrijalva/jwt-go" + jwt 
"github.com/golang-jwt/jwt/v4" "go.uber.org/zap" ) diff --git a/vendor/github.com/RoaringBitmap/roaring/.gitignore b/vendor/github.com/RoaringBitmap/roaring/.gitignore index b7943ab20..851f323db 100644 --- a/vendor/github.com/RoaringBitmap/roaring/.gitignore +++ b/vendor/github.com/RoaringBitmap/roaring/.gitignore @@ -3,4 +3,3 @@ roaring-fuzz.zip workdir coverage.out testdata/all3.classic -testdata/all3.msgp.snappy diff --git a/vendor/github.com/RoaringBitmap/roaring/.travis.yml b/vendor/github.com/RoaringBitmap/roaring/.travis.yml index c17804322..0a4c4e918 100644 --- a/vendor/github.com/RoaringBitmap/roaring/.travis.yml +++ b/vendor/github.com/RoaringBitmap/roaring/.travis.yml @@ -8,13 +8,8 @@ install: notifications: email: false go: -- "1.7.x" -- "1.8.x" -- "1.9.x" -- "1.10.x" -- "1.11.x" -- "1.12.x" - "1.13.x" +- "1.14.x" - tip # whitelist @@ -22,7 +17,7 @@ branches: only: - master script: -- goveralls -v -service travis-ci -ignore arraycontainer_gen.go,bitmapcontainer_gen.go,rle16_gen.go,rle_gen.go,roaringarray_gen.go,rle.go || go test +- goveralls -v -service travis-ci -ignore rle16_gen.go,rle_gen.go,rle.go || go test - go test -race -run TestConcurrent* - go build -tags appengine - go test -tags appengine diff --git a/vendor/github.com/RoaringBitmap/roaring/CONTRIBUTORS b/vendor/github.com/RoaringBitmap/roaring/CONTRIBUTORS index b1e3a379f..1a8da9cc0 100644 --- a/vendor/github.com/RoaringBitmap/roaring/CONTRIBUTORS +++ b/vendor/github.com/RoaringBitmap/roaring/CONTRIBUTORS @@ -13,4 +13,6 @@ Forud Ghafouri (@fzerorubigd), Joe Nall (@joenall), (@fredim), Edd Robinson (@e-dard), -Alexander Petrov (@alldroll) +Alexander Petrov (@alldroll), +Guy Molinari (@guymolinari), +Ling Jin (@JinLingChristopher) diff --git a/vendor/github.com/RoaringBitmap/roaring/Makefile b/vendor/github.com/RoaringBitmap/roaring/Makefile index 906bd7256..0a4f9f0aa 100644 --- a/vendor/github.com/RoaringBitmap/roaring/Makefile +++ b/vendor/github.com/RoaringBitmap/roaring/Makefile @@ -64,7 
+64,7 @@ qa: fmtcheck test vet lint # Get the dependencies deps: GOPATH=$(GOPATH) go get github.com/stretchr/testify - GOPATH=$(GOPATH) go get github.com/willf/bitset + GOPATH=$(GOPATH) go get github.com/bits-and-blooms/bitset GOPATH=$(GOPATH) go get github.com/golang/lint/golint GOPATH=$(GOPATH) go get github.com/mschoch/smat GOPATH=$(GOPATH) go get github.com/dvyukov/go-fuzz/go-fuzz @@ -97,10 +97,6 @@ nuke: rm -rf ./target GOPATH=$(GOPATH) go clean -i ./... - -ser: - go generate - cover: go test -coverprofile=coverage.out go tool cover -html=coverage.out diff --git a/vendor/github.com/RoaringBitmap/roaring/README.md b/vendor/github.com/RoaringBitmap/roaring/README.md index 94fdf057e..2a7a12906 100644 --- a/vendor/github.com/RoaringBitmap/roaring/README.md +++ b/vendor/github.com/RoaringBitmap/roaring/README.md @@ -1,5 +1,8 @@ -roaring [![Build Status](https://travis-ci.org/RoaringBitmap/roaring.png)](https://travis-ci.org/RoaringBitmap/roaring) [![Coverage Status](https://coveralls.io/repos/github/RoaringBitmap/roaring/badge.svg?branch=master)](https://coveralls.io/github/RoaringBitmap/roaring?branch=master) [![GoDoc](https://godoc.org/github.com/RoaringBitmap/roaring?status.svg)](https://godoc.org/github.com/RoaringBitmap/roaring) [![Go Report Card](https://goreportcard.com/badge/RoaringBitmap/roaring)](https://goreportcard.com/report/github.com/RoaringBitmap/roaring) +roaring [![Build Status](https://travis-ci.org/RoaringBitmap/roaring.png)](https://travis-ci.org/RoaringBitmap/roaring) [![GoDoc](https://godoc.org/github.com/RoaringBitmap/roaring/roaring64?status.svg)](https://godoc.org/github.com/RoaringBitmap/roaring/roaring64) [![Go Report Card](https://goreportcard.com/badge/RoaringBitmap/roaring)](https://goreportcard.com/report/github.com/RoaringBitmap/roaring) [![Build Status](https://cloud.drone.io/api/badges/RoaringBitmap/roaring/status.svg)](https://cloud.drone.io/RoaringBitmap/roaring) 
+![Go-CI](https://github.com/RoaringBitmap/roaring/workflows/Go-CI/badge.svg) +![Go-ARM-CI](https://github.com/RoaringBitmap/roaring/workflows/Go-ARM-CI/badge.svg) +![Go-Windows-CI](https://github.com/RoaringBitmap/roaring/workflows/Go-Windows-CI/badge.svg) ============= This is a go version of the Roaring bitmap data structure. @@ -7,7 +10,7 @@ This is a go version of the Roaring bitmap data structure. Roaring bitmaps are used by several major systems such as [Apache Lucene][lucene] and derivative systems such as [Solr][solr] and -[Elasticsearch][elasticsearch], [Apache Druid (Incubating)][druid], [LinkedIn Pinot][pinot], [Netflix Atlas][atlas], [Apache Spark][spark], [OpenSearchServer][opensearchserver], [Cloud Torrent][cloudtorrent], [Whoosh][whoosh], [Pilosa][pilosa], [Microsoft Visual Studio Team Services (VSTS)][vsts], and eBay's [Apache Kylin][kylin]. +[Elasticsearch][elasticsearch], [Apache Druid (Incubating)][druid], [LinkedIn Pinot][pinot], [Netflix Atlas][atlas], [Apache Spark][spark], [OpenSearchServer][opensearchserver], [Cloud Torrent][cloudtorrent], [Whoosh][whoosh], [Pilosa][pilosa], [Microsoft Visual Studio Team Services (VSTS)][vsts], and eBay's [Apache Kylin][kylin]. The YouTube SQL Engine, [Google Procella](https://research.google/pubs/pub48388/), uses Roaring bitmaps for indexing. [lucene]: https://lucene.apache.org/ [solr]: https://lucene.apache.org/solr/ @@ -52,6 +55,93 @@ This code is licensed under Apache License, Version 2.0 (ASL2.0). Copyright 2016-... by the authors. +When should you use a bitmap? +=================================== + + +Sets are a fundamental abstraction in +software. They can be implemented in various +ways, as hash sets, as trees, and so forth. +In databases and search engines, sets are often an integral +part of indexes. For example, we may need to maintain a set +of all documents or rows (represented by numerical identifier) +that satisfy some property. 
Besides adding or removing +elements from the set, we need fast functions +to compute the intersection, the union, the difference between sets, and so on. + + +To implement a set +of integers, a particularly appealing strategy is the +bitmap (also called bitset or bit vector). Using n bits, +we can represent any set made of the integers from the range +[0,n): the ith bit is set to one if integer i is present in the set. +Commodity processors use words of W=32 or W=64 bits. By combining many such words, we can +support large values of n. Intersections, unions and differences can then be implemented + as bitwise AND, OR and ANDNOT operations. +More complicated set functions can also be implemented as bitwise operations. + +When the bitset approach is applicable, it can be orders of +magnitude faster than other possible implementation of a set (e.g., as a hash set) +while using several times less memory. + +However, a bitset, even a compressed one is not always applicable. For example, if +you have 1000 random-looking integers, then a simple array might be the best representation. +We refer to this case as the "sparse" scenario. + +When should you use compressed bitmaps? +=================================== + +An uncompressed BitSet can use a lot of memory. For example, if you take a BitSet +and set the bit at position 1,000,000 to true and you have just over 100kB. That is over 100kB +to store the position of one bit. This is wasteful even if you do not care about memory: +suppose that you need to compute the intersection between this BitSet and another one +that has a bit at position 1,000,001 to true, then you need to go through all these zeroes, +whether you like it or not. That can become very wasteful. + +This being said, there are definitively cases where attempting to use compressed bitmaps is wasteful. +For example, if you have a small universe size. E.g., your bitmaps represent sets of integers +from [0,n) where n is small (e.g., n=64 or n=128). 
If you are able to uncompressed BitSet and +it does not blow up your memory usage, then compressed bitmaps are probably not useful +to you. In fact, if you do not need compression, then a BitSet offers remarkable speed. + +The sparse scenario is another use case where compressed bitmaps should not be used. +Keep in mind that random-looking data is usually not compressible. E.g., if you have a small set of +32-bit random integers, it is not mathematically possible to use far less than 32 bits per integer, +and attempts at compression can be counterproductive. + +How does Roaring compares with the alternatives? +================================================== + + +Most alternatives to Roaring are part of a larger family of compressed bitmaps that are run-length-encoded +bitmaps. They identify long runs of 1s or 0s and they represent them with a marker word. +If you have a local mix of 1s and 0, you use an uncompressed word. + +There are many formats in this family: + +* Oracle's BBC is an obsolete format at this point: though it may provide good compression, +it is likely much slower than more recent alternatives due to excessive branching. +* WAH is a patented variation on BBC that provides better performance. +* Concise is a variation on the patented WAH. It some specific instances, it can compress +much better than WAH (up to 2x better), but it is generally slower. +* EWAH is both free of patent, and it is faster than all the above. On the downside, it +does not compress quite as well. It is faster because it allows some form of "skipping" +over uncompressed words. So though none of these formats are great at random access, EWAH +is better than the alternatives. + + + +There is a big problem with these formats however that can hurt you badly in some cases: there is no random access. If you want to check whether a given value is present in the set, you have to start from the beginning and "uncompress" the whole thing. 
This means that if you want to intersect a big set with a large set, you still have to uncompress the whole big set in the worst case... + +Roaring solves this problem. It works in the following manner. It divides the data into chunks of 216 integers +(e.g., [0, 216), [216, 2 x 216), ...). Within a chunk, it can use an uncompressed bitmap, a simple list of integers, +or a list of runs. Whatever format it uses, they all allow you to check for the present of any one value quickly +(e.g., with a binary search). The net result is that Roaring can compute many operations much faster than run-length-encoded +formats like WAH, EWAH, Concise... Maybe surprisingly, Roaring also generally offers better compression ratios. + + + + ### References @@ -68,7 +158,7 @@ http://arxiv.org/abs/1402.6407 This paper used data from http://lemire.me/data/r Dependencies are fetched automatically by giving the `-t` flag to `go get`. they include - - github.com/willf/bitset + - github.com/bits-and-blooms/bitset - github.com/mschoch/smat - github.com/glycerine/go-unsnap-stream - github.com/philhofer/fwd @@ -172,10 +262,70 @@ That is, given a fixed overhead for the universe size (x), Roaring bitmaps never use more than 2 bytes per integer. You can call ``BoundSerializedSizeInBytes`` for a more precise estimate. +### 64-bit Roaring + +By default, roaring is used to stored unsigned 32-bit integers. However, we also offer +an extension dedicated to 64-bit integers. 
It supports roughly the same functions: + +```go +package main + +import ( + "fmt" + "github.com/RoaringBitmap/roaring/roaring64" + "bytes" +) + + +func main() { + // example inspired by https://github.com/fzandona/goroar + fmt.Println("==roaring64==") + rb1 := roaring64.BitmapOf(1, 2, 3, 4, 5, 100, 1000) + fmt.Println(rb1.String()) + + rb2 := roaring64.BitmapOf(3, 4, 1000) + fmt.Println(rb2.String()) + + rb3 := roaring64.New() + fmt.Println(rb3.String()) + + fmt.Println("Cardinality: ", rb1.GetCardinality()) + + fmt.Println("Contains 3? ", rb1.Contains(3)) + + rb1.And(rb2) + + rb3.Add(1) + rb3.Add(5) + + rb3.Or(rb1) + + + + // prints 1, 3, 4, 5, 1000 + i := rb3.Iterator() + for i.HasNext() { + fmt.Println(i.Next()) + } + fmt.Println() + + // next we include an example of serialization + buf := new(bytes.Buffer) + rb1.WriteTo(buf) // we omit error handling + newrb:= roaring64.New() + newrb.ReadFrom(buf) + if rb1.Equals(newrb) { + fmt.Println("I wrote the content to a byte stream and read it back.") + } + // you can iterate over bitmaps using ReverseIterator(), Iterator, ManyIterator() +} +``` + +Only the 32-bit roaring format is standard and cross-operable between Java, C++, C and Go. There is no guarantee that the 64-bit versions are compatible. 
### Documentation -Current documentation is available at http://godoc.org/github.com/RoaringBitmap/roaring +Current documentation is available at http://godoc.org/github.com/RoaringBitmap/roaring and http://godoc.org/github.com/RoaringBitmap/roaring64 ### Goroutine safety @@ -234,12 +384,14 @@ You can help us test further the library with fuzzy testing: go get github.com/dvyukov/go-fuzz/go-fuzz-build go test -tags=gofuzz -run=TestGenerateSmatCorpus go-fuzz-build github.com/RoaringBitmap/roaring - go-fuzz -bin=./roaring-fuzz.zip -workdir=workdir/ -timeout=200 + go-fuzz -bin=./roaring-fuzz.zip -workdir=workdir/ -timeout=200 -func FuzzSmat Let it run, and if the # of crashers is > 0, check out the reports in the workdir where you should be able to find the panic goroutine stack traces. +You may also replace `-func FuzzSmat` by `-func FuzzSerializationBuffer` or `-func FuzzSerializationStream`. + ### Alternative in Go There is a Go version wrapping the C/C++ implementation https://github.com/RoaringBitmap/gocroaring diff --git a/vendor/github.com/RoaringBitmap/roaring/arraycontainer.go b/vendor/github.com/RoaringBitmap/roaring/arraycontainer.go index eb124f3b7..80b7eecf7 100644 --- a/vendor/github.com/RoaringBitmap/roaring/arraycontainer.go +++ b/vendor/github.com/RoaringBitmap/roaring/arraycontainer.go @@ -4,8 +4,6 @@ import ( "fmt" ) -//go:generate msgp -unexported - type arrayContainer struct { content []uint16 } @@ -18,10 +16,11 @@ func (ac *arrayContainer) String() string { return s + "}" } -func (ac *arrayContainer) fillLeastSignificant16bits(x []uint32, i int, mask uint32) { +func (ac *arrayContainer) fillLeastSignificant16bits(x []uint32, i int, mask uint32) int { for k := 0; k < len(ac.content); k++ { x[k+i] = uint32(ac.content[k]) | mask } + return i + len(ac.content) } func (ac *arrayContainer) iterate(cb func(x uint16) bool) bool { @@ -359,28 +358,17 @@ func (ac *arrayContainer) iorArray(value2 *arrayContainer) container { len1 := value1.getCardinality() 
len2 := value2.getCardinality() maxPossibleCardinality := len1 + len2 - if maxPossibleCardinality > arrayDefaultMaxSize { // it could be a bitmap! - bc := newBitmapContainer() - for k := 0; k < len(value2.content); k++ { - v := value2.content[k] - i := uint(v) >> 6 - mask := uint64(1) << (v % 64) - bc.bitmap[i] |= mask - } - for k := 0; k < len(ac.content); k++ { - v := ac.content[k] - i := uint(v) >> 6 - mask := uint64(1) << (v % 64) - bc.bitmap[i] |= mask - } - bc.cardinality = int(popcntSlice(bc.bitmap)) - if bc.cardinality <= arrayDefaultMaxSize { - return bc.toArrayContainer() - } - return bc - } if maxPossibleCardinality > cap(value1.content) { - newcontent := make([]uint16, 0, maxPossibleCardinality) + // doubling the capacity reduces new slice allocations in the case of + // repeated calls to iorArray(). + newSize := 2 * maxPossibleCardinality + // the second check is to handle overly large array containers + // and should not occur in normal usage, + // as all array containers should be at most arrayDefaultMaxSize + if newSize > 2*arrayDefaultMaxSize && maxPossibleCardinality <= 2*arrayDefaultMaxSize { + newSize = 2 * arrayDefaultMaxSize + } + newcontent := make([]uint16, 0, newSize) copy(newcontent[len2:maxPossibleCardinality], ac.content[0:len1]) ac.content = newcontent } else { @@ -388,6 +376,13 @@ func (ac *arrayContainer) iorArray(value2 *arrayContainer) container { } nl := union2by2(value1.content[len2:maxPossibleCardinality], value2.content, ac.content) ac.content = ac.content[:nl] // reslice to match actual used capacity + + if nl > arrayDefaultMaxSize { + // Only converting to a bitmap when arrayDefaultMaxSize + // is actually exceeded minimizes conversions in the case of repeated + // calls to iorArray(). 
+ return ac.toBitmapContainer() + } return ac } @@ -400,11 +395,19 @@ func (ac *arrayContainer) iorBitmap(bc2 *bitmapContainer) container { } func (ac *arrayContainer) iorRun16(rc *runContainer16) container { - bc1 := ac.toBitmapContainer() - bc2 := rc.toBitmapContainer() - bc1.iorBitmap(bc2) - *ac = *newArrayContainerFromBitmap(bc1) - return ac + runCardinality := rc.getCardinality() + // heuristic for if the container should maybe be an + // array container. + if runCardinality < ac.getCardinality() && + runCardinality+ac.getCardinality() < arrayDefaultMaxSize { + var result container + result = ac + for _, run := range rc.iv { + result = result.iaddRange(int(run.start), int(run.start)+int(run.length)+1) + } + return result + } + return rc.orArray(ac) } func (ac *arrayContainer) lazyIOR(a container) container { @@ -489,7 +492,7 @@ func (ac *arrayContainer) orArrayCardinality(value2 *arrayContainer) int { func (ac *arrayContainer) lazyorArray(value2 *arrayContainer) container { value1 := ac maxPossibleCardinality := value1.getCardinality() + value2.getCardinality() - if maxPossibleCardinality > arrayLazyLowerBound { // it could be a bitmap!^M + if maxPossibleCardinality > arrayLazyLowerBound { // it could be a bitmap! 
bc := newBitmapContainer() for k := 0; k < len(value2.content); k++ { v := value2.content[k] @@ -849,6 +852,10 @@ func (ac *arrayContainer) getCardinality() int { return len(ac.content) } +func (ac *arrayContainer) isEmpty() bool { + return len(ac.content) == 0 +} + func (ac *arrayContainer) rank(x uint16) int { answer := binarySearch(ac.content, x) if answer >= 0 { @@ -876,6 +883,41 @@ func (ac *arrayContainer) loadData(bitmapContainer *bitmapContainer) { ac.content = make([]uint16, bitmapContainer.cardinality, bitmapContainer.cardinality) bitmapContainer.fillArray(ac.content) } + +func (ac *arrayContainer) resetTo(a container) { + switch x := a.(type) { + case *arrayContainer: + ac.realloc(len(x.content)) + copy(ac.content, x.content) + + case *bitmapContainer: + ac.realloc(x.cardinality) + x.fillArray(ac.content) + + case *runContainer16: + card := int(x.getCardinality()) + ac.realloc(card) + cur := 0 + for _, r := range x.iv { + for val := r.start; val <= r.last(); val++ { + ac.content[cur] = val + cur++ + } + } + + default: + panic("unsupported container type") + } +} + +func (ac *arrayContainer) realloc(size int) { + if cap(ac.content) < size { + ac.content = make([]uint16, size) + } else { + ac.content = ac.content[:size] + } +} + func newArrayContainer() *arrayContainer { p := new(arrayContainer) return p @@ -927,10 +969,10 @@ func (ac *arrayContainer) numberOfRuns() (nr int) { runlen++ } else { if cur < prev { - panic("then fundamental arrayContainer assumption of sorted ac.content was broken") + panic("the fundamental arrayContainer assumption of sorted ac.content was broken") } if cur == prev { - panic("then fundamental arrayContainer assumption of deduplicated content was broken") + panic("the fundamental arrayContainer assumption of deduplicated content was broken") } else { nr++ runlen = 0 diff --git a/vendor/github.com/RoaringBitmap/roaring/arraycontainer_gen.go b/vendor/github.com/RoaringBitmap/roaring/arraycontainer_gen.go deleted file mode 100644 
index 6ee670ee5..000000000 --- a/vendor/github.com/RoaringBitmap/roaring/arraycontainer_gen.go +++ /dev/null @@ -1,134 +0,0 @@ -package roaring - -// NOTE: THIS FILE WAS PRODUCED BY THE -// MSGP CODE GENERATION TOOL (github.com/tinylib/msgp) -// DO NOT EDIT - -import "github.com/tinylib/msgp/msgp" - -// Deprecated: DecodeMsg implements msgp.Decodable -func (z *arrayContainer) DecodeMsg(dc *msgp.Reader) (err error) { - var field []byte - _ = field - var zbzg uint32 - zbzg, err = dc.ReadMapHeader() - if err != nil { - return - } - for zbzg > 0 { - zbzg-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - return - } - switch msgp.UnsafeString(field) { - case "content": - var zbai uint32 - zbai, err = dc.ReadArrayHeader() - if err != nil { - return - } - if cap(z.content) >= int(zbai) { - z.content = (z.content)[:zbai] - } else { - z.content = make([]uint16, zbai) - } - for zxvk := range z.content { - z.content[zxvk], err = dc.ReadUint16() - if err != nil { - return - } - } - default: - err = dc.Skip() - if err != nil { - return - } - } - } - return -} - -// Deprecated: EncodeMsg implements msgp.Encodable -func (z *arrayContainer) EncodeMsg(en *msgp.Writer) (err error) { - // map header, size 1 - // write "content" - err = en.Append(0x81, 0xa7, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74) - if err != nil { - return err - } - err = en.WriteArrayHeader(uint32(len(z.content))) - if err != nil { - return - } - for zxvk := range z.content { - err = en.WriteUint16(z.content[zxvk]) - if err != nil { - return - } - } - return -} - -// Deprecated: MarshalMsg implements msgp.Marshaler -func (z *arrayContainer) MarshalMsg(b []byte) (o []byte, err error) { - o = msgp.Require(b, z.Msgsize()) - // map header, size 1 - // string "content" - o = append(o, 0x81, 0xa7, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74) - o = msgp.AppendArrayHeader(o, uint32(len(z.content))) - for zxvk := range z.content { - o = msgp.AppendUint16(o, z.content[zxvk]) - } - return -} - -// Deprecated: UnmarshalMsg 
implements msgp.Unmarshaler -func (z *arrayContainer) UnmarshalMsg(bts []byte) (o []byte, err error) { - var field []byte - _ = field - var zcmr uint32 - zcmr, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - return - } - for zcmr > 0 { - zcmr-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - return - } - switch msgp.UnsafeString(field) { - case "content": - var zajw uint32 - zajw, bts, err = msgp.ReadArrayHeaderBytes(bts) - if err != nil { - return - } - if cap(z.content) >= int(zajw) { - z.content = (z.content)[:zajw] - } else { - z.content = make([]uint16, zajw) - } - for zxvk := range z.content { - z.content[zxvk], bts, err = msgp.ReadUint16Bytes(bts) - if err != nil { - return - } - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - return - } - } - } - o = bts - return -} - -// Deprecated: Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z *arrayContainer) Msgsize() (s int) { - s = 1 + 8 + msgp.ArrayHeaderSize + (len(z.content) * (msgp.Uint16Size)) - return -} diff --git a/vendor/github.com/RoaringBitmap/roaring/bitmapcontainer.go b/vendor/github.com/RoaringBitmap/roaring/bitmapcontainer.go index cd259fd2d..f8367da0e 100644 --- a/vendor/github.com/RoaringBitmap/roaring/bitmapcontainer.go +++ b/vendor/github.com/RoaringBitmap/roaring/bitmapcontainer.go @@ -5,8 +5,6 @@ import ( "unsafe" ) -//go:generate msgp -unexported - type bitmapContainer struct { cardinality int bitmap []uint64 @@ -115,7 +113,7 @@ type bitmapContainerShortIterator struct { func (bcsi *bitmapContainerShortIterator) next() uint16 { j := bcsi.i - bcsi.i = bcsi.ptr.NextSetBit(bcsi.i + 1) + bcsi.i = bcsi.ptr.NextSetBit(uint(bcsi.i) + 1) return uint16(j) } func (bcsi *bitmapContainerShortIterator) hasNext() bool { @@ -128,7 +126,7 @@ func (bcsi *bitmapContainerShortIterator) peekNext() uint16 { func (bcsi *bitmapContainerShortIterator) advanceIfNeeded(minval uint16) { if bcsi.hasNext() && bcsi.peekNext() 
< minval { - bcsi.i = bcsi.ptr.NextSetBit(int(minval)) + bcsi.i = bcsi.ptr.NextSetBit(uint(minval)) } } @@ -203,6 +201,33 @@ func (bcmi *bitmapContainerManyIterator) nextMany(hs uint32, buf []uint32) int { return n } +func (bcmi *bitmapContainerManyIterator) nextMany64(hs uint64, buf []uint64) int { + n := 0 + base := bcmi.base + bitset := bcmi.bitset + + for n < len(buf) { + if bitset == 0 { + base++ + if base >= len(bcmi.ptr.bitmap) { + bcmi.base = base + bcmi.bitset = bitset + return n + } + bitset = bcmi.ptr.bitmap[base] + continue + } + t := bitset & -bitset + buf[n] = uint64(((base * 64) + int(popcount(t-1)))) | hs + n = n + 1 + bitset ^= t + } + + bcmi.base = base + bcmi.bitset = bitset + return n +} + func newBitmapContainerManyIterator(a *bitmapContainer) *bitmapContainerManyIterator { return &bitmapContainerManyIterator{a, -1, 0} } @@ -239,7 +264,7 @@ func bitmapEquals(a, b []uint64) bool { return true } -func (bc *bitmapContainer) fillLeastSignificant16bits(x []uint32, i int, mask uint32) { +func (bc *bitmapContainer) fillLeastSignificant16bits(x []uint32, i int, mask uint32) int { // TODO: should be written as optimized assembly pos := i base := mask @@ -253,6 +278,7 @@ func (bc *bitmapContainer) fillLeastSignificant16bits(x []uint32, i int, mask ui } base += 64 } + return pos } func (bc *bitmapContainer) equals(o container) bool { @@ -324,6 +350,11 @@ func (bc *bitmapContainer) getCardinality() int { return bc.cardinality } + +func (bc *bitmapContainer) isEmpty() bool { + return bc.cardinality == 0 +} + func (bc *bitmapContainer) clone() container { ptr := bitmapContainer{bc.cardinality, make([]uint64, len(bc.bitmap))} copy(ptr.bitmap, bc.bitmap[:]) @@ -934,6 +965,32 @@ func (bc *bitmapContainer) loadData(arrayContainer *arrayContainer) { } } +func (bc *bitmapContainer) resetTo(a container) { + switch x := a.(type) { + case *arrayContainer: + fill(bc.bitmap, 0) + bc.loadData(x) + + case *bitmapContainer: + bc.cardinality = x.cardinality + 
copy(bc.bitmap, x.bitmap) + + case *runContainer16: + bc.cardinality = len(x.iv) + lastEnd := 0 + for _, r := range x.iv { + bc.cardinality += int(r.length) + resetBitmapRange(bc.bitmap, lastEnd, int(r.start)) + lastEnd = int(r.start+r.length) + 1 + setBitmapRange(bc.bitmap, int(r.start), lastEnd) + } + resetBitmapRange(bc.bitmap, lastEnd, maxCapacity) + + default: + panic("unsupported container type") + } +} + func (bc *bitmapContainer) toArrayContainer() *arrayContainer { ac := &arrayContainer{} ac.loadData(bc) @@ -956,20 +1013,23 @@ func (bc *bitmapContainer) fillArray(container []uint16) { } } -func (bc *bitmapContainer) NextSetBit(i int) int { - x := i / 64 - if x >= len(bc.bitmap) { +func (bc *bitmapContainer) NextSetBit(i uint) int { + var ( + x = i / 64 + length = uint(len(bc.bitmap)) + ) + if x >= length { return -1 } w := bc.bitmap[x] w = w >> uint(i%64) if w != 0 { - return i + countTrailingZeros(w) + return int(i) + countTrailingZeros(w) } x++ - for ; x < len(bc.bitmap); x++ { + for ; x < length; x++ { if bc.bitmap[x] != 0 { - return (x * 64) + countTrailingZeros(bc.bitmap[x]) + return int(x*64) + countTrailingZeros(bc.bitmap[x]) } } return -1 @@ -1078,16 +1138,12 @@ func (bc *bitmapContainer) addOffset(x uint16) []container { low.bitmap[b] = bc.bitmap[0] << i for k := uint32(1); k < end; k++ { newval := bc.bitmap[k] << i - if newval == 0 { - newval = bc.bitmap[k-1] >> (64 - i) - } + newval |= bc.bitmap[k-1] >> (64 - i) low.bitmap[b+k] = newval } for k := end; k < 1024; k++ { newval := bc.bitmap[k] << i - if newval == 0 { - newval = bc.bitmap[k-1] >> (64 - i) - } + newval |= bc.bitmap[k-1] >> (64 - i) high.bitmap[k-end] = newval } high.bitmap[b] = bc.bitmap[1023] >> (64 - i) diff --git a/vendor/github.com/RoaringBitmap/roaring/bitmapcontainer_gen.go b/vendor/github.com/RoaringBitmap/roaring/bitmapcontainer_gen.go deleted file mode 100644 index 9b5a465f3..000000000 --- a/vendor/github.com/RoaringBitmap/roaring/bitmapcontainer_gen.go +++ /dev/null @@ 
-1,415 +0,0 @@ -package roaring - -// NOTE: THIS FILE WAS PRODUCED BY THE -// MSGP CODE GENERATION TOOL (github.com/tinylib/msgp) -// DO NOT EDIT - -import "github.com/tinylib/msgp/msgp" - -// Deprecated: DecodeMsg implements msgp.Decodable -func (z *bitmapContainer) DecodeMsg(dc *msgp.Reader) (err error) { - var field []byte - _ = field - var zbzg uint32 - zbzg, err = dc.ReadMapHeader() - if err != nil { - return - } - for zbzg > 0 { - zbzg-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - return - } - switch msgp.UnsafeString(field) { - case "cardinality": - z.cardinality, err = dc.ReadInt() - if err != nil { - return - } - case "bitmap": - var zbai uint32 - zbai, err = dc.ReadArrayHeader() - if err != nil { - return - } - if cap(z.bitmap) >= int(zbai) { - z.bitmap = (z.bitmap)[:zbai] - } else { - z.bitmap = make([]uint64, zbai) - } - for zxvk := range z.bitmap { - z.bitmap[zxvk], err = dc.ReadUint64() - if err != nil { - return - } - } - default: - err = dc.Skip() - if err != nil { - return - } - } - } - return -} - -// Deprecated: EncodeMsg implements msgp.Encodable -func (z *bitmapContainer) EncodeMsg(en *msgp.Writer) (err error) { - // map header, size 2 - // write "cardinality" - err = en.Append(0x82, 0xab, 0x63, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x74, 0x79) - if err != nil { - return err - } - err = en.WriteInt(z.cardinality) - if err != nil { - return - } - // write "bitmap" - err = en.Append(0xa6, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x70) - if err != nil { - return err - } - err = en.WriteArrayHeader(uint32(len(z.bitmap))) - if err != nil { - return - } - for zxvk := range z.bitmap { - err = en.WriteUint64(z.bitmap[zxvk]) - if err != nil { - return - } - } - return -} - -// Deprecated: MarshalMsg implements msgp.Marshaler -func (z *bitmapContainer) MarshalMsg(b []byte) (o []byte, err error) { - o = msgp.Require(b, z.Msgsize()) - // map header, size 2 - // string "cardinality" - o = append(o, 0x82, 0xab, 0x63, 0x61, 0x72, 0x64, 0x69, 0x6e, 
0x61, 0x6c, 0x69, 0x74, 0x79) - o = msgp.AppendInt(o, z.cardinality) - // string "bitmap" - o = append(o, 0xa6, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x70) - o = msgp.AppendArrayHeader(o, uint32(len(z.bitmap))) - for zxvk := range z.bitmap { - o = msgp.AppendUint64(o, z.bitmap[zxvk]) - } - return -} - -// Deprecated: UnmarshalMsg implements msgp.Unmarshaler -func (z *bitmapContainer) UnmarshalMsg(bts []byte) (o []byte, err error) { - var field []byte - _ = field - var zcmr uint32 - zcmr, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - return - } - for zcmr > 0 { - zcmr-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - return - } - switch msgp.UnsafeString(field) { - case "cardinality": - z.cardinality, bts, err = msgp.ReadIntBytes(bts) - if err != nil { - return - } - case "bitmap": - var zajw uint32 - zajw, bts, err = msgp.ReadArrayHeaderBytes(bts) - if err != nil { - return - } - if cap(z.bitmap) >= int(zajw) { - z.bitmap = (z.bitmap)[:zajw] - } else { - z.bitmap = make([]uint64, zajw) - } - for zxvk := range z.bitmap { - z.bitmap[zxvk], bts, err = msgp.ReadUint64Bytes(bts) - if err != nil { - return - } - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - return - } - } - } - o = bts - return -} - -// Deprecated: Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z *bitmapContainer) Msgsize() (s int) { - s = 1 + 12 + msgp.IntSize + 7 + msgp.ArrayHeaderSize + (len(z.bitmap) * (msgp.Uint64Size)) - return -} - -// Deprecated: DecodeMsg implements msgp.Decodable -func (z *bitmapContainerShortIterator) DecodeMsg(dc *msgp.Reader) (err error) { - var field []byte - _ = field - var zhct uint32 - zhct, err = dc.ReadMapHeader() - if err != nil { - return - } - for zhct > 0 { - zhct-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - return - } - switch msgp.UnsafeString(field) { - case "ptr": - if dc.IsNil() { - err = dc.ReadNil() - if err != nil { - return - } - z.ptr = nil - } else { 
- if z.ptr == nil { - z.ptr = new(bitmapContainer) - } - var zcua uint32 - zcua, err = dc.ReadMapHeader() - if err != nil { - return - } - for zcua > 0 { - zcua-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - return - } - switch msgp.UnsafeString(field) { - case "cardinality": - z.ptr.cardinality, err = dc.ReadInt() - if err != nil { - return - } - case "bitmap": - var zxhx uint32 - zxhx, err = dc.ReadArrayHeader() - if err != nil { - return - } - if cap(z.ptr.bitmap) >= int(zxhx) { - z.ptr.bitmap = (z.ptr.bitmap)[:zxhx] - } else { - z.ptr.bitmap = make([]uint64, zxhx) - } - for zwht := range z.ptr.bitmap { - z.ptr.bitmap[zwht], err = dc.ReadUint64() - if err != nil { - return - } - } - default: - err = dc.Skip() - if err != nil { - return - } - } - } - } - case "i": - z.i, err = dc.ReadInt() - if err != nil { - return - } - default: - err = dc.Skip() - if err != nil { - return - } - } - } - return -} - -// Deprecated: EncodeMsg implements msgp.Encodable -func (z *bitmapContainerShortIterator) EncodeMsg(en *msgp.Writer) (err error) { - // map header, size 2 - // write "ptr" - err = en.Append(0x82, 0xa3, 0x70, 0x74, 0x72) - if err != nil { - return err - } - if z.ptr == nil { - err = en.WriteNil() - if err != nil { - return - } - } else { - // map header, size 2 - // write "cardinality" - err = en.Append(0x82, 0xab, 0x63, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x74, 0x79) - if err != nil { - return err - } - err = en.WriteInt(z.ptr.cardinality) - if err != nil { - return - } - // write "bitmap" - err = en.Append(0xa6, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x70) - if err != nil { - return err - } - err = en.WriteArrayHeader(uint32(len(z.ptr.bitmap))) - if err != nil { - return - } - for zwht := range z.ptr.bitmap { - err = en.WriteUint64(z.ptr.bitmap[zwht]) - if err != nil { - return - } - } - } - // write "i" - err = en.Append(0xa1, 0x69) - if err != nil { - return err - } - err = en.WriteInt(z.i) - if err != nil { - return - } - return -} - -// Deprecated: 
MarshalMsg implements msgp.Marshaler -func (z *bitmapContainerShortIterator) MarshalMsg(b []byte) (o []byte, err error) { - o = msgp.Require(b, z.Msgsize()) - // map header, size 2 - // string "ptr" - o = append(o, 0x82, 0xa3, 0x70, 0x74, 0x72) - if z.ptr == nil { - o = msgp.AppendNil(o) - } else { - // map header, size 2 - // string "cardinality" - o = append(o, 0x82, 0xab, 0x63, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x74, 0x79) - o = msgp.AppendInt(o, z.ptr.cardinality) - // string "bitmap" - o = append(o, 0xa6, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x70) - o = msgp.AppendArrayHeader(o, uint32(len(z.ptr.bitmap))) - for zwht := range z.ptr.bitmap { - o = msgp.AppendUint64(o, z.ptr.bitmap[zwht]) - } - } - // string "i" - o = append(o, 0xa1, 0x69) - o = msgp.AppendInt(o, z.i) - return -} - -// Deprecated: UnmarshalMsg implements msgp.Unmarshaler -func (z *bitmapContainerShortIterator) UnmarshalMsg(bts []byte) (o []byte, err error) { - var field []byte - _ = field - var zlqf uint32 - zlqf, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - return - } - for zlqf > 0 { - zlqf-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - return - } - switch msgp.UnsafeString(field) { - case "ptr": - if msgp.IsNil(bts) { - bts, err = msgp.ReadNilBytes(bts) - if err != nil { - return - } - z.ptr = nil - } else { - if z.ptr == nil { - z.ptr = new(bitmapContainer) - } - var zdaf uint32 - zdaf, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - return - } - for zdaf > 0 { - zdaf-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - return - } - switch msgp.UnsafeString(field) { - case "cardinality": - z.ptr.cardinality, bts, err = msgp.ReadIntBytes(bts) - if err != nil { - return - } - case "bitmap": - var zpks uint32 - zpks, bts, err = msgp.ReadArrayHeaderBytes(bts) - if err != nil { - return - } - if cap(z.ptr.bitmap) >= int(zpks) { - z.ptr.bitmap = (z.ptr.bitmap)[:zpks] - } else { - z.ptr.bitmap = make([]uint64, zpks) - } - for zwht := 
range z.ptr.bitmap { - z.ptr.bitmap[zwht], bts, err = msgp.ReadUint64Bytes(bts) - if err != nil { - return - } - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - return - } - } - } - } - case "i": - z.i, bts, err = msgp.ReadIntBytes(bts) - if err != nil { - return - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - return - } - } - } - o = bts - return -} - -// Deprecated: Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z *bitmapContainerShortIterator) Msgsize() (s int) { - s = 1 + 4 - if z.ptr == nil { - s += msgp.NilSize - } else { - s += 1 + 12 + msgp.IntSize + 7 + msgp.ArrayHeaderSize + (len(z.ptr.bitmap) * (msgp.Uint64Size)) - } - s += 2 + msgp.IntSize - return -} diff --git a/vendor/github.com/RoaringBitmap/roaring/byte_input.go b/vendor/github.com/RoaringBitmap/roaring/byte_input.go deleted file mode 100644 index f7a98a1d4..000000000 --- a/vendor/github.com/RoaringBitmap/roaring/byte_input.go +++ /dev/null @@ -1,161 +0,0 @@ -package roaring - -import ( - "encoding/binary" - "io" -) - -type byteInput interface { - // next returns a slice containing the next n bytes from the buffer, - // advancing the buffer as if the bytes had been returned by Read. 
- next(n int) ([]byte, error) - // readUInt32 reads uint32 with LittleEndian order - readUInt32() (uint32, error) - // readUInt16 reads uint16 with LittleEndian order - readUInt16() (uint16, error) - // getReadBytes returns read bytes - getReadBytes() int64 - // skipBytes skips exactly n bytes - skipBytes(n int) error -} - -func newByteInputFromReader(reader io.Reader) byteInput { - return &byteInputAdapter{ - r: reader, - readBytes: 0, - } -} - -func newByteInput(buf []byte) byteInput { - return &byteBuffer{ - buf: buf, - off: 0, - } -} - -type byteBuffer struct { - buf []byte - off int -} - -// next returns a slice containing the next n bytes from the reader -// If there are fewer bytes than the given n, io.ErrUnexpectedEOF will be returned -func (b *byteBuffer) next(n int) ([]byte, error) { - m := len(b.buf) - b.off - - if n > m { - return nil, io.ErrUnexpectedEOF - } - - data := b.buf[b.off : b.off+n] - b.off += n - - return data, nil -} - -// readUInt32 reads uint32 with LittleEndian order -func (b *byteBuffer) readUInt32() (uint32, error) { - if len(b.buf)-b.off < 4 { - return 0, io.ErrUnexpectedEOF - } - - v := binary.LittleEndian.Uint32(b.buf[b.off:]) - b.off += 4 - - return v, nil -} - -// readUInt16 reads uint16 with LittleEndian order -func (b *byteBuffer) readUInt16() (uint16, error) { - if len(b.buf)-b.off < 2 { - return 0, io.ErrUnexpectedEOF - } - - v := binary.LittleEndian.Uint16(b.buf[b.off:]) - b.off += 2 - - return v, nil -} - -// getReadBytes returns read bytes -func (b *byteBuffer) getReadBytes() int64 { - return int64(b.off) -} - -// skipBytes skips exactly n bytes -func (b *byteBuffer) skipBytes(n int) error { - m := len(b.buf) - b.off - - if n > m { - return io.ErrUnexpectedEOF - } - - b.off += n - - return nil -} - -// reset resets the given buffer with a new byte slice -func (b *byteBuffer) reset(buf []byte) { - b.buf = buf - b.off = 0 -} - -type byteInputAdapter struct { - r io.Reader - readBytes int -} - -// next returns a slice 
containing the next n bytes from the buffer, -// advancing the buffer as if the bytes had been returned by Read. -func (b *byteInputAdapter) next(n int) ([]byte, error) { - buf := make([]byte, n) - m, err := io.ReadAtLeast(b.r, buf, n) - b.readBytes += m - - if err != nil { - return nil, err - } - - return buf, nil -} - -// readUInt32 reads uint32 with LittleEndian order -func (b *byteInputAdapter) readUInt32() (uint32, error) { - buf, err := b.next(4) - - if err != nil { - return 0, err - } - - return binary.LittleEndian.Uint32(buf), nil -} - -// readUInt16 reads uint16 with LittleEndian order -func (b *byteInputAdapter) readUInt16() (uint16, error) { - buf, err := b.next(2) - - if err != nil { - return 0, err - } - - return binary.LittleEndian.Uint16(buf), nil -} - -// getReadBytes returns read bytes -func (b *byteInputAdapter) getReadBytes() int64 { - return int64(b.readBytes) -} - -// skipBytes skips exactly n bytes -func (b *byteInputAdapter) skipBytes(n int) error { - _, err := b.next(n) - - return err -} - -// reset resets the given buffer with a new stream -func (b *byteInputAdapter) reset(stream io.Reader) { - b.r = stream - b.readBytes = 0 -} diff --git a/vendor/github.com/RoaringBitmap/roaring/fastaggregation.go b/vendor/github.com/RoaringBitmap/roaring/fastaggregation.go index 762e500ed..47bda7125 100644 --- a/vendor/github.com/RoaringBitmap/roaring/fastaggregation.go +++ b/vendor/github.com/RoaringBitmap/roaring/fastaggregation.go @@ -33,15 +33,6 @@ main: s2 = x2.highlowcontainer.getKeyAtIndex(pos2) } else { c1 := x1.highlowcontainer.getContainerAtIndex(pos1) - switch t := c1.(type) { - case *arrayContainer: - c1 = t.toBitmapContainer() - case *runContainer16: - if !t.isFull() { - c1 = t.toBitmapContainer() - } - } - answer.highlowcontainer.appendContainer(s1, c1.lazyOR(x2.highlowcontainer.getContainerAtIndex(pos2)), false) pos1++ pos2++ @@ -89,18 +80,7 @@ main: } s2 = x2.highlowcontainer.getKeyAtIndex(pos2) } else { - c1 := 
x1.highlowcontainer.getContainerAtIndex(pos1) - switch t := c1.(type) { - case *arrayContainer: - c1 = t.toBitmapContainer() - case *runContainer16: - if !t.isFull() { - c1 = t.toBitmapContainer() - } - case *bitmapContainer: - c1 = x1.highlowcontainer.getWritableContainerAtIndex(pos1) - } - + c1 := x1.highlowcontainer.getWritableContainerAtIndex(pos1) x1.highlowcontainer.containers[pos1] = c1.lazyIOR(x2.highlowcontainer.getContainerAtIndex(pos2)) x1.highlowcontainer.needCopyOnWrite[pos1] = false pos1++ @@ -213,3 +193,117 @@ func HeapXor(bitmaps ...*Bitmap) *Bitmap { } return heap.Pop(&pq).(*item).value } + +// AndAny provides a result equivalent to x1.And(FastOr(bitmaps)). +// It's optimized to minimize allocations. It also might be faster than separate calls. +func (x1 *Bitmap) AndAny(bitmaps ...*Bitmap) { + if len(bitmaps) == 0 { + return + } else if len(bitmaps) == 1 { + x1.And(bitmaps[0]) + return + } + + type withPos struct { + bitmap *roaringArray + pos int + key uint16 + } + filters := make([]withPos, 0, len(bitmaps)) + + for _, b := range bitmaps { + if b.highlowcontainer.size() > 0 { + filters = append(filters, withPos{ + bitmap: &b.highlowcontainer, + pos: 0, + key: b.highlowcontainer.getKeyAtIndex(0), + }) + } + } + + basePos := 0 + intersections := 0 + keyContainers := make([]container, 0, len(filters)) + var ( + tmpArray *arrayContainer + tmpBitmap *bitmapContainer + minNextKey uint16 + ) + + for basePos < x1.highlowcontainer.size() && len(filters) > 0 { + baseKey := x1.highlowcontainer.getKeyAtIndex(basePos) + + // accumulate containers for current key, find next minimal key in filters + // and exclude filters that do not have related values anymore + i := 0 + maxPossibleOr := 0 + minNextKey = MaxUint16 + for _, f := range filters { + if f.key < baseKey { + f.pos = f.bitmap.advanceUntil(baseKey, f.pos) + if f.pos == f.bitmap.size() { + continue + } + f.key = f.bitmap.getKeyAtIndex(f.pos) + } + + if f.key == baseKey { + cont := 
f.bitmap.getContainerAtIndex(f.pos) + keyContainers = append(keyContainers, cont) + maxPossibleOr += cont.getCardinality() + + f.pos++ + if f.pos == f.bitmap.size() { + continue + } + f.key = f.bitmap.getKeyAtIndex(f.pos) + } + + minNextKey = minOfUint16(minNextKey, f.key) + filters[i] = f + i++ + } + filters = filters[:i] + + if len(keyContainers) == 0 { + basePos = x1.highlowcontainer.advanceUntil(minNextKey, basePos) + continue + } + + var ored container + + if len(keyContainers) == 1 { + ored = keyContainers[0] + } else { + //TODO: special case for run containers? + if maxPossibleOr > arrayDefaultMaxSize { + if tmpBitmap == nil { + tmpBitmap = newBitmapContainer() + } + tmpBitmap.resetTo(keyContainers[0]) + ored = tmpBitmap + } else { + if tmpArray == nil { + tmpArray = newArrayContainerCapacity(maxPossibleOr) + } + tmpArray.realloc(maxPossibleOr) + tmpArray.resetTo(keyContainers[0]) + ored = tmpArray + } + for _, c := range keyContainers[1:] { + ored = ored.ior(c) + } + } + + result := x1.highlowcontainer.getWritableContainerAtIndex(basePos).iand(ored) + if !result.isEmpty() { + x1.highlowcontainer.replaceKeyAndContainerAtIndex(intersections, baseKey, result, false) + intersections++ + } + + keyContainers = keyContainers[:0] + basePos = x1.highlowcontainer.advanceUntil(minNextKey, basePos) + } + + x1.highlowcontainer.resize(intersections) +} diff --git a/vendor/github.com/RoaringBitmap/roaring/go.mod b/vendor/github.com/RoaringBitmap/roaring/go.mod deleted file mode 100644 index f5aebf396..000000000 --- a/vendor/github.com/RoaringBitmap/roaring/go.mod +++ /dev/null @@ -1,16 +0,0 @@ -module github.com/RoaringBitmap/roaring - -go 1.12 - -require ( - github.com/glycerine/go-unsnap-stream v0.0.0-20181221182339-f9677308dec2 - github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31 // indirect - github.com/golang/snappy v0.0.1 // indirect - github.com/gopherjs/gopherjs v0.0.0-20190910122728-9d188e94fb99 // indirect - github.com/jtolds/gls 
v4.20.0+incompatible // indirect - github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae - github.com/philhofer/fwd v1.0.0 // indirect - github.com/stretchr/testify v1.4.0 - github.com/tinylib/msgp v1.1.0 - github.com/willf/bitset v1.1.10 -) diff --git a/vendor/github.com/RoaringBitmap/roaring/go.sum b/vendor/github.com/RoaringBitmap/roaring/go.sum deleted file mode 100644 index 2e27dbb6e..000000000 --- a/vendor/github.com/RoaringBitmap/roaring/go.sum +++ /dev/null @@ -1,30 +0,0 @@ -github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/glycerine/go-unsnap-stream v0.0.0-20181221182339-f9677308dec2 h1:Ujru1hufTHVb++eG6OuNDKMxZnGIvF6o/u8q/8h2+I4= -github.com/glycerine/go-unsnap-stream v0.0.0-20181221182339-f9677308dec2/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE= -github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31 h1:gclg6gY70GLy3PbkQ1AERPfmLMMagS60DKF78eWwLn8= -github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24= -github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= -github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/gopherjs/gopherjs v0.0.0-20190910122728-9d188e94fb99 h1:twflg0XRTjwKpxb/jFExr4HGq6on2dEOmnL6FV+fgPw= -github.com/gopherjs/gopherjs v0.0.0-20190910122728-9d188e94fb99/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= -github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae h1:VeRdUYdCw49yizlSbMEn2SZ+gT+3IUKx8BqxyQdz+BY= -github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg= -github.com/philhofer/fwd v1.0.0 
h1:UbZqGr5Y38ApvM/V/jEljVxwocdweyH+vmYvRPBnbqQ= -github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/tinylib/msgp v1.1.0 h1:9fQd+ICuRIu/ue4vxJZu6/LzxN0HwMds2nq/0cFvxHU= -github.com/tinylib/msgp v1.1.0/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= -github.com/willf/bitset v1.1.10 h1:NotGKqX0KwQ72NUzqrjZq5ipPNDQex9lo3WpaS8L2sc= -github.com/willf/bitset v1.1.10/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/RoaringBitmap/roaring/internal/byte_input.go b/vendor/github.com/RoaringBitmap/roaring/internal/byte_input.go new file mode 100644 index 000000000..3e5490a9d --- /dev/null +++ b/vendor/github.com/RoaringBitmap/roaring/internal/byte_input.go @@ -0,0 +1,166 @@ +package internal + +import ( + "encoding/binary" + "io" +) + +// ByteInput typed interface around io.Reader or raw bytes +type ByteInput interface { + // Next returns a slice containing the next n bytes from the buffer, + // advancing the buffer as if the bytes had been returned by Read. 
+ Next(n int) ([]byte, error) + // ReadUInt32 reads uint32 with LittleEndian order + ReadUInt32() (uint32, error) + // ReadUInt16 reads uint16 with LittleEndian order + ReadUInt16() (uint16, error) + // GetReadBytes returns read bytes + GetReadBytes() int64 + // SkipBytes skips exactly n bytes + SkipBytes(n int) error +} + +// NewByteInputFromReader creates reader wrapper +func NewByteInputFromReader(reader io.Reader) ByteInput { + return &ByteInputAdapter{ + r: reader, + readBytes: 0, + } +} + +// NewByteInput creates raw bytes wrapper +func NewByteInput(buf []byte) ByteInput { + return &ByteBuffer{ + buf: buf, + off: 0, + } +} + +// ByteBuffer raw bytes wrapper +type ByteBuffer struct { + buf []byte + off int +} + +// Next returns a slice containing the next n bytes from the reader +// If there are fewer bytes than the given n, io.ErrUnexpectedEOF will be returned +func (b *ByteBuffer) Next(n int) ([]byte, error) { + m := len(b.buf) - b.off + + if n > m { + return nil, io.ErrUnexpectedEOF + } + + data := b.buf[b.off : b.off+n] + b.off += n + + return data, nil +} + +// ReadUInt32 reads uint32 with LittleEndian order +func (b *ByteBuffer) ReadUInt32() (uint32, error) { + if len(b.buf)-b.off < 4 { + return 0, io.ErrUnexpectedEOF + } + + v := binary.LittleEndian.Uint32(b.buf[b.off:]) + b.off += 4 + + return v, nil +} + +// ReadUInt16 reads uint16 with LittleEndian order +func (b *ByteBuffer) ReadUInt16() (uint16, error) { + if len(b.buf)-b.off < 2 { + return 0, io.ErrUnexpectedEOF + } + + v := binary.LittleEndian.Uint16(b.buf[b.off:]) + b.off += 2 + + return v, nil +} + +// GetReadBytes returns read bytes +func (b *ByteBuffer) GetReadBytes() int64 { + return int64(b.off) +} + +// SkipBytes skips exactly n bytes +func (b *ByteBuffer) SkipBytes(n int) error { + m := len(b.buf) - b.off + + if n > m { + return io.ErrUnexpectedEOF + } + + b.off += n + + return nil +} + +// Reset resets the given buffer with a new byte slice +func (b *ByteBuffer) Reset(buf []byte) { + 
b.buf = buf + b.off = 0 +} + +// ByteInputAdapter reader wrapper +type ByteInputAdapter struct { + r io.Reader + readBytes int +} + +// Next returns a slice containing the next n bytes from the buffer, +// advancing the buffer as if the bytes had been returned by Read. +func (b *ByteInputAdapter) Next(n int) ([]byte, error) { + buf := make([]byte, n) + m, err := io.ReadAtLeast(b.r, buf, n) + b.readBytes += m + + if err != nil { + return nil, err + } + + return buf, nil +} + +// ReadUInt32 reads uint32 with LittleEndian order +func (b *ByteInputAdapter) ReadUInt32() (uint32, error) { + buf, err := b.Next(4) + + if err != nil { + return 0, err + } + + return binary.LittleEndian.Uint32(buf), nil +} + +// ReadUInt16 reads uint16 with LittleEndian order +func (b *ByteInputAdapter) ReadUInt16() (uint16, error) { + buf, err := b.Next(2) + + if err != nil { + return 0, err + } + + return binary.LittleEndian.Uint16(buf), nil +} + +// GetReadBytes returns read bytes +func (b *ByteInputAdapter) GetReadBytes() int64 { + return int64(b.readBytes) +} + +// SkipBytes skips exactly n bytes +func (b *ByteInputAdapter) SkipBytes(n int) error { + _, err := b.Next(n) + + return err +} + +// Reset resets the given buffer with a new stream +func (b *ByteInputAdapter) Reset(stream io.Reader) { + b.r = stream + b.readBytes = 0 +} diff --git a/vendor/github.com/RoaringBitmap/roaring/internal/pools.go b/vendor/github.com/RoaringBitmap/roaring/internal/pools.go new file mode 100644 index 000000000..d2583568d --- /dev/null +++ b/vendor/github.com/RoaringBitmap/roaring/internal/pools.go @@ -0,0 +1,21 @@ +package internal + +import ( + "sync" +) + +var ( + // ByteInputAdapterPool shared pool + ByteInputAdapterPool = sync.Pool{ + New: func() interface{} { + return &ByteInputAdapter{} + }, + } + + // ByteBufferPool shared pool + ByteBufferPool = sync.Pool{ + New: func() interface{} { + return &ByteBuffer{} + }, + } +) diff --git a/vendor/github.com/RoaringBitmap/roaring/manyiterator.go 
b/vendor/github.com/RoaringBitmap/roaring/manyiterator.go index 300756377..eaa5b7950 100644 --- a/vendor/github.com/RoaringBitmap/roaring/manyiterator.go +++ b/vendor/github.com/RoaringBitmap/roaring/manyiterator.go @@ -2,6 +2,7 @@ package roaring type manyIterable interface { nextMany(hs uint32, buf []uint32) int + nextMany64(hs uint64, buf []uint64) int } func (si *shortIterator) nextMany(hs uint32, buf []uint32) int { @@ -16,3 +17,16 @@ func (si *shortIterator) nextMany(hs uint32, buf []uint32) int { si.loc = l return n } + +func (si *shortIterator) nextMany64(hs uint64, buf []uint64) int { + n := 0 + l := si.loc + s := si.slice + for n < len(buf) && l < len(s) { + buf[n] = uint64(s[l]) | hs + l++ + n++ + } + si.loc = l + return n +} diff --git a/vendor/github.com/RoaringBitmap/roaring/parallel.go b/vendor/github.com/RoaringBitmap/roaring/parallel.go index 2af1aed48..9208e3e38 100644 --- a/vendor/github.com/RoaringBitmap/roaring/parallel.go +++ b/vendor/github.com/RoaringBitmap/roaring/parallel.go @@ -166,7 +166,6 @@ func appenderRoutine(bitmapChan chan<- *Bitmap, resultChan <-chan keyedContainer make([]container, 0, expectedKeys), make([]bool, 0, expectedKeys), false, - nil, }, } for i := range keys { @@ -286,14 +285,14 @@ func ParAnd(parallelism int, bitmaps ...*Bitmap) *Bitmap { for input := range inputChan { c := input.containers[0].and(input.containers[1]) for _, next := range input.containers[2:] { - if c.getCardinality() == 0 { + if c.isEmpty() { break } c = c.iand(next) } // Send a nil explicitly if the result of the intersection is an empty container - if c.getCardinality() == 0 { + if c.isEmpty() { c = nil } @@ -355,10 +354,10 @@ func ParOr(parallelism int, bitmaps ...*Bitmap) *Bitmap { if lKey == MaxUint16 && hKey == 0 { return New() } else if len(bitmaps) == 1 { - return bitmaps[0] + return bitmaps[0].Clone() } - keyRange := hKey - lKey + 1 + keyRange := int(hKey) - int(lKey) + 1 if keyRange == 1 { // revert to FastOr. 
Since the key range is 0 // no container-level aggregation parallelism is achievable diff --git a/vendor/github.com/RoaringBitmap/roaring/roaring.go b/vendor/github.com/RoaringBitmap/roaring/roaring.go index ed75d58b9..53068e4d9 100644 --- a/vendor/github.com/RoaringBitmap/roaring/roaring.go +++ b/vendor/github.com/RoaringBitmap/roaring/roaring.go @@ -11,7 +11,8 @@ import ( "fmt" "io" "strconv" - "sync" + + "github.com/RoaringBitmap/roaring/internal" ) // Bitmap represents a compressed bitmap where you can add integers. @@ -52,27 +53,19 @@ func (rb *Bitmap) ToBytes() ([]byte, error) { return rb.highlowcontainer.toBytes() } -// Deprecated: WriteToMsgpack writes a msgpack2/snappy-streaming compressed serialized -// version of this bitmap to stream. The format is not -// compatible with the WriteTo() format, and is -// experimental: it may produce smaller on disk -// footprint and/or be faster to read, depending -// on your content. Currently only the Go roaring -// implementation supports this format. -func (rb *Bitmap) WriteToMsgpack(stream io.Writer) (int64, error) { - return 0, rb.highlowcontainer.writeToMsgpack(stream) -} - // ReadFrom reads a serialized version of this bitmap from stream. // The format is compatible with other RoaringBitmap // implementations (Java, C) and is documented here: // https://github.com/RoaringBitmap/RoaringFormatSpec -func (rb *Bitmap) ReadFrom(reader io.Reader) (p int64, err error) { - stream := byteInputAdapterPool.Get().(*byteInputAdapter) - stream.reset(reader) +// Since io.Reader is regarded as a stream and cannot be read twice. +// So add cookieHeader to accept the 4-byte data that has been read in roaring64.ReadFrom. +// It is not necessary to pass cookieHeader when call roaring.ReadFrom to read the roaring32 data directly. 
+func (rb *Bitmap) ReadFrom(reader io.Reader, cookieHeader ...byte) (p int64, err error) { + stream := internal.ByteInputAdapterPool.Get().(*internal.ByteInputAdapter) + stream.Reset(reader) - p, err = rb.highlowcontainer.readFrom(stream) - byteInputAdapterPool.Put(stream) + p, err = rb.highlowcontainer.readFrom(stream, cookieHeader...) + internal.ByteInputAdapterPool.Put(stream) return } @@ -100,29 +93,15 @@ func (rb *Bitmap) ReadFrom(reader io.Reader) (p int64, err error) { // call CloneCopyOnWriteContainers on all such bitmaps. // func (rb *Bitmap) FromBuffer(buf []byte) (p int64, err error) { - stream := byteBufferPool.Get().(*byteBuffer) - stream.reset(buf) + stream := internal.ByteBufferPool.Get().(*internal.ByteBuffer) + stream.Reset(buf) p, err = rb.highlowcontainer.readFrom(stream) - byteBufferPool.Put(stream) + internal.ByteBufferPool.Put(stream) return } -var ( - byteBufferPool = sync.Pool{ - New: func() interface{} { - return &byteBuffer{} - }, - } - - byteInputAdapterPool = sync.Pool{ - New: func() interface{} { - return &byteInputAdapter{} - }, - } -) - // RunOptimize attempts to further compress the runs of consecutive values found in the bitmap func (rb *Bitmap) RunOptimize() { rb.highlowcontainer.runOptimize() @@ -133,14 +112,6 @@ func (rb *Bitmap) HasRunCompression() bool { return rb.highlowcontainer.hasRunCompression() } -// Deprecated: ReadFromMsgpack reads a msgpack2/snappy-streaming serialized -// version of this bitmap from stream. The format is -// expected is that written by the WriteToMsgpack() -// call; see additional notes there. 
-func (rb *Bitmap) ReadFromMsgpack(stream io.Reader) (int64, error) { - return 0, rb.highlowcontainer.readFromMsgpack(stream) -} - // MarshalBinary implements the encoding.BinaryMarshaler interface for the bitmap // (same as ToBytes) func (rb *Bitmap) MarshalBinary() ([]byte, error) { @@ -180,8 +151,7 @@ func (rb *Bitmap) ToArray() []uint32 { hs := uint32(rb.highlowcontainer.getKeyAtIndex(pos)) << 16 c := rb.highlowcontainer.getContainerAtIndex(pos) pos++ - c.fillLeastSignificant16bits(array, pos2, hs) - pos2 += c.getCardinality() + pos2 = c.fillLeastSignificant16bits(array, pos2, hs) } return array } @@ -345,8 +315,10 @@ func newIntReverseIterator(a *Bitmap) *intReverseIterator { // ManyIntIterable allows you to iterate over the values in a Bitmap type ManyIntIterable interface { - // pass in a buffer to fill up with values, returns how many values were returned - NextMany([]uint32) int + // NextMany fills buf up with values, returns how many values were returned + NextMany(buf []uint32) int + // NextMany64 fills up buf with 64 bit values, uses hs as a mask (OR), returns how many values were returned + NextMany64(hs uint64, buf []uint64) int } type manyIntIterator struct { @@ -382,6 +354,25 @@ func (ii *manyIntIterator) NextMany(buf []uint32) int { return n } +func (ii *manyIntIterator) NextMany64(hs64 uint64, buf []uint64) int { + n := 0 + for n < len(buf) { + if ii.iter == nil { + break + } + + hs := uint64(ii.hs) | hs64 + moreN := ii.iter.nextMany64(hs, buf[n:]) + n += moreN + if moreN == 0 { + ii.pos = ii.pos + 1 + ii.init() + } + } + + return n +} + func newManyIntIterator(a *Bitmap) *manyIntIterator { p := new(manyIntIterator) p.pos = 0 @@ -550,7 +541,7 @@ func AddOffset64(x *Bitmap, offset int64) (answer *Bitmap) { c := x.highlowcontainer.getContainerAtIndex(pos) offsetted := c.addOffset(inOffset) - if offsetted[0].getCardinality() > 0 && (key >= 0 && key <= MaxUint16) { + if !offsetted[0].isEmpty() && (key >= 0 && key <= MaxUint16) { curSize := 
answer.highlowcontainer.size() lastkey := int32(0) @@ -567,7 +558,7 @@ func AddOffset64(x *Bitmap, offset int64) (answer *Bitmap) { } } - if offsetted[1].getCardinality() > 0 && ((key+1) >= 0 && (key+1) <= MaxUint16) { + if !offsetted[1].isEmpty() && ((key+1) >= 0 && (key+1) <= MaxUint16) { answer.highlowcontainer.appendContainer(uint16(key+1), offsetted[1], false) } } @@ -638,13 +629,13 @@ func (rb *Bitmap) Remove(x uint32) { if i >= 0 { c := rb.highlowcontainer.getWritableContainerAtIndex(i).iremoveReturnMinimized(lowbits(x)) rb.highlowcontainer.setContainerAtIndex(i, c) - if rb.highlowcontainer.getContainerAtIndex(i).getCardinality() == 0 { + if rb.highlowcontainer.getContainerAtIndex(i).isEmpty() { rb.highlowcontainer.removeAtIndex(i) } } } -// CheckedRemove removes the integer x from the bitmap and return true if the integer was effectively remove (and false if the integer was not present) +// CheckedRemove removes the integer x from the bitmap and return true if the integer was effectively removed (and false if the integer was not present) func (rb *Bitmap) CheckedRemove(x uint32) bool { // TODO: add unit tests for this method hb := highbits(x) @@ -654,7 +645,7 @@ func (rb *Bitmap) CheckedRemove(x uint32) bool { oldcard := C.getCardinality() C = C.iremoveReturnMinimized(lowbits(x)) rb.highlowcontainer.setContainerAtIndex(i, C) - if rb.highlowcontainer.getContainerAtIndex(i).getCardinality() == 0 { + if rb.highlowcontainer.getContainerAtIndex(i).isEmpty() { rb.highlowcontainer.removeAtIndex(i) return true } @@ -678,7 +669,10 @@ func (rb *Bitmap) GetCardinality() uint64 { return size } -// Rank returns the number of integers that are smaller or equal to x (Rank(infinity) would be GetCardinality()) +// Rank returns the number of integers that are smaller or equal to x (Rank(infinity) would be GetCardinality()). +// If you pass the smallest value, you get the value 1. If you pass a value that is smaller than the smallest +// value, you get 0. 
Note that this function differs in convention from the Select function since it +// return 1 and not 0 on the smallest value. func (rb *Bitmap) Rank(x uint32) uint64 { size := uint64(0) for i := 0; i < rb.highlowcontainer.size(); i++ { @@ -695,7 +689,9 @@ func (rb *Bitmap) Rank(x uint32) uint64 { return size } -// Select returns the xth integer in the bitmap +// Select returns the xth integer in the bitmap. If you pass 0, you get +// the smallest element. Note that this function differs in convention from +// the Rank function which returns 1 on the smallest value. func (rb *Bitmap) Select(x uint32) (uint32, error) { if rb.GetCardinality() <= uint64(x) { return 0, fmt.Errorf("can't find %dth integer in a bitmap with only %d items", x, rb.GetCardinality()) @@ -704,8 +700,9 @@ func (rb *Bitmap) Select(x uint32) (uint32, error) { remaining := x for i := 0; i < rb.highlowcontainer.size(); i++ { c := rb.highlowcontainer.getContainerAtIndex(i) - if remaining >= uint32(c.getCardinality()) { - remaining -= uint32(c.getCardinality()) + card := uint32(c.getCardinality()) + if remaining >= card { + remaining -= card } else { key := rb.highlowcontainer.getKeyAtIndex(i) return uint32(key)<<16 + uint32(c.selectInt(uint16(remaining))), nil @@ -732,7 +729,7 @@ main: c1 := rb.highlowcontainer.getWritableContainerAtIndex(pos1) c2 := x2.highlowcontainer.getContainerAtIndex(pos2) diff := c1.iand(c2) - if diff.getCardinality() > 0 { + if !diff.isEmpty() { rb.highlowcontainer.replaceKeyAndContainerAtIndex(intersectionsize, s1, diff, false) intersectionsize++ } @@ -934,7 +931,7 @@ func (rb *Bitmap) Xor(x2 *Bitmap) { } else { // TODO: couple be computed in-place for reduced memory usage c := rb.highlowcontainer.getContainerAtIndex(pos1).xor(x2.highlowcontainer.getContainerAtIndex(pos2)) - if c.getCardinality() > 0 { + if !c.isEmpty() { rb.highlowcontainer.setContainerAtIndex(pos1, c) pos1++ } else { @@ -980,7 +977,7 @@ main: } s2 = x2.highlowcontainer.getKeyAtIndex(pos2) } else { - 
rb.highlowcontainer.replaceKeyAndContainerAtIndex(pos1, s1, rb.highlowcontainer.getWritableContainerAtIndex(pos1).ior(x2.highlowcontainer.getContainerAtIndex(pos2)), false) + rb.highlowcontainer.replaceKeyAndContainerAtIndex(pos1, s1, rb.highlowcontainer.getUnionedWritableContainer(pos1, x2.highlowcontainer.getContainerAtIndex(pos2)), false) pos1++ pos2++ if (pos1 == length1) || (pos2 == length2) { @@ -1014,7 +1011,7 @@ main: c1 := rb.highlowcontainer.getWritableContainerAtIndex(pos1) c2 := x2.highlowcontainer.getContainerAtIndex(pos2) diff := c1.iandNot(c2) - if diff.getCardinality() > 0 { + if !diff.isEmpty() { rb.highlowcontainer.replaceKeyAndContainerAtIndex(intersectionsize, s1, diff, false) intersectionsize++ } @@ -1123,7 +1120,7 @@ main: C := x1.highlowcontainer.getContainerAtIndex(pos1) C = C.and(x2.highlowcontainer.getContainerAtIndex(pos2)) - if C.getCardinality() > 0 { + if !C.isEmpty() { answer.highlowcontainer.appendContainer(s1, C, false) } pos1++ @@ -1170,7 +1167,7 @@ func Xor(x1, x2 *Bitmap) *Bitmap { pos2++ } else { c := x1.highlowcontainer.getContainerAtIndex(pos1).xor(x2.highlowcontainer.getContainerAtIndex(pos2)) - if c.getCardinality() > 0 { + if !c.isEmpty() { answer.highlowcontainer.appendContainer(s1, c, false) } pos1++ @@ -1213,7 +1210,7 @@ main: c1 := x1.highlowcontainer.getContainerAtIndex(pos1) c2 := x2.highlowcontainer.getContainerAtIndex(pos2) diff := c1.andNot(c2) - if diff.getCardinality() > 0 { + if !diff.isEmpty() { answer.highlowcontainer.appendContainer(s1, diff, false) } pos1++ @@ -1303,7 +1300,7 @@ func (rb *Bitmap) Flip(rangeStart, rangeEnd uint64) { if i >= 0 { c := rb.highlowcontainer.getWritableContainerAtIndex(i).inot(int(containerStart), int(containerLast)+1) - if c.getCardinality() > 0 { + if !c.isEmpty() { rb.highlowcontainer.setContainerAtIndex(i, c) } else { rb.highlowcontainer.removeAtIndex(i) @@ -1384,7 +1381,7 @@ func (rb *Bitmap) RemoveRange(rangeStart, rangeEnd uint64) { return } c := 
rb.highlowcontainer.getWritableContainerAtIndex(i).iremoveRange(int(lbStart), int(lbLast+1)) - if c.getCardinality() > 0 { + if !c.isEmpty() { rb.highlowcontainer.setContainerAtIndex(i, c) } else { rb.highlowcontainer.removeAtIndex(i) @@ -1397,7 +1394,7 @@ func (rb *Bitmap) RemoveRange(rangeStart, rangeEnd uint64) { if ifirst >= 0 { if lbStart != 0 { c := rb.highlowcontainer.getWritableContainerAtIndex(ifirst).iremoveRange(int(lbStart), int(max+1)) - if c.getCardinality() > 0 { + if !c.isEmpty() { rb.highlowcontainer.setContainerAtIndex(ifirst, c) ifirst++ } @@ -1408,7 +1405,7 @@ func (rb *Bitmap) RemoveRange(rangeStart, rangeEnd uint64) { if ilast >= 0 { if lbLast != max { c := rb.highlowcontainer.getWritableContainerAtIndex(ilast).iremoveRange(int(0), int(lbLast+1)) - if c.getCardinality() > 0 { + if !c.isEmpty() { rb.highlowcontainer.setContainerAtIndex(ilast, c) } else { ilast++ @@ -1464,7 +1461,7 @@ func Flip(bm *Bitmap, rangeStart, rangeEnd uint64) *Bitmap { if i >= 0 { c := bm.highlowcontainer.getContainerAtIndex(i).not(int(containerStart), int(containerLast)+1) - if c.getCardinality() > 0 { + if !c.isEmpty() { answer.highlowcontainer.insertNewKeyValueAt(-j-1, uint16(hb), c) } @@ -1555,3 +1552,27 @@ func (rb *Bitmap) Stats() Statistics { } return stats } + +func (rb *Bitmap) checkValidity() bool { + for _, c := range rb.highlowcontainer.containers { + + switch c.(type) { + case *arrayContainer: + if c.getCardinality() > arrayDefaultMaxSize { + fmt.Println("Array containers are limited to size ", arrayDefaultMaxSize) + return false + } + case *bitmapContainer: + if c.getCardinality() <= arrayDefaultMaxSize { + fmt.Println("Bitmaps would be more concise as an array!") + return false + } + case *runContainer16: + if c.getSizeInBytes() > minOfInt(bitmapContainerSizeInBytes(), arrayContainerSizeInBytes(c.getCardinality())) { + fmt.Println("Inefficient run container!") + return false + } + } + } + return true +} \ No newline at end of file diff --git 
a/vendor/github.com/RoaringBitmap/roaring/roaringarray.go b/vendor/github.com/RoaringBitmap/roaring/roaringarray.go index 3dddbffdb..f7b7d732b 100644 --- a/vendor/github.com/RoaringBitmap/roaring/roaringarray.go +++ b/vendor/github.com/RoaringBitmap/roaring/roaringarray.go @@ -5,13 +5,9 @@ import ( "encoding/binary" "fmt" "io" - - snappy "github.com/glycerine/go-unsnap-stream" - "github.com/tinylib/msgp/msgp" + "github.com/RoaringBitmap/roaring/internal" ) -//go:generate msgp -unexported - type container interface { addOffset(uint16) []container @@ -21,6 +17,7 @@ type container interface { iand(container) container // i stands for inplace andNot(container) container iandNot(container) container // i stands for inplace + isEmpty() bool getCardinality() int // rank returns the number of integers that are // smaller or equal to x. rank(infinity) would be getCardinality(). @@ -51,7 +48,7 @@ type container interface { // any of the implementations. equals(r container) bool - fillLeastSignificant16bits(array []uint32, i int, mask uint32) + fillLeastSignificant16bits(array []uint32, i int, mask uint32) int or(r container) container orCardinality(r container) int isFull() bool @@ -103,18 +100,6 @@ type roaringArray struct { containers []container `msg:"-"` // don't try to serialize directly. needCopyOnWrite []bool copyOnWrite bool - - // conserz is used at serialization time - // to serialize containers. Otherwise empty. - conserz []containerSerz -} - -// containerSerz facilitates serializing container (tricky to -// serialize because it is an interface) by providing a -// light wrapper with a type identifier. 
-type containerSerz struct { - t contype `msg:"t"` // type - r msgp.Raw `msg:"r"` // Raw msgpack of the actual container type } func newRoaringArray() *roaringArray { @@ -246,7 +231,6 @@ func (ra *roaringArray) resize(newsize int) { func (ra *roaringArray) clear() { ra.resize(0) ra.copyOnWrite = false - ra.conserz = nil } func (ra *roaringArray) clone() *roaringArray { @@ -328,6 +312,17 @@ func (ra *roaringArray) getFastContainerAtIndex(i int, needsWriteable bool) cont return c } +// getUnionedWritableContainer switches behavior for in-place Or +// depending on whether the container requires a copy on write. +// If it does using the non-inplace or() method leads to fewer allocations. +func (ra *roaringArray) getUnionedWritableContainer(pos int, other container) container { + if ra.needCopyOnWrite[pos] { + return ra.getContainerAtIndex(pos).or(other) + } + return ra.getContainerAtIndex(pos).ior(other) + +} + func (ra *roaringArray) getWritableContainerAtIndex(i int) container { if ra.needCopyOnWrite[i] { ra.containers[i] = ra.containers[i].clone() @@ -491,11 +486,11 @@ func (ra *roaringArray) writeTo(w io.Writer) (n int64, err error) { binary.LittleEndian.PutUint16(buf[2:], uint16(len(ra.keys)-1)) nw += 2 // compute isRun bitmap without temporary allocation - var runbitmapslice = buf[nw:nw+isRunSizeInBytes] + var runbitmapslice = buf[nw : nw+isRunSizeInBytes] for i, c := range ra.containers { switch c.(type) { case *runContainer16: - runbitmapslice[i / 8] |= 1<<(uint(i)%8) + runbitmapslice[i/8] |= 1 << (uint(i) % 8) } } nw += isRunSizeInBytes @@ -555,51 +550,58 @@ func (ra *roaringArray) toBytes() ([]byte, error) { return buf.Bytes(), err } -func (ra *roaringArray) readFrom(stream byteInput) (int64, error) { - cookie, err := stream.readUInt32() - - if err != nil { - return stream.getReadBytes(), fmt.Errorf("error in roaringArray.readFrom: could not read initial cookie: %s", err) +func (ra *roaringArray) readFrom(stream internal.ByteInput, cookieHeader ...byte) 
(int64, error) { + var cookie uint32 + var err error + if len(cookieHeader) > 0 && len(cookieHeader) != 4 { + return int64(len(cookieHeader)), fmt.Errorf("error in roaringArray.readFrom: could not read initial cookie: incorrect size of cookie header") + } + if len(cookieHeader) == 4 { + cookie = binary.LittleEndian.Uint32(cookieHeader) + } else { + cookie, err = stream.ReadUInt32() + if err != nil { + return stream.GetReadBytes(), fmt.Errorf("error in roaringArray.readFrom: could not read initial cookie: %s", err) + } } var size uint32 var isRunBitmap []byte if cookie&0x0000FFFF == serialCookie { - size = uint32(uint16(cookie>>16) + 1) + size = uint32(cookie>>16 + 1) // create is-run-container bitmap isRunBitmapSize := (int(size) + 7) / 8 - isRunBitmap, err = stream.next(isRunBitmapSize) + isRunBitmap, err = stream.Next(isRunBitmapSize) if err != nil { - return stream.getReadBytes(), fmt.Errorf("malformed bitmap, failed to read is-run bitmap, got: %s", err) + return stream.GetReadBytes(), fmt.Errorf("malformed bitmap, failed to read is-run bitmap, got: %s", err) } } else if cookie == serialCookieNoRunContainer { - size, err = stream.readUInt32() - + size, err = stream.ReadUInt32() if err != nil { - return stream.getReadBytes(), fmt.Errorf("malformed bitmap, failed to read a bitmap size: %s", err) + return stream.GetReadBytes(), fmt.Errorf("malformed bitmap, failed to read a bitmap size: %s", err) } } else { - return stream.getReadBytes(), fmt.Errorf("error in roaringArray.readFrom: did not find expected serialCookie in header") + return stream.GetReadBytes(), fmt.Errorf("error in roaringArray.readFrom: did not find expected serialCookie in header") } if size > (1 << 16) { - return stream.getReadBytes(), fmt.Errorf("it is logically impossible to have more than (1<<16) containers") + return stream.GetReadBytes(), fmt.Errorf("it is logically impossible to have more than (1<<16) containers") } // descriptive header - buf, err := stream.next(2 * 2 * int(size)) + buf, 
err := stream.Next(2 * 2 * int(size)) if err != nil { - return stream.getReadBytes(), fmt.Errorf("failed to read descriptive header: %s", err) + return stream.GetReadBytes(), fmt.Errorf("failed to read descriptive header: %s", err) } keycard := byteSliceAsUint16Slice(buf) if isRunBitmap == nil || size >= noOffsetThreshold { - if err := stream.skipBytes(int(size) * 4); err != nil { - return stream.getReadBytes(), fmt.Errorf("failed to skip bytes: %s", err) + if err := stream.SkipBytes(int(size) * 4); err != nil { + return stream.GetReadBytes(), fmt.Errorf("failed to skip bytes: %s", err) } } @@ -630,30 +632,29 @@ func (ra *roaringArray) readFrom(stream byteInput) (int64, error) { if isRunBitmap != nil && isRunBitmap[i/8]&(1<<(i%8)) != 0 { // run container - nr, err := stream.readUInt16() + nr, err := stream.ReadUInt16() if err != nil { return 0, fmt.Errorf("failed to read runtime container size: %s", err) } - buf, err := stream.next(int(nr) * 4) + buf, err := stream.Next(int(nr) * 4) if err != nil { - return stream.getReadBytes(), fmt.Errorf("failed to read runtime container content: %s", err) + return stream.GetReadBytes(), fmt.Errorf("failed to read runtime container content: %s", err) } nb := runContainer16{ iv: byteSliceAsInterval16Slice(buf), - card: int64(card), } ra.containers[i] = &nb } else if card > arrayDefaultMaxSize { // bitmap container - buf, err := stream.next(arrayDefaultMaxSize * 2) + buf, err := stream.Next(arrayDefaultMaxSize * 2) if err != nil { - return stream.getReadBytes(), fmt.Errorf("failed to read bitmap container: %s", err) + return stream.GetReadBytes(), fmt.Errorf("failed to read bitmap container: %s", err) } nb := bitmapContainer{ @@ -664,10 +665,10 @@ func (ra *roaringArray) readFrom(stream byteInput) (int64, error) { ra.containers[i] = &nb } else { // array container - buf, err := stream.next(card * 2) + buf, err := stream.Next(card * 2) if err != nil { - return stream.getReadBytes(), fmt.Errorf("failed to read array container: %s", 
err) + return stream.GetReadBytes(), fmt.Errorf("failed to read array container: %s", err) } nb := arrayContainer{ @@ -678,7 +679,7 @@ func (ra *roaringArray) readFrom(stream byteInput) (int64, error) { } } - return stream.getReadBytes(), nil + return stream.GetReadBytes(), nil } func (ra *roaringArray) hasRunCompression() bool { @@ -691,84 +692,6 @@ func (ra *roaringArray) hasRunCompression() bool { return false } -func (ra *roaringArray) writeToMsgpack(stream io.Writer) error { - - ra.conserz = make([]containerSerz, len(ra.containers)) - for i, v := range ra.containers { - switch cn := v.(type) { - case *bitmapContainer: - bts, err := cn.MarshalMsg(nil) - if err != nil { - return err - } - ra.conserz[i].t = bitmapContype - ra.conserz[i].r = bts - case *arrayContainer: - bts, err := cn.MarshalMsg(nil) - if err != nil { - return err - } - ra.conserz[i].t = arrayContype - ra.conserz[i].r = bts - case *runContainer16: - bts, err := cn.MarshalMsg(nil) - if err != nil { - return err - } - ra.conserz[i].t = run16Contype - ra.conserz[i].r = bts - default: - panic(fmt.Errorf("Unrecognized container implementation: %T", cn)) - } - } - w := snappy.NewWriter(stream) - err := msgp.Encode(w, ra) - ra.conserz = nil - return err -} - -func (ra *roaringArray) readFromMsgpack(stream io.Reader) error { - r := snappy.NewReader(stream) - err := msgp.Decode(r, ra) - if err != nil { - return err - } - - if len(ra.containers) != len(ra.keys) { - ra.containers = make([]container, len(ra.keys)) - } - - for i, v := range ra.conserz { - switch v.t { - case bitmapContype: - c := &bitmapContainer{} - _, err = c.UnmarshalMsg(v.r) - if err != nil { - return err - } - ra.containers[i] = c - case arrayContype: - c := &arrayContainer{} - _, err = c.UnmarshalMsg(v.r) - if err != nil { - return err - } - ra.containers[i] = c - case run16Contype: - c := &runContainer16{} - _, err = c.UnmarshalMsg(v.r) - if err != nil { - return err - } - ra.containers[i] = c - default: - return 
fmt.Errorf("unrecognized contype serialization code: '%v'", v.t) - } - } - ra.conserz = nil - return nil -} - func (ra *roaringArray) advanceUntil(min uint16, pos int) int { lower := pos + 1 diff --git a/vendor/github.com/RoaringBitmap/roaring/roaringarray_gen.go b/vendor/github.com/RoaringBitmap/roaring/roaringarray_gen.go deleted file mode 100644 index dcd718756..000000000 --- a/vendor/github.com/RoaringBitmap/roaring/roaringarray_gen.go +++ /dev/null @@ -1,529 +0,0 @@ -package roaring - -// NOTE: THIS FILE WAS PRODUCED BY THE -// MSGP CODE GENERATION TOOL (github.com/tinylib/msgp) -// DO NOT EDIT - -import ( - "github.com/tinylib/msgp/msgp" -) - -// Deprecated: DecodeMsg implements msgp.Decodable -func (z *containerSerz) DecodeMsg(dc *msgp.Reader) (err error) { - var field []byte - _ = field - var zxvk uint32 - zxvk, err = dc.ReadMapHeader() - if err != nil { - return - } - for zxvk > 0 { - zxvk-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - return - } - switch msgp.UnsafeString(field) { - case "t": - { - var zbzg uint8 - zbzg, err = dc.ReadUint8() - z.t = contype(zbzg) - } - if err != nil { - return - } - case "r": - err = z.r.DecodeMsg(dc) - if err != nil { - return - } - default: - err = dc.Skip() - if err != nil { - return - } - } - } - return -} - -// Deprecated: EncodeMsg implements msgp.Encodable -func (z *containerSerz) EncodeMsg(en *msgp.Writer) (err error) { - // map header, size 2 - // write "t" - err = en.Append(0x82, 0xa1, 0x74) - if err != nil { - return err - } - err = en.WriteUint8(uint8(z.t)) - if err != nil { - return - } - // write "r" - err = en.Append(0xa1, 0x72) - if err != nil { - return err - } - err = z.r.EncodeMsg(en) - if err != nil { - return - } - return -} - -// Deprecated: MarshalMsg implements msgp.Marshaler -func (z *containerSerz) MarshalMsg(b []byte) (o []byte, err error) { - o = msgp.Require(b, z.Msgsize()) - // map header, size 2 - // string "t" - o = append(o, 0x82, 0xa1, 0x74) - o = msgp.AppendUint8(o, uint8(z.t)) 
- // string "r" - o = append(o, 0xa1, 0x72) - o, err = z.r.MarshalMsg(o) - if err != nil { - return - } - return -} - -// Deprecated: UnmarshalMsg implements msgp.Unmarshaler -func (z *containerSerz) UnmarshalMsg(bts []byte) (o []byte, err error) { - var field []byte - _ = field - var zbai uint32 - zbai, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - return - } - for zbai > 0 { - zbai-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - return - } - switch msgp.UnsafeString(field) { - case "t": - { - var zcmr uint8 - zcmr, bts, err = msgp.ReadUint8Bytes(bts) - z.t = contype(zcmr) - } - if err != nil { - return - } - case "r": - bts, err = z.r.UnmarshalMsg(bts) - if err != nil { - return - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - return - } - } - } - o = bts - return -} - -// Deprecated: Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z *containerSerz) Msgsize() (s int) { - s = 1 + 2 + msgp.Uint8Size + 2 + z.r.Msgsize() - return -} - -// Deprecated: DecodeMsg implements msgp.Decodable -func (z *contype) DecodeMsg(dc *msgp.Reader) (err error) { - { - var zajw uint8 - zajw, err = dc.ReadUint8() - (*z) = contype(zajw) - } - if err != nil { - return - } - return -} - -// Deprecated: EncodeMsg implements msgp.Encodable -func (z contype) EncodeMsg(en *msgp.Writer) (err error) { - err = en.WriteUint8(uint8(z)) - if err != nil { - return - } - return -} - -// Deprecated: MarshalMsg implements msgp.Marshaler -func (z contype) MarshalMsg(b []byte) (o []byte, err error) { - o = msgp.Require(b, z.Msgsize()) - o = msgp.AppendUint8(o, uint8(z)) - return -} - -// Deprecated: UnmarshalMsg implements msgp.Unmarshaler -func (z *contype) UnmarshalMsg(bts []byte) (o []byte, err error) { - { - var zwht uint8 - zwht, bts, err = msgp.ReadUint8Bytes(bts) - (*z) = contype(zwht) - } - if err != nil { - return - } - o = bts - return -} - -// Deprecated: Msgsize returns an upper bound estimate 
of the number of bytes occupied by the serialized message -func (z contype) Msgsize() (s int) { - s = msgp.Uint8Size - return -} - -// Deprecated: DecodeMsg implements msgp.Decodable -func (z *roaringArray) DecodeMsg(dc *msgp.Reader) (err error) { - var field []byte - _ = field - var zlqf uint32 - zlqf, err = dc.ReadMapHeader() - if err != nil { - return - } - for zlqf > 0 { - zlqf-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - return - } - switch msgp.UnsafeString(field) { - case "keys": - var zdaf uint32 - zdaf, err = dc.ReadArrayHeader() - if err != nil { - return - } - if cap(z.keys) >= int(zdaf) { - z.keys = (z.keys)[:zdaf] - } else { - z.keys = make([]uint16, zdaf) - } - for zhct := range z.keys { - z.keys[zhct], err = dc.ReadUint16() - if err != nil { - return - } - } - case "needCopyOnWrite": - var zpks uint32 - zpks, err = dc.ReadArrayHeader() - if err != nil { - return - } - if cap(z.needCopyOnWrite) >= int(zpks) { - z.needCopyOnWrite = (z.needCopyOnWrite)[:zpks] - } else { - z.needCopyOnWrite = make([]bool, zpks) - } - for zcua := range z.needCopyOnWrite { - z.needCopyOnWrite[zcua], err = dc.ReadBool() - if err != nil { - return - } - } - case "copyOnWrite": - z.copyOnWrite, err = dc.ReadBool() - if err != nil { - return - } - case "conserz": - var zjfb uint32 - zjfb, err = dc.ReadArrayHeader() - if err != nil { - return - } - if cap(z.conserz) >= int(zjfb) { - z.conserz = (z.conserz)[:zjfb] - } else { - z.conserz = make([]containerSerz, zjfb) - } - for zxhx := range z.conserz { - var zcxo uint32 - zcxo, err = dc.ReadMapHeader() - if err != nil { - return - } - for zcxo > 0 { - zcxo-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - return - } - switch msgp.UnsafeString(field) { - case "t": - { - var zeff uint8 - zeff, err = dc.ReadUint8() - z.conserz[zxhx].t = contype(zeff) - } - if err != nil { - return - } - case "r": - err = z.conserz[zxhx].r.DecodeMsg(dc) - if err != nil { - return - } - default: - err = dc.Skip() - if err != nil { - 
return - } - } - } - } - default: - err = dc.Skip() - if err != nil { - return - } - } - } - return -} - -// Deprecated: EncodeMsg implements msgp.Encodable -func (z *roaringArray) EncodeMsg(en *msgp.Writer) (err error) { - // map header, size 4 - // write "keys" - err = en.Append(0x84, 0xa4, 0x6b, 0x65, 0x79, 0x73) - if err != nil { - return err - } - err = en.WriteArrayHeader(uint32(len(z.keys))) - if err != nil { - return - } - for zhct := range z.keys { - err = en.WriteUint16(z.keys[zhct]) - if err != nil { - return - } - } - // write "needCopyOnWrite" - err = en.Append(0xaf, 0x6e, 0x65, 0x65, 0x64, 0x43, 0x6f, 0x70, 0x79, 0x4f, 0x6e, 0x57, 0x72, 0x69, 0x74, 0x65) - if err != nil { - return err - } - err = en.WriteArrayHeader(uint32(len(z.needCopyOnWrite))) - if err != nil { - return - } - for zcua := range z.needCopyOnWrite { - err = en.WriteBool(z.needCopyOnWrite[zcua]) - if err != nil { - return - } - } - // write "copyOnWrite" - err = en.Append(0xab, 0x63, 0x6f, 0x70, 0x79, 0x4f, 0x6e, 0x57, 0x72, 0x69, 0x74, 0x65) - if err != nil { - return err - } - err = en.WriteBool(z.copyOnWrite) - if err != nil { - return - } - // write "conserz" - err = en.Append(0xa7, 0x63, 0x6f, 0x6e, 0x73, 0x65, 0x72, 0x7a) - if err != nil { - return err - } - err = en.WriteArrayHeader(uint32(len(z.conserz))) - if err != nil { - return - } - for zxhx := range z.conserz { - // map header, size 2 - // write "t" - err = en.Append(0x82, 0xa1, 0x74) - if err != nil { - return err - } - err = en.WriteUint8(uint8(z.conserz[zxhx].t)) - if err != nil { - return - } - // write "r" - err = en.Append(0xa1, 0x72) - if err != nil { - return err - } - err = z.conserz[zxhx].r.EncodeMsg(en) - if err != nil { - return - } - } - return -} - -// Deprecated: MarshalMsg implements msgp.Marshaler -func (z *roaringArray) MarshalMsg(b []byte) (o []byte, err error) { - o = msgp.Require(b, z.Msgsize()) - // map header, size 4 - // string "keys" - o = append(o, 0x84, 0xa4, 0x6b, 0x65, 0x79, 0x73) - o = 
msgp.AppendArrayHeader(o, uint32(len(z.keys))) - for zhct := range z.keys { - o = msgp.AppendUint16(o, z.keys[zhct]) - } - // string "needCopyOnWrite" - o = append(o, 0xaf, 0x6e, 0x65, 0x65, 0x64, 0x43, 0x6f, 0x70, 0x79, 0x4f, 0x6e, 0x57, 0x72, 0x69, 0x74, 0x65) - o = msgp.AppendArrayHeader(o, uint32(len(z.needCopyOnWrite))) - for zcua := range z.needCopyOnWrite { - o = msgp.AppendBool(o, z.needCopyOnWrite[zcua]) - } - // string "copyOnWrite" - o = append(o, 0xab, 0x63, 0x6f, 0x70, 0x79, 0x4f, 0x6e, 0x57, 0x72, 0x69, 0x74, 0x65) - o = msgp.AppendBool(o, z.copyOnWrite) - // string "conserz" - o = append(o, 0xa7, 0x63, 0x6f, 0x6e, 0x73, 0x65, 0x72, 0x7a) - o = msgp.AppendArrayHeader(o, uint32(len(z.conserz))) - for zxhx := range z.conserz { - // map header, size 2 - // string "t" - o = append(o, 0x82, 0xa1, 0x74) - o = msgp.AppendUint8(o, uint8(z.conserz[zxhx].t)) - // string "r" - o = append(o, 0xa1, 0x72) - o, err = z.conserz[zxhx].r.MarshalMsg(o) - if err != nil { - return - } - } - return -} - -// Deprecated: UnmarshalMsg implements msgp.Unmarshaler -func (z *roaringArray) UnmarshalMsg(bts []byte) (o []byte, err error) { - var field []byte - _ = field - var zrsw uint32 - zrsw, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - return - } - for zrsw > 0 { - zrsw-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - return - } - switch msgp.UnsafeString(field) { - case "keys": - var zxpk uint32 - zxpk, bts, err = msgp.ReadArrayHeaderBytes(bts) - if err != nil { - return - } - if cap(z.keys) >= int(zxpk) { - z.keys = (z.keys)[:zxpk] - } else { - z.keys = make([]uint16, zxpk) - } - for zhct := range z.keys { - z.keys[zhct], bts, err = msgp.ReadUint16Bytes(bts) - if err != nil { - return - } - } - case "needCopyOnWrite": - var zdnj uint32 - zdnj, bts, err = msgp.ReadArrayHeaderBytes(bts) - if err != nil { - return - } - if cap(z.needCopyOnWrite) >= int(zdnj) { - z.needCopyOnWrite = (z.needCopyOnWrite)[:zdnj] - } else { - z.needCopyOnWrite = 
make([]bool, zdnj) - } - for zcua := range z.needCopyOnWrite { - z.needCopyOnWrite[zcua], bts, err = msgp.ReadBoolBytes(bts) - if err != nil { - return - } - } - case "copyOnWrite": - z.copyOnWrite, bts, err = msgp.ReadBoolBytes(bts) - if err != nil { - return - } - case "conserz": - var zobc uint32 - zobc, bts, err = msgp.ReadArrayHeaderBytes(bts) - if err != nil { - return - } - if cap(z.conserz) >= int(zobc) { - z.conserz = (z.conserz)[:zobc] - } else { - z.conserz = make([]containerSerz, zobc) - } - for zxhx := range z.conserz { - var zsnv uint32 - zsnv, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - return - } - for zsnv > 0 { - zsnv-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - return - } - switch msgp.UnsafeString(field) { - case "t": - { - var zkgt uint8 - zkgt, bts, err = msgp.ReadUint8Bytes(bts) - z.conserz[zxhx].t = contype(zkgt) - } - if err != nil { - return - } - case "r": - bts, err = z.conserz[zxhx].r.UnmarshalMsg(bts) - if err != nil { - return - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - return - } - } - } - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - return - } - } - } - o = bts - return -} - -// Deprecated: Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z *roaringArray) Msgsize() (s int) { - s = 1 + 5 + msgp.ArrayHeaderSize + (len(z.keys) * (msgp.Uint16Size)) + 16 + msgp.ArrayHeaderSize + (len(z.needCopyOnWrite) * (msgp.BoolSize)) + 12 + msgp.BoolSize + 8 + msgp.ArrayHeaderSize - for zxhx := range z.conserz { - s += 1 + 2 + msgp.Uint8Size + 2 + z.conserz[zxhx].r.Msgsize() - } - return -} diff --git a/vendor/github.com/RoaringBitmap/roaring/runcontainer.go b/vendor/github.com/RoaringBitmap/roaring/runcontainer.go index 5a0f985f1..a722760b4 100644 --- a/vendor/github.com/RoaringBitmap/roaring/runcontainer.go +++ b/vendor/github.com/RoaringBitmap/roaring/runcontainer.go @@ -44,16 +44,10 @@ import ( "unsafe" ) -//go:generate 
msgp -unexported - // runContainer16 does run-length encoding of sets of // uint16 integers. type runContainer16 struct { - iv []interval16 - card int64 - - // avoid allocation during search - myOpts searchOptions `msg:"-"` + iv []interval16 } // interval16 is the internal to runContainer16 @@ -76,8 +70,8 @@ func newInterval16Range(start, last uint16) interval16 { } // runlen returns the count of integers in the interval. -func (iv interval16) runlen() int64 { - return int64(iv.length) + 1 +func (iv interval16) runlen() int { + return int(iv.length) + 1 } func (iv interval16) last() uint16 { @@ -120,8 +114,6 @@ func (p uint16Slice) Less(i, j int) bool { return p[i] < p[j] } // Swap swaps elements i and j. func (p uint16Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } -//msgp:ignore addHelper - // addHelper helps build a runContainer16. type addHelper16 struct { runstart uint16 @@ -201,7 +193,6 @@ func newRunContainer16FromVals(alreadySorted bool, vals ...uint16) *runContainer ah.storeIval(ah.runstart, ah.runlen) } rc.iv = ah.m - rc.card = int64(ah.actuallyAdded) return rc } @@ -291,7 +282,6 @@ func newRunContainer16FromArray(arr *arrayContainer) *runContainer16 { ah.storeIval(ah.runstart, ah.runlen) } rc.iv = ah.m - rc.card = int64(ah.actuallyAdded) return rc } @@ -308,7 +298,6 @@ func (rc *runContainer16) set(alreadySorted bool, vals ...uint16) { rc2 := newRunContainer16FromVals(alreadySorted, vals...) un := rc.union(rc2) rc.iv = un.iv - rc.card = 0 } // canMerge returns true iff the intervals @@ -316,10 +305,10 @@ func (rc *runContainer16) set(alreadySorted bool, vals ...uint16) { // contiguous and so can be merged into // a single interval. 
func canMerge16(a, b interval16) bool { - if int64(a.last())+1 < int64(b.start) { + if int(a.last())+1 < int(b.start) { return false } - return int64(b.last())+1 >= int64(a.start) + return int(b.last())+1 >= int(a.start) } // haveOverlap differs from canMerge in that @@ -328,10 +317,10 @@ func canMerge16(a, b interval16) bool { // it would be the empty set, and we return // false). func haveOverlap16(a, b interval16) bool { - if int64(a.last())+1 <= int64(b.start) { + if int(a.last())+1 <= int(b.start) { return false } - return int64(b.last())+1 > int64(a.start) + return int(b.last())+1 > int(a.start) } // mergeInterval16s joins a and b into a @@ -392,11 +381,11 @@ func (rc *runContainer16) union(b *runContainer16) *runContainer16 { var m []interval16 - alim := int64(len(rc.iv)) - blim := int64(len(b.iv)) + alim := int(len(rc.iv)) + blim := int(len(b.iv)) - var na int64 // next from a - var nb int64 // next from b + var na int // next from a + var nb int // next from b // merged holds the current merge output, which might // get additional merges before being appended to m. 
@@ -416,12 +405,12 @@ func (rc *runContainer16) union(b *runContainer16) *runContainer16 { mergedUpdated := false if canMerge16(cura, merged) { merged = mergeInterval16s(cura, merged) - na = rc.indexOfIntervalAtOrAfter(int64(merged.last())+1, na+1) + na = rc.indexOfIntervalAtOrAfter(int(merged.last())+1, na+1) mergedUpdated = true } if canMerge16(curb, merged) { merged = mergeInterval16s(curb, merged) - nb = b.indexOfIntervalAtOrAfter(int64(merged.last())+1, nb+1) + nb = b.indexOfIntervalAtOrAfter(int(merged.last())+1, nb+1) mergedUpdated = true } if !mergedUpdated { @@ -444,8 +433,8 @@ func (rc *runContainer16) union(b *runContainer16) *runContainer16 { } else { merged = mergeInterval16s(cura, curb) mergedUsed = true - na = rc.indexOfIntervalAtOrAfter(int64(merged.last())+1, na+1) - nb = b.indexOfIntervalAtOrAfter(int64(merged.last())+1, nb+1) + na = rc.indexOfIntervalAtOrAfter(int(merged.last())+1, na+1) + nb = b.indexOfIntervalAtOrAfter(int(merged.last())+1, nb+1) } } } @@ -464,7 +453,7 @@ func (rc *runContainer16) union(b *runContainer16) *runContainer16 { cura = rc.iv[na] if canMerge16(cura, merged) { merged = mergeInterval16s(cura, merged) - na = rc.indexOfIntervalAtOrAfter(int64(merged.last())+1, na+1) + na = rc.indexOfIntervalAtOrAfter(int(merged.last())+1, na+1) } else { break aAdds } @@ -478,7 +467,7 @@ func (rc *runContainer16) union(b *runContainer16) *runContainer16 { curb = b.iv[nb] if canMerge16(curb, merged) { merged = mergeInterval16s(curb, merged) - nb = b.indexOfIntervalAtOrAfter(int64(merged.last())+1, nb+1) + nb = b.indexOfIntervalAtOrAfter(int(merged.last())+1, nb+1) } else { break bAdds } @@ -500,17 +489,17 @@ func (rc *runContainer16) union(b *runContainer16) *runContainer16 { } // unionCardinality returns the cardinality of the merger of two runContainer16s, the union of rc and b. 
-func (rc *runContainer16) unionCardinality(b *runContainer16) uint64 { +func (rc *runContainer16) unionCardinality(b *runContainer16) uint { // rc is also known as 'a' here, but golint insisted we // call it rc for consistency with the rest of the methods. - answer := uint64(0) + answer := uint(0) - alim := int64(len(rc.iv)) - blim := int64(len(b.iv)) + alim := int(len(rc.iv)) + blim := int(len(b.iv)) - var na int64 // next from a - var nb int64 // next from b + var na int // next from a + var nb int // next from b // merged holds the current merge output, which might // get additional merges before being appended to m. @@ -530,18 +519,18 @@ func (rc *runContainer16) unionCardinality(b *runContainer16) uint64 { mergedUpdated := false if canMerge16(cura, merged) { merged = mergeInterval16s(cura, merged) - na = rc.indexOfIntervalAtOrAfter(int64(merged.last())+1, na+1) + na = rc.indexOfIntervalAtOrAfter(int(merged.last())+1, na+1) mergedUpdated = true } if canMerge16(curb, merged) { merged = mergeInterval16s(curb, merged) - nb = b.indexOfIntervalAtOrAfter(int64(merged.last())+1, nb+1) + nb = b.indexOfIntervalAtOrAfter(int(merged.last())+1, nb+1) mergedUpdated = true } if !mergedUpdated { // we know that merged is disjoint from cura and curb //m = append(m, merged) - answer += uint64(merged.last()) - uint64(merged.start) + 1 + answer += uint(merged.last()) - uint(merged.start) + 1 mergedUsed = false } continue @@ -550,19 +539,19 @@ func (rc *runContainer16) unionCardinality(b *runContainer16) uint64 { // !mergedUsed if !canMerge16(cura, curb) { if cura.start < curb.start { - answer += uint64(cura.last()) - uint64(cura.start) + 1 + answer += uint(cura.last()) - uint(cura.start) + 1 //m = append(m, cura) na++ } else { - answer += uint64(curb.last()) - uint64(curb.start) + 1 + answer += uint(curb.last()) - uint(curb.start) + 1 //m = append(m, curb) nb++ } } else { merged = mergeInterval16s(cura, curb) mergedUsed = true - na = 
rc.indexOfIntervalAtOrAfter(int64(merged.last())+1, na+1) - nb = b.indexOfIntervalAtOrAfter(int64(merged.last())+1, nb+1) + na = rc.indexOfIntervalAtOrAfter(int(merged.last())+1, na+1) + nb = b.indexOfIntervalAtOrAfter(int(merged.last())+1, nb+1) } } } @@ -581,7 +570,7 @@ func (rc *runContainer16) unionCardinality(b *runContainer16) uint64 { cura = rc.iv[na] if canMerge16(cura, merged) { merged = mergeInterval16s(cura, merged) - na = rc.indexOfIntervalAtOrAfter(int64(merged.last())+1, na+1) + na = rc.indexOfIntervalAtOrAfter(int(merged.last())+1, na+1) } else { break aAdds } @@ -595,7 +584,7 @@ func (rc *runContainer16) unionCardinality(b *runContainer16) uint64 { curb = b.iv[nb] if canMerge16(curb, merged) { merged = mergeInterval16s(curb, merged) - nb = b.indexOfIntervalAtOrAfter(int64(merged.last())+1, nb+1) + nb = b.indexOfIntervalAtOrAfter(int(merged.last())+1, nb+1) } else { break bAdds } @@ -604,23 +593,20 @@ func (rc *runContainer16) unionCardinality(b *runContainer16) uint64 { } //m = append(m, merged) - answer += uint64(merged.last()) - uint64(merged.start) + 1 + answer += uint(merged.last()) - uint(merged.start) + 1 } for _, r := range rc.iv[na:] { - answer += uint64(r.last()) - uint64(r.start) + 1 + answer += uint(r.last()) - uint(r.start) + 1 } for _, r := range b.iv[nb:] { - answer += uint64(r.last()) - uint64(r.start) + 1 + answer += uint(r.last()) - uint(r.start) + 1 } return answer } // indexOfIntervalAtOrAfter is a helper for union. 
-func (rc *runContainer16) indexOfIntervalAtOrAfter(key int64, startIndex int64) int64 { - rc.myOpts.startIndex = startIndex - rc.myOpts.endxIndex = 0 - - w, already, _ := rc.search(key, &rc.myOpts) +func (rc *runContainer16) indexOfIntervalAtOrAfter(key int, startIndex int) int { + w, already, _ := rc.searchRange(key, startIndex, 0) if already { return w } @@ -632,8 +618,8 @@ func (rc *runContainer16) indexOfIntervalAtOrAfter(key int64, startIndex int64) func (rc *runContainer16) intersect(b *runContainer16) *runContainer16 { a := rc - numa := int64(len(a.iv)) - numb := int64(len(b.iv)) + numa := int(len(a.iv)) + numb := int(len(b.iv)) res := &runContainer16{} if numa == 0 || numb == 0 { return res @@ -647,21 +633,21 @@ func (rc *runContainer16) intersect(b *runContainer16) *runContainer16 { var output []interval16 - var acuri int64 - var bcuri int64 + var acuri int + var bcuri int - astart := int64(a.iv[acuri].start) - bstart := int64(b.iv[bcuri].start) + astart := int(a.iv[acuri].start) + bstart := int(b.iv[bcuri].start) var intersection interval16 - var leftoverstart int64 + var leftoverstart int var isOverlap, isLeftoverA, isLeftoverB bool var done bool toploop: for acuri < numa && bcuri < numb { isOverlap, isLeftoverA, isLeftoverB, leftoverstart, intersection = - intersectWithLeftover16(astart, int64(a.iv[acuri].last()), bstart, int64(b.iv[bcuri].last())) + intersectWithLeftover16(astart, int(a.iv[acuri].last()), bstart, int(b.iv[bcuri].last())) if !isOverlap { switch { @@ -670,17 +656,14 @@ toploop: if done { break toploop } - astart = int64(a.iv[acuri].start) + astart = int(a.iv[acuri].start) case astart > bstart: bcuri, done = b.findNextIntervalThatIntersectsStartingFrom(bcuri+1, astart) if done { break toploop } - bstart = int64(b.iv[bcuri].start) - - //default: - // panic("impossible that astart == bstart, since !isOverlap") + bstart = int(b.iv[bcuri].start) } } else { @@ -695,7 +678,7 @@ toploop: if bcuri >= numb { break toploop } - bstart = 
int64(b.iv[bcuri].start) + bstart = int(b.iv[bcuri].start) case isLeftoverB: // note that we change bstart without advancing bcuri, // since we need to capture any 2ndary intersections with b.iv[bcuri] @@ -704,27 +687,23 @@ toploop: if acuri >= numa { break toploop } - astart = int64(a.iv[acuri].start) + astart = int(a.iv[acuri].start) default: // neither had leftover, both completely consumed - // optionally, assert for sanity: - //if a.iv[acuri].endx != b.iv[bcuri].endx { - // panic("huh? should only be possible that endx agree now!") - //} // advance to next a interval acuri++ if acuri >= numa { break toploop } - astart = int64(a.iv[acuri].start) + astart = int(a.iv[acuri].start) // advance to next b interval bcuri++ if bcuri >= numb { break toploop } - bstart = int64(b.iv[bcuri].start) + bstart = int(b.iv[bcuri].start) } } } // end for toploop @@ -739,12 +718,12 @@ toploop: // intersectCardinality returns the cardinality of the // intersection of rc (also known as 'a') and b. -func (rc *runContainer16) intersectCardinality(b *runContainer16) int64 { - answer := int64(0) +func (rc *runContainer16) intersectCardinality(b *runContainer16) int { + answer := int(0) a := rc - numa := int64(len(a.iv)) - numb := int64(len(b.iv)) + numa := int(len(a.iv)) + numb := int(len(b.iv)) if numa == 0 || numb == 0 { return 0 } @@ -755,14 +734,14 @@ func (rc *runContainer16) intersectCardinality(b *runContainer16) int64 { } } - var acuri int64 - var bcuri int64 + var acuri int + var bcuri int - astart := int64(a.iv[acuri].start) - bstart := int64(b.iv[bcuri].start) + astart := int(a.iv[acuri].start) + bstart := int(b.iv[bcuri].start) var intersection interval16 - var leftoverstart int64 + var leftoverstart int var isOverlap, isLeftoverA, isLeftoverB bool var done bool pass := 0 @@ -771,7 +750,7 @@ toploop: pass++ isOverlap, isLeftoverA, isLeftoverB, leftoverstart, intersection = - intersectWithLeftover16(astart, int64(a.iv[acuri].last()), bstart, int64(b.iv[bcuri].last())) + 
intersectWithLeftover16(astart, int(a.iv[acuri].last()), bstart, int(b.iv[bcuri].last())) if !isOverlap { switch { @@ -780,22 +759,19 @@ toploop: if done { break toploop } - astart = int64(a.iv[acuri].start) + astart = int(a.iv[acuri].start) case astart > bstart: bcuri, done = b.findNextIntervalThatIntersectsStartingFrom(bcuri+1, astart) if done { break toploop } - bstart = int64(b.iv[bcuri].start) - - //default: - // panic("impossible that astart == bstart, since !isOverlap") + bstart = int(b.iv[bcuri].start) } } else { // isOverlap - answer += int64(intersection.last()) - int64(intersection.start) + 1 + answer += int(intersection.last()) - int(intersection.start) + 1 switch { case isLeftoverA: // note that we change astart without advancing acuri, @@ -805,7 +781,7 @@ toploop: if bcuri >= numb { break toploop } - bstart = int64(b.iv[bcuri].start) + bstart = int(b.iv[bcuri].start) case isLeftoverB: // note that we change bstart without advancing bcuri, // since we need to capture any 2ndary intersections with b.iv[bcuri] @@ -814,27 +790,23 @@ toploop: if acuri >= numa { break toploop } - astart = int64(a.iv[acuri].start) + astart = int(a.iv[acuri].start) default: // neither had leftover, both completely consumed - // optionally, assert for sanity: - //if a.iv[acuri].endx != b.iv[bcuri].endx { - // panic("huh? should only be possible that endx agree now!") - //} // advance to next a interval acuri++ if acuri >= numa { break toploop } - astart = int64(a.iv[acuri].start) + astart = int(a.iv[acuri].start) // advance to next b interval bcuri++ if bcuri >= numb { break toploop } - bstart = int64(b.iv[bcuri].start) + bstart = int(b.iv[bcuri].start) } } } // end for toploop @@ -844,7 +816,7 @@ toploop: // get returns true iff key is in the container. 
func (rc *runContainer16) contains(key uint16) bool { - _, in, _ := rc.search(int64(key), nil) + _, in, _ := rc.search(int(key)) return in } @@ -853,22 +825,7 @@ func (rc *runContainer16) numIntervals() int { return len(rc.iv) } -// searchOptions allows us to accelerate search with -// prior knowledge of (mostly lower) bounds. This is used by Union -// and Intersect. -type searchOptions struct { - // start here instead of at 0 - startIndex int64 - - // upper bound instead of len(rc.iv); - // endxIndex == 0 means ignore the bound and use - // endxIndex == n ==len(rc.iv) which is also - // naturally the default for search() - // when opt = nil. - endxIndex int64 -} - -// search returns alreadyPresent to indicate if the +// searchRange returns alreadyPresent to indicate if the // key is already in one of our interval16s. // // If key is alreadyPresent, then whichInterval16 tells @@ -892,24 +849,16 @@ type searchOptions struct { // // runContainer16.search always returns whichInterval16 < len(rc.iv). // -// If not nil, opts can be used to further restrict -// the search space. +// The search space is from startIndex to endxIndex. If endxIndex is set to zero, then there +// no upper bound. 
// -func (rc *runContainer16) search(key int64, opts *searchOptions) (whichInterval16 int64, alreadyPresent bool, numCompares int) { - n := int64(len(rc.iv)) +func (rc *runContainer16) searchRange(key int, startIndex int, endxIndex int) (whichInterval16 int, alreadyPresent bool, numCompares int) { + n := int(len(rc.iv)) if n == 0 { return -1, false, 0 } - - startIndex := int64(0) - endxIndex := n - if opts != nil { - startIndex = opts.startIndex - - // let endxIndex == 0 mean no effect - if opts.endxIndex > 0 { - endxIndex = opts.endxIndex - } + if endxIndex == 0 { + endxIndex = n } // sort.Search returns the smallest index i @@ -927,7 +876,7 @@ func (rc *runContainer16) search(key int64, opts *searchOptions) (whichInterval1 h := i + (j-i)/2 // avoid overflow when computing h as the bisector // i <= h < j numCompares++ - if !(key < int64(rc.iv[h].start)) { + if !(key < int(rc.iv[h].start)) { i = h + 1 } else { j = h @@ -947,7 +896,7 @@ func (rc *runContainer16) search(key int64, opts *searchOptions) (whichInterval1 if below == n { // all falses => key is >= start of all interval16s // ... so does it belong to the last interval16? - if key < int64(rc.iv[n-1].last())+1 { + if key < int(rc.iv[n-1].last())+1 { // yes, it belongs to the last interval16 alreadyPresent = true return @@ -968,7 +917,7 @@ func (rc *runContainer16) search(key int64, opts *searchOptions) (whichInterval1 // key is < rc.iv[below].start // is key in below-1 interval16? - if key >= int64(rc.iv[below-1].start) && key < int64(rc.iv[below-1].last())+1 { + if key >= int(rc.iv[below-1].start) && key < int(rc.iv[below-1].last())+1 { // yes, it is. key is in below-1 interval16. alreadyPresent = true return @@ -979,28 +928,55 @@ func (rc *runContainer16) search(key int64, opts *searchOptions) (whichInterval1 return } -// cardinality returns the count of the integers stored in the -// runContainer16. 
-func (rc *runContainer16) cardinality() int64 { - if len(rc.iv) == 0 { - rc.card = 0 - return 0 - } - if rc.card > 0 { - return rc.card // already cached - } +// search returns alreadyPresent to indicate if the +// key is already in one of our interval16s. +// +// If key is alreadyPresent, then whichInterval16 tells +// you where. +// +// If key is not already present, then whichInterval16 is +// set as follows: +// +// a) whichInterval16 == len(rc.iv)-1 if key is beyond our +// last interval16 in rc.iv; +// +// b) whichInterval16 == -1 if key is before our first +// interval16 in rc.iv; +// +// c) whichInterval16 is set to the minimum index of rc.iv +// which comes strictly before the key; +// so rc.iv[whichInterval16].last < key, +// and if whichInterval16+1 exists, then key < rc.iv[whichInterval16+1].start +// (Note that whichInterval16+1 won't exist when +// whichInterval16 is the last interval.) +// +// runContainer16.search always returns whichInterval16 < len(rc.iv). +// +func (rc *runContainer16) search(key int) (whichInterval16 int, alreadyPresent bool, numCompares int) { + return rc.searchRange(key, 0, 0) +} + +// getCardinality returns the count of the integers stored in the +// runContainer16. The running complexity depends on the size +// of the container. +func (rc *runContainer16) getCardinality() int { // have to compute it - var n int64 + n := 0 for _, p := range rc.iv { n += p.runlen() } - rc.card = n // cache it return n } +// isEmpty returns true if the container is empty. +// It runs in constant time. +func (rc *runContainer16) isEmpty() bool { + return len(rc.iv) == 0 +} + // AsSlice decompresses the contents into a []uint16 slice. 
func (rc *runContainer16) AsSlice() []uint16 { - s := make([]uint16, rc.cardinality()) + s := make([]uint16, rc.getCardinality()) j := 0 for _, p := range rc.iv { for i := p.start; i <= p.last(); i++ { @@ -1070,19 +1046,15 @@ func (rc *runContainer16) Add(k uint16) (wasNew bool) { // but note that some unit tests use this method to build up test // runcontainers without calling runOptimize - k64 := int64(k) + k64 := int(k) - index, present, _ := rc.search(k64, nil) + index, present, _ := rc.search(k64) if present { return // already there } wasNew = true - // increment card if it is cached already - if rc.card > 0 { - rc.card++ - } - n := int64(len(rc.iv)) + n := int(len(rc.iv)) if index == -1 { // we may need to extend the first run if n > 0 { @@ -1099,7 +1071,7 @@ func (rc *runContainer16) Add(k uint16) (wasNew bool) { // are we off the end? handle both index == n and index == n-1: if index >= n-1 { - if int64(rc.iv[n-1].last())+1 == k64 { + if int(rc.iv[n-1].last())+1 == k64 { rc.iv[n-1].length++ return } @@ -1118,7 +1090,7 @@ func (rc *runContainer16) Add(k uint16) (wasNew bool) { right := index + 1 // are we fusing left and right by adding k? - if int64(rc.iv[left].last())+1 == k64 && int64(rc.iv[right].start) == k64+1 { + if int(rc.iv[left].last())+1 == k64 && int(rc.iv[right].start) == k64+1 { // fuse into left rc.iv[left].length = rc.iv[right].last() - rc.iv[left].start // remove redundant right @@ -1127,14 +1099,14 @@ func (rc *runContainer16) Add(k uint16) (wasNew bool) { } // are we an addition to left? - if int64(rc.iv[left].last())+1 == k64 { + if int(rc.iv[left].last())+1 == k64 { // yes rc.iv[left].length++ return } // are we an addition to right? 
- if int64(rc.iv[right].start) == k64+1 { + if int(rc.iv[right].start) == k64+1 { // yes rc.iv[right].start = k rc.iv[right].length++ @@ -1147,13 +1119,11 @@ func (rc *runContainer16) Add(k uint16) (wasNew bool) { return } -//msgp:ignore runIterator - // runIterator16 advice: you must call hasNext() // before calling next()/peekNext() to insure there are contents. type runIterator16 struct { rc *runContainer16 - curIndex int64 + curIndex int curPosInIndex uint16 } @@ -1178,8 +1148,8 @@ func (rc *runContainer16) iterate(cb func(x uint16) bool) bool { // returns true when there is at least one more value // available in the iteration sequence. func (ri *runIterator16) hasNext() bool { - return int64(len(ri.rc.iv)) > ri.curIndex+1 || - (int64(len(ri.rc.iv)) == ri.curIndex+1 && ri.rc.iv[ri.curIndex].length >= ri.curPosInIndex) + return int(len(ri.rc.iv)) > ri.curIndex+1 || + (int(len(ri.rc.iv)) == ri.curIndex+1 && ri.rc.iv[ri.curIndex].length >= ri.curPosInIndex) } // next returns the next value in the iteration sequence. @@ -1207,13 +1177,8 @@ func (ri *runIterator16) advanceIfNeeded(minval uint16) { return } - opt := &searchOptions{ - startIndex: ri.curIndex, - endxIndex: int64(len(ri.rc.iv)), - } - // interval cannot be -1 because of minval > peekNext - interval, isPresent, _ := ri.rc.search(int64(minval), opt) + interval, isPresent, _ := ri.rc.searchRange(int(minval), ri.curIndex, int(len(ri.rc.iv))) // if the minval is present, set the curPosIndex at the right position if isPresent { @@ -1231,13 +1196,13 @@ func (ri *runIterator16) advanceIfNeeded(minval uint16) { // before calling next() to insure there are contents. type runReverseIterator16 struct { rc *runContainer16 - curIndex int64 // index into rc.iv + curIndex int // index into rc.iv curPosInIndex uint16 // offset in rc.iv[curIndex] } // newRunReverseIterator16 returns a new empty run iterator. 
func (rc *runContainer16) newRunReverseIterator16() *runReverseIterator16 { - index := int64(len(rc.iv)) - 1 + index := int(len(rc.iv)) - 1 pos := uint16(0) if index >= 0 { @@ -1310,7 +1275,48 @@ func (ri *runIterator16) nextMany(hs uint32, buf []uint32) int { ri.curPosInIndex = 0 ri.curIndex++ - if ri.curIndex == int64(len(ri.rc.iv)) { + if ri.curIndex == int(len(ri.rc.iv)) { + break + } + } else { + ri.curPosInIndex += uint16(moreVals) //moreVals always fits in uint16 + } + } + + return n +} + +func (ri *runIterator16) nextMany64(hs uint64, buf []uint64) int { + n := 0 + + if !ri.hasNext() { + return n + } + + // start and end are inclusive + for n < len(buf) { + moreVals := 0 + + if ri.rc.iv[ri.curIndex].length >= ri.curPosInIndex { + // add as many as you can from this seq + moreVals = minOfInt(int(ri.rc.iv[ri.curIndex].length-ri.curPosInIndex)+1, len(buf)-n) + base := uint64(ri.rc.iv[ri.curIndex].start+ri.curPosInIndex) | hs + + // allows BCE + buf2 := buf[n : n+moreVals] + for i := range buf2 { + buf2[i] = base + uint64(i) + } + + // update values + n += moreVals + } + + if moreVals+int(ri.curPosInIndex) > int(ri.rc.iv[ri.curIndex].length) { + ri.curPosInIndex = 0 + ri.curIndex++ + + if ri.curIndex == int(len(ri.rc.iv)) { break } } else { @@ -1324,8 +1330,8 @@ func (ri *runIterator16) nextMany(hs uint32, buf []uint32) int { // remove removes key from the container. func (rc *runContainer16) removeKey(key uint16) (wasPresent bool) { - var index int64 - index, wasPresent, _ = rc.search(int64(key), nil) + var index int + index, wasPresent, _ = rc.search(int(key)) if !wasPresent { return // already removed, nothing to do. 
} @@ -1336,15 +1342,14 @@ func (rc *runContainer16) removeKey(key uint16) (wasPresent bool) { // internal helper functions -func (rc *runContainer16) deleteAt(curIndex *int64, curPosInIndex *uint16) { - rc.card-- +func (rc *runContainer16) deleteAt(curIndex *int, curPosInIndex *uint16) { ci := *curIndex pos := *curPosInIndex // are we first, last, or in the middle of our interval16? switch { case pos == 0: - if int64(rc.iv[ci].length) == 0 { + if int(rc.iv[ci].length) == 0 { // our interval disappears rc.iv = append(rc.iv[:ci], rc.iv[ci+1:]...) // curIndex stays the same, since the delete did @@ -1365,8 +1370,8 @@ func (rc *runContainer16) deleteAt(curIndex *int64, curPosInIndex *uint16) { // split into two, adding an interval16 new0 := newInterval16Range(rc.iv[ci].start, rc.iv[ci].start+*curPosInIndex-1) - new1start := int64(rc.iv[ci].start+*curPosInIndex) + 1 - if new1start > int64(MaxUint16) { + new1start := int(rc.iv[ci].start+*curPosInIndex) + 1 + if new1start > int(MaxUint16) { panic("overflow?!?!") } new1 := newInterval16Range(uint16(new1start), rc.iv[ci].last()) @@ -1379,14 +1384,14 @@ func (rc *runContainer16) deleteAt(curIndex *int64, curPosInIndex *uint16) { } -func have4Overlap16(astart, alast, bstart, blast int64) bool { +func have4Overlap16(astart, alast, bstart, blast int) bool { if alast+1 <= bstart { return false } return blast+1 > astart } -func intersectWithLeftover16(astart, alast, bstart, blast int64) (isOverlap, isLeftoverA, isLeftoverB bool, leftoverstart int64, intersection interval16) { +func intersectWithLeftover16(astart, alast, bstart, blast int) (isOverlap, isLeftoverA, isLeftoverB bool, leftoverstart int, intersection interval16) { if !have4Overlap16(astart, alast, bstart, blast) { return } @@ -1416,17 +1421,13 @@ func intersectWithLeftover16(astart, alast, bstart, blast int64) (isOverlap, isL return } -func (rc *runContainer16) findNextIntervalThatIntersectsStartingFrom(startIndex int64, key int64) (index int64, done bool) { - - 
rc.myOpts.startIndex = startIndex - rc.myOpts.endxIndex = 0 - - w, _, _ := rc.search(key, &rc.myOpts) +func (rc *runContainer16) findNextIntervalThatIntersectsStartingFrom(startIndex int, key int) (index int, done bool) { + w, _, _ := rc.searchRange(key, startIndex, 0) // rc.search always returns w < len(rc.iv) if w < startIndex { // not found and comes before lower bound startIndex, // so just use the lower bound. - if startIndex == int64(len(rc.iv)) { + if startIndex == int(len(rc.iv)) { // also this bump up means that we are done return startIndex, true } @@ -1444,25 +1445,6 @@ func sliceToString16(m []interval16) string { return s } -// selectInt16 returns the j-th value in the container. -// We panic of j is out of bounds. -func (rc *runContainer16) selectInt16(j uint16) int { - n := rc.cardinality() - if int64(j) > n { - panic(fmt.Sprintf("Cannot select %v since Cardinality is %v", j, n)) - } - - var offset int64 - for k := range rc.iv { - nextOffset := offset + rc.iv[k].runlen() - if nextOffset > int64(j) { - return int(int64(rc.iv[k].start) + (int64(j) - offset)) - } - offset = nextOffset - } - panic(fmt.Sprintf("Cannot select %v since Cardinality is %v", j, n)) -} - // helper for invert func (rc *runContainer16) invertlastInterval(origin uint16, lastIdx int) []interval16 { cur := rc.iv[lastIdx] @@ -1494,7 +1476,7 @@ func (rc *runContainer16) invert() *runContainer16 { case 1: return &runContainer16{iv: rc.invertlastInterval(0, 0)} } - var invstart int64 + var invstart int ult := ni - 1 for i, cur := range rc.iv { if i == ult { @@ -1513,7 +1495,7 @@ func (rc *runContainer16) invert() *runContainer16 { if cur.start > 0 { m = append(m, newInterval16Range(uint16(invstart), cur.start-1)) } - invstart = int64(cur.last() + 1) + invstart = int(cur.last() + 1) } return &runContainer16{iv: m} } @@ -1526,7 +1508,7 @@ func (iv interval16) isSuperSetOf(b interval16) bool { return iv.start <= b.start && b.last() <= iv.last() } -func (iv interval16) subtractInterval(del 
interval16) (left []interval16, delcount int64) { +func (iv interval16) subtractInterval(del interval16) (left []interval16, delcount int) { isect, isEmpty := intersectInterval16s(iv, del) if isEmpty { @@ -1551,7 +1533,7 @@ func (iv interval16) subtractInterval(del interval16) (left []interval16, delcou func (rc *runContainer16) isubtract(del interval16) { origiv := make([]interval16, len(rc.iv)) copy(origiv, rc.iv) - n := int64(len(rc.iv)) + n := int(len(rc.iv)) if n == 0 { return // already done. } @@ -1562,9 +1544,8 @@ func (rc *runContainer16) isubtract(del interval16) { } // INVAR there is some intersection between rc and del - istart, startAlready, _ := rc.search(int64(del.start), nil) - ilast, lastAlready, _ := rc.search(int64(del.last()), nil) - rc.card = -1 + istart, startAlready, _ := rc.search(int(del.start)) + ilast, lastAlready, _ := rc.search(int(del.last())) if istart == -1 { if ilast == n-1 && !lastAlready { rc.iv = nil @@ -1579,8 +1560,8 @@ func (rc *runContainer16) isubtract(del interval16) { // would overwrite values in iv b/c res0 can have len 2. so // write to origiv instead. lost := 1 + ilast - istart - changeSize := int64(len(res0)) - lost - newSize := int64(len(rc.iv)) + changeSize + changeSize := int(len(res0)) - lost + newSize := int(len(rc.iv)) + changeSize // rc.iv = append(pre, caboose...) // return @@ -1588,19 +1569,19 @@ func (rc *runContainer16) isubtract(del interval16) { if ilast != istart { res1, _ := rc.iv[ilast].subtractInterval(del) res0 = append(res0, res1...) 
- changeSize = int64(len(res0)) - lost - newSize = int64(len(rc.iv)) + changeSize + changeSize = int(len(res0)) - lost + newSize = int(len(rc.iv)) + changeSize } switch { case changeSize < 0: // shrink - copy(rc.iv[istart+int64(len(res0)):], rc.iv[ilast+1:]) - copy(rc.iv[istart:istart+int64(len(res0))], res0) + copy(rc.iv[istart+int(len(res0)):], rc.iv[ilast+1:]) + copy(rc.iv[istart:istart+int(len(res0))], res0) rc.iv = rc.iv[:newSize] return case changeSize == 0: // stay the same - copy(rc.iv[istart:istart+int64(len(res0))], res0) + copy(rc.iv[istart:istart+int(len(res0))], res0) return default: // changeSize > 0 is only possible when ilast == istart. @@ -1657,7 +1638,7 @@ func (rc *runContainer16) isubtract(del interval16) { // INVAR: ilast < n-1 lost := ilast - istart changeSize := -lost - newSize := int64(len(rc.iv)) + changeSize + newSize := int(len(rc.iv)) + changeSize if changeSize != 0 { copy(rc.iv[ilast+1+changeSize:], rc.iv[ilast+1:]) } @@ -1674,8 +1655,8 @@ func (rc *runContainer16) isubtract(del interval16) { rc.iv[istart] = res0[0] } lost := 1 + (ilast - istart) - changeSize := int64(len(res0)) - lost - newSize := int64(len(rc.iv)) + changeSize + changeSize := int(len(res0)) - lost + newSize := int(len(rc.iv)) + changeSize if changeSize != 0 { copy(rc.iv[ilast+1+changeSize:], rc.iv[ilast+1:]) } @@ -1686,8 +1667,8 @@ func (rc *runContainer16) isubtract(del interval16) { // we can only shrink or stay the same size res1, _ := rc.iv[ilast].subtractInterval(del) lost := ilast - istart - changeSize := int64(len(res1)) - lost - newSize := int64(len(rc.iv)) + changeSize + changeSize := int(len(res1)) - lost + newSize := int(len(rc.iv)) + changeSize if changeSize != 0 { // move the tail first to make room for res1 copy(rc.iv[ilast+1+changeSize:], rc.iv[ilast+1:]) @@ -1891,8 +1872,6 @@ func (rc *runContainer16) iand(a container) container { } func (rc *runContainer16) inplaceIntersect(rc2 *runContainer16) container { - // TODO: optimize by doing less allocation, 
possibly? - // sect will be new sect := rc.intersect(rc2) *rc = *sect return rc @@ -1946,17 +1925,18 @@ func (rc *runContainer16) andNot(a container) container { panic("unsupported container type") } -func (rc *runContainer16) fillLeastSignificant16bits(x []uint32, i int, mask uint32) { - k := 0 - var val int64 +func (rc *runContainer16) fillLeastSignificant16bits(x []uint32, i int, mask uint32) int { + k := i + var val int for _, p := range rc.iv { n := p.runlen() - for j := int64(0); j < n; j++ { - val = int64(p.start) + j - x[k+i] = uint32(val) | mask + for j := int(0); j < n; j++ { + val = int(p.start) + j + x[k] = uint32(val) | mask k++ } } + return k } func (rc *runContainer16) getShortIterator() shortPeekable { @@ -1975,8 +1955,11 @@ func (rc *runContainer16) getManyIterator() manyIterable { // is still abe to express 2^16 because it is an int not an uint16. func (rc *runContainer16) iaddRange(firstOfRange, endx int) container { - if firstOfRange >= endx { - panic(fmt.Sprintf("invalid %v = endx >= firstOfRange", endx)) + if firstOfRange > endx { + panic(fmt.Sprintf("invalid %v = endx > firstOfRange", endx)) + } + if firstOfRange == endx { + return rc } addme := newRunContainer16TakeOwnership([]interval16{ { @@ -1990,10 +1973,13 @@ func (rc *runContainer16) iaddRange(firstOfRange, endx int) container { // remove the values in the range [firstOfRange,endx) func (rc *runContainer16) iremoveRange(firstOfRange, endx int) container { - if firstOfRange >= endx { + if firstOfRange > endx { panic(fmt.Sprintf("request to iremove empty set [%v, %v),"+ " nothing to do.", firstOfRange, endx)) - //return rc + } + // empty removal + if firstOfRange == endx { + return rc } x := newInterval16Range(uint16(firstOfRange), uint16(endx-1)) rc.isubtract(x) @@ -2002,8 +1988,8 @@ func (rc *runContainer16) iremoveRange(firstOfRange, endx int) container { // not flip the values in the range [firstOfRange,endx) func (rc *runContainer16) not(firstOfRange, endx int) container { - if 
firstOfRange >= endx { - panic(fmt.Sprintf("invalid %v = endx >= firstOfRange = %v", endx, firstOfRange)) + if firstOfRange > endx { + panic(fmt.Sprintf("invalid %v = endx > firstOfRange = %v", endx, firstOfRange)) } return rc.Not(firstOfRange, endx) @@ -2023,8 +2009,8 @@ func (rc *runContainer16) not(firstOfRange, endx int) container { // func (rc *runContainer16) Not(firstOfRange, endx int) *runContainer16 { - if firstOfRange >= endx { - panic(fmt.Sprintf("invalid %v = endx >= firstOfRange == %v", endx, firstOfRange)) + if firstOfRange > endx { + panic(fmt.Sprintf("invalid %v = endx > firstOfRange == %v", endx, firstOfRange)) } if firstOfRange >= endx { @@ -2162,9 +2148,21 @@ func (rc *runContainer16) orBitmapContainerCardinality(bc *bitmapContainer) int // orArray finds the union of rc and ac. func (rc *runContainer16) orArray(ac *arrayContainer) container { - bc1 := newBitmapContainerFromRun(rc) - bc2 := ac.toBitmapContainer() - return bc1.orBitmap(bc2) + if ac.isEmpty() { + return rc.clone() + } + if rc.isEmpty() { + return ac.clone() + } + intervals, cardMinusOne := runArrayUnionToRuns(rc, ac) + result := newRunContainer16TakeOwnership(intervals) + if len(intervals) >= 2048 && cardMinusOne >= arrayDefaultMaxSize { + return newBitmapContainerFromRun(result) + } + if len(intervals)*2 > 1+int(cardMinusOne) { + return result.toArrayContainer() + } + return result } // orArray finds the union of rc and ac. 
@@ -2189,8 +2187,8 @@ func (rc *runContainer16) ior(a container) container { func (rc *runContainer16) inplaceUnion(rc2 *runContainer16) container { for _, p := range rc2.iv { - last := int64(p.last()) - for i := int64(p.start); i <= last; i++ { + last := int(p.last()) + for i := int(p.start); i <= last; i++ { rc.Add(uint16(i)) } } @@ -2207,13 +2205,88 @@ func (rc *runContainer16) iorBitmapContainer(bc *bitmapContainer) container { } func (rc *runContainer16) iorArray(ac *arrayContainer) container { - it := ac.getShortIterator() - for it.hasNext() { - rc.Add(it.next()) + if rc.isEmpty() { + return ac.clone() + } + if ac.isEmpty() { + return rc + } + var cardMinusOne uint16 + //TODO: perform the union algorithm in-place using rc.iv + // this can be done with methods like the in-place array container union + // but maybe lazily moving the remaining elements back. + rc.iv, cardMinusOne = runArrayUnionToRuns(rc, ac) + if len(rc.iv) >= 2048 && cardMinusOne >= arrayDefaultMaxSize { + return newBitmapContainerFromRun(rc) + } + if len(rc.iv)*2 > 1+int(cardMinusOne) { + return rc.toArrayContainer() } return rc } +func runArrayUnionToRuns(rc *runContainer16, ac *arrayContainer) ([]interval16, uint16) { + pos1 := 0 + pos2 := 0 + length1 := len(ac.content) + length2 := len(rc.iv) + target := make([]interval16, 0, len(rc.iv)) + // have to find the first range + // options are + // 1. from array container + // 2. 
from run container + var previousInterval interval16 + var cardMinusOne uint16 + if ac.content[0] < rc.iv[0].start { + previousInterval.start = ac.content[0] + previousInterval.length = 0 + pos1++ + } else { + previousInterval.start = rc.iv[0].start + previousInterval.length = rc.iv[0].length + pos2++ + } + + for pos1 < length1 || pos2 < length2 { + if pos1 < length1 { + s1 := ac.content[pos1] + if s1 <= previousInterval.start+previousInterval.length { + pos1++ + continue + } + if previousInterval.last() < MaxUint16 && previousInterval.last()+1 == s1 { + previousInterval.length++ + pos1++ + continue + } + } + if pos2 < length2 { + range2 := rc.iv[pos2] + if range2.start <= previousInterval.last() || range2.start > 0 && range2.start-1 == previousInterval.last() { + pos2++ + if previousInterval.last() < range2.last() { + previousInterval.length = range2.last() - previousInterval.start + } + continue + } + } + cardMinusOne += previousInterval.length + 1 + target = append(target, previousInterval) + if pos2 == length2 || pos1 < length1 && ac.content[pos1] < rc.iv[pos2].start { + previousInterval.start = ac.content[pos1] + previousInterval.length = 0 + pos1++ + } else { + previousInterval = rc.iv[pos2] + pos2++ + } + } + cardMinusOne += previousInterval.length + 1 + target = append(target, previousInterval) + + return target, cardMinusOne +} + // lazyIOR is described (not yet implemented) in // this nice note from @lemire on // https://github.com/RoaringBitmap/roaring/pull/70#issuecomment-263613737 @@ -2269,9 +2342,9 @@ func (rc *runContainer16) lazyOR(a container) container { } func (rc *runContainer16) intersects(a container) bool { - // TODO: optimize by doing inplace/less allocation, possibly? 
+ // TODO: optimize by doing inplace/less allocation isect := rc.and(a) - return isect.getCardinality() > 0 + return !isect.isEmpty() } func (rc *runContainer16) xor(a container) container { @@ -2300,44 +2373,51 @@ func (rc *runContainer16) iandNot(a container) container { // flip the values in the range [firstOfRange,endx) func (rc *runContainer16) inot(firstOfRange, endx int) container { - if firstOfRange >= endx { - panic(fmt.Sprintf("invalid %v = endx >= firstOfRange = %v", endx, firstOfRange)) + if firstOfRange > endx { + panic(fmt.Sprintf("invalid %v = endx > firstOfRange = %v", endx, firstOfRange)) + } + if firstOfRange > endx { + return rc } // TODO: minimize copies, do it all inplace; not() makes a copy. rc = rc.Not(firstOfRange, endx) return rc } -func (rc *runContainer16) getCardinality() int { - return int(rc.cardinality()) -} - func (rc *runContainer16) rank(x uint16) int { - n := int64(len(rc.iv)) - xx := int64(x) - w, already, _ := rc.search(xx, nil) + n := int(len(rc.iv)) + xx := int(x) + w, already, _ := rc.search(xx) if w < 0 { return 0 } if !already && w == n-1 { return rc.getCardinality() } - var rnk int64 + var rnk int if !already { - for i := int64(0); i <= w; i++ { + for i := int(0); i <= w; i++ { rnk += rc.iv[i].runlen() } return int(rnk) } - for i := int64(0); i < w; i++ { + for i := int(0); i < w; i++ { rnk += rc.iv[i].runlen() } - rnk += int64(x-rc.iv[w].start) + 1 + rnk += int(x-rc.iv[w].start) + 1 return int(rnk) } func (rc *runContainer16) selectInt(x uint16) int { - return rc.selectInt16(x) + var offset int + for k := range rc.iv { + nextOffset := offset + rc.iv[k].runlen() + if nextOffset > int(x) { + return int(int(rc.iv[k].start) + (int(x) - offset)) + } + offset = nextOffset + } + panic("cannot select x") } func (rc *runContainer16) andNotRunContainer16(b *runContainer16) container { @@ -2415,11 +2495,9 @@ func (rc *runContainer16) xorBitmap(bc *bitmapContainer) container { // convert to bitmap or array *if needed* func (rc 
*runContainer16) toEfficientContainer() container { - - // runContainer16SerializedSizeInBytes(numRuns) sizeAsRunContainer := rc.getSizeInBytes() sizeAsBitmapContainer := bitmapContainerSizeInBytes() - card := int(rc.cardinality()) + card := rc.getCardinality() sizeAsArrayContainer := arrayContainerSizeInBytes(card) if sizeAsRunContainer <= minOfInt(sizeAsBitmapContainer, sizeAsArrayContainer) { return rc diff --git a/vendor/github.com/RoaringBitmap/roaring/runcontainer_gen.go b/vendor/github.com/RoaringBitmap/roaring/runcontainer_gen.go deleted file mode 100644 index 84537d087..000000000 --- a/vendor/github.com/RoaringBitmap/roaring/runcontainer_gen.go +++ /dev/null @@ -1,1104 +0,0 @@ -package roaring - -// NOTE: THIS FILE WAS PRODUCED BY THE -// MSGP CODE GENERATION TOOL (github.com/tinylib/msgp) -// DO NOT EDIT - -import "github.com/tinylib/msgp/msgp" - -// Deprecated: DecodeMsg implements msgp.Decodable -func (z *addHelper16) DecodeMsg(dc *msgp.Reader) (err error) { - var field []byte - _ = field - var zbai uint32 - zbai, err = dc.ReadMapHeader() - if err != nil { - return - } - for zbai > 0 { - zbai-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - return - } - switch msgp.UnsafeString(field) { - case "runstart": - z.runstart, err = dc.ReadUint16() - if err != nil { - return - } - case "runlen": - z.runlen, err = dc.ReadUint16() - if err != nil { - return - } - case "actuallyAdded": - z.actuallyAdded, err = dc.ReadUint16() - if err != nil { - return - } - case "m": - var zcmr uint32 - zcmr, err = dc.ReadArrayHeader() - if err != nil { - return - } - if cap(z.m) >= int(zcmr) { - z.m = (z.m)[:zcmr] - } else { - z.m = make([]interval16, zcmr) - } - for zxvk := range z.m { - var zajw uint32 - zajw, err = dc.ReadMapHeader() - if err != nil { - return - } - for zajw > 0 { - zajw-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - return - } - switch msgp.UnsafeString(field) { - case "start": - z.m[zxvk].start, err = dc.ReadUint16() - if err != nil { - 
return - } - case "last": - z.m[zxvk].length, err = dc.ReadUint16() - z.m[zxvk].length -= z.m[zxvk].start - if err != nil { - return - } - default: - err = dc.Skip() - if err != nil { - return - } - } - } - } - case "rc": - if dc.IsNil() { - err = dc.ReadNil() - if err != nil { - return - } - z.rc = nil - } else { - if z.rc == nil { - z.rc = new(runContainer16) - } - var zwht uint32 - zwht, err = dc.ReadMapHeader() - if err != nil { - return - } - for zwht > 0 { - zwht-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - return - } - switch msgp.UnsafeString(field) { - case "iv": - var zhct uint32 - zhct, err = dc.ReadArrayHeader() - if err != nil { - return - } - if cap(z.rc.iv) >= int(zhct) { - z.rc.iv = (z.rc.iv)[:zhct] - } else { - z.rc.iv = make([]interval16, zhct) - } - for zbzg := range z.rc.iv { - var zcua uint32 - zcua, err = dc.ReadMapHeader() - if err != nil { - return - } - for zcua > 0 { - zcua-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - return - } - switch msgp.UnsafeString(field) { - case "start": - z.rc.iv[zbzg].start, err = dc.ReadUint16() - if err != nil { - return - } - case "last": - z.rc.iv[zbzg].length, err = dc.ReadUint16() - z.rc.iv[zbzg].length -= z.rc.iv[zbzg].start - if err != nil { - return - } - default: - err = dc.Skip() - if err != nil { - return - } - } - } - } - case "card": - z.rc.card, err = dc.ReadInt64() - if err != nil { - return - } - default: - err = dc.Skip() - if err != nil { - return - } - } - } - } - default: - err = dc.Skip() - if err != nil { - return - } - } - } - return -} - -// Deprecated: EncodeMsg implements msgp.Encodable -func (z *addHelper16) EncodeMsg(en *msgp.Writer) (err error) { - // map header, size 5 - // write "runstart" - err = en.Append(0x85, 0xa8, 0x72, 0x75, 0x6e, 0x73, 0x74, 0x61, 0x72, 0x74) - if err != nil { - return err - } - err = en.WriteUint16(z.runstart) - if err != nil { - return - } - // write "runlen" - err = en.Append(0xa6, 0x72, 0x75, 0x6e, 0x6c, 0x65, 0x6e) - if err != nil 
{ - return err - } - err = en.WriteUint16(z.runlen) - if err != nil { - return - } - // write "actuallyAdded" - err = en.Append(0xad, 0x61, 0x63, 0x74, 0x75, 0x61, 0x6c, 0x6c, 0x79, 0x41, 0x64, 0x64, 0x65, 0x64) - if err != nil { - return err - } - err = en.WriteUint16(z.actuallyAdded) - if err != nil { - return - } - // write "m" - err = en.Append(0xa1, 0x6d) - if err != nil { - return err - } - err = en.WriteArrayHeader(uint32(len(z.m))) - if err != nil { - return - } - for zxvk := range z.m { - // map header, size 2 - // write "start" - err = en.Append(0x82, 0xa5, 0x73, 0x74, 0x61, 0x72, 0x74) - if err != nil { - return err - } - err = en.WriteUint16(z.m[zxvk].start) - if err != nil { - return - } - // write "last" - err = en.Append(0xa4, 0x6c, 0x61, 0x73, 0x74) - if err != nil { - return err - } - err = en.WriteUint16(z.m[zxvk].last()) - if err != nil { - return - } - } - // write "rc" - err = en.Append(0xa2, 0x72, 0x63) - if err != nil { - return err - } - if z.rc == nil { - err = en.WriteNil() - if err != nil { - return - } - } else { - // map header, size 2 - // write "iv" - err = en.Append(0x82, 0xa2, 0x69, 0x76) - if err != nil { - return err - } - err = en.WriteArrayHeader(uint32(len(z.rc.iv))) - if err != nil { - return - } - for zbzg := range z.rc.iv { - // map header, size 2 - // write "start" - err = en.Append(0x82, 0xa5, 0x73, 0x74, 0x61, 0x72, 0x74) - if err != nil { - return err - } - err = en.WriteUint16(z.rc.iv[zbzg].start) - if err != nil { - return - } - // write "last" - err = en.Append(0xa4, 0x6c, 0x61, 0x73, 0x74) - if err != nil { - return err - } - err = en.WriteUint16(z.rc.iv[zbzg].last()) - if err != nil { - return - } - } - // write "card" - err = en.Append(0xa4, 0x63, 0x61, 0x72, 0x64) - if err != nil { - return err - } - err = en.WriteInt64(z.rc.card) - if err != nil { - return - } - } - return -} - -// Deprecated: MarshalMsg implements msgp.Marshaler -func (z *addHelper16) MarshalMsg(b []byte) (o []byte, err error) { - o = 
msgp.Require(b, z.Msgsize()) - // map header, size 5 - // string "runstart" - o = append(o, 0x85, 0xa8, 0x72, 0x75, 0x6e, 0x73, 0x74, 0x61, 0x72, 0x74) - o = msgp.AppendUint16(o, z.runstart) - // string "runlen" - o = append(o, 0xa6, 0x72, 0x75, 0x6e, 0x6c, 0x65, 0x6e) - o = msgp.AppendUint16(o, z.runlen) - // string "actuallyAdded" - o = append(o, 0xad, 0x61, 0x63, 0x74, 0x75, 0x61, 0x6c, 0x6c, 0x79, 0x41, 0x64, 0x64, 0x65, 0x64) - o = msgp.AppendUint16(o, z.actuallyAdded) - // string "m" - o = append(o, 0xa1, 0x6d) - o = msgp.AppendArrayHeader(o, uint32(len(z.m))) - for zxvk := range z.m { - // map header, size 2 - // string "start" - o = append(o, 0x82, 0xa5, 0x73, 0x74, 0x61, 0x72, 0x74) - o = msgp.AppendUint16(o, z.m[zxvk].start) - // string "last" - o = append(o, 0xa4, 0x6c, 0x61, 0x73, 0x74) - o = msgp.AppendUint16(o, z.m[zxvk].last()) - } - // string "rc" - o = append(o, 0xa2, 0x72, 0x63) - if z.rc == nil { - o = msgp.AppendNil(o) - } else { - // map header, size 2 - // string "iv" - o = append(o, 0x82, 0xa2, 0x69, 0x76) - o = msgp.AppendArrayHeader(o, uint32(len(z.rc.iv))) - for zbzg := range z.rc.iv { - // map header, size 2 - // string "start" - o = append(o, 0x82, 0xa5, 0x73, 0x74, 0x61, 0x72, 0x74) - o = msgp.AppendUint16(o, z.rc.iv[zbzg].start) - // string "last" - o = append(o, 0xa4, 0x6c, 0x61, 0x73, 0x74) - o = msgp.AppendUint16(o, z.rc.iv[zbzg].last()) - } - // string "card" - o = append(o, 0xa4, 0x63, 0x61, 0x72, 0x64) - o = msgp.AppendInt64(o, z.rc.card) - } - return -} - -// Deprecated: UnmarshalMsg implements msgp.Unmarshaler -func (z *addHelper16) UnmarshalMsg(bts []byte) (o []byte, err error) { - var field []byte - _ = field - var zxhx uint32 - zxhx, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - return - } - for zxhx > 0 { - zxhx-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - return - } - switch msgp.UnsafeString(field) { - case "runstart": - z.runstart, bts, err = msgp.ReadUint16Bytes(bts) - if err != nil { - 
return - } - case "runlen": - z.runlen, bts, err = msgp.ReadUint16Bytes(bts) - if err != nil { - return - } - case "actuallyAdded": - z.actuallyAdded, bts, err = msgp.ReadUint16Bytes(bts) - if err != nil { - return - } - case "m": - var zlqf uint32 - zlqf, bts, err = msgp.ReadArrayHeaderBytes(bts) - if err != nil { - return - } - if cap(z.m) >= int(zlqf) { - z.m = (z.m)[:zlqf] - } else { - z.m = make([]interval16, zlqf) - } - for zxvk := range z.m { - var zdaf uint32 - zdaf, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - return - } - for zdaf > 0 { - zdaf-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - return - } - switch msgp.UnsafeString(field) { - case "start": - z.m[zxvk].start, bts, err = msgp.ReadUint16Bytes(bts) - if err != nil { - return - } - case "last": - z.m[zxvk].length, bts, err = msgp.ReadUint16Bytes(bts) - z.m[zxvk].length -= z.m[zxvk].start - if err != nil { - return - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - return - } - } - } - } - case "rc": - if msgp.IsNil(bts) { - bts, err = msgp.ReadNilBytes(bts) - if err != nil { - return - } - z.rc = nil - } else { - if z.rc == nil { - z.rc = new(runContainer16) - } - var zpks uint32 - zpks, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - return - } - for zpks > 0 { - zpks-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - return - } - switch msgp.UnsafeString(field) { - case "iv": - var zjfb uint32 - zjfb, bts, err = msgp.ReadArrayHeaderBytes(bts) - if err != nil { - return - } - if cap(z.rc.iv) >= int(zjfb) { - z.rc.iv = (z.rc.iv)[:zjfb] - } else { - z.rc.iv = make([]interval16, zjfb) - } - for zbzg := range z.rc.iv { - var zcxo uint32 - zcxo, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - return - } - for zcxo > 0 { - zcxo-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - return - } - switch msgp.UnsafeString(field) { - case "start": - z.rc.iv[zbzg].start, bts, err = msgp.ReadUint16Bytes(bts) - if err 
!= nil { - return - } - case "last": - z.rc.iv[zbzg].length, bts, err = msgp.ReadUint16Bytes(bts) - z.rc.iv[zbzg].length -= z.rc.iv[zbzg].start - if err != nil { - return - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - return - } - } - } - } - case "card": - z.rc.card, bts, err = msgp.ReadInt64Bytes(bts) - if err != nil { - return - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - return - } - } - } - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - return - } - } - } - o = bts - return -} - -// Deprecated: Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z *addHelper16) Msgsize() (s int) { - s = 1 + 9 + msgp.Uint16Size + 7 + msgp.Uint16Size + 14 + msgp.Uint16Size + 2 + msgp.ArrayHeaderSize + (len(z.m) * (12 + msgp.Uint16Size + msgp.Uint16Size)) + 3 - if z.rc == nil { - s += msgp.NilSize - } else { - s += 1 + 3 + msgp.ArrayHeaderSize + (len(z.rc.iv) * (12 + msgp.Uint16Size + msgp.Uint16Size)) + 5 + msgp.Int64Size - } - return -} - -// Deprecated: DecodeMsg implements msgp.Decodable -func (z *interval16) DecodeMsg(dc *msgp.Reader) (err error) { - var field []byte - _ = field - var zeff uint32 - zeff, err = dc.ReadMapHeader() - if err != nil { - return - } - for zeff > 0 { - zeff-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - return - } - switch msgp.UnsafeString(field) { - case "start": - z.start, err = dc.ReadUint16() - if err != nil { - return - } - case "last": - z.length, err = dc.ReadUint16() - z.length = -z.start - if err != nil { - return - } - default: - err = dc.Skip() - if err != nil { - return - } - } - } - return -} - -// Deprecated: EncodeMsg implements msgp.Encodable -func (z interval16) EncodeMsg(en *msgp.Writer) (err error) { - // map header, size 2 - // write "start" - err = en.Append(0x82, 0xa5, 0x73, 0x74, 0x61, 0x72, 0x74) - if err != nil { - return err - } - err = en.WriteUint16(z.start) - if err != nil { - return - } - // write "last" - err = 
en.Append(0xa4, 0x6c, 0x61, 0x73, 0x74) - if err != nil { - return err - } - err = en.WriteUint16(z.last()) - if err != nil { - return - } - return -} - -// Deprecated: MarshalMsg implements msgp.Marshaler -func (z interval16) MarshalMsg(b []byte) (o []byte, err error) { - o = msgp.Require(b, z.Msgsize()) - // map header, size 2 - // string "start" - o = append(o, 0x82, 0xa5, 0x73, 0x74, 0x61, 0x72, 0x74) - o = msgp.AppendUint16(o, z.start) - // string "last" - o = append(o, 0xa4, 0x6c, 0x61, 0x73, 0x74) - o = msgp.AppendUint16(o, z.last()) - return -} - -// Deprecated: UnmarshalMsg implements msgp.Unmarshaler -func (z *interval16) UnmarshalMsg(bts []byte) (o []byte, err error) { - var field []byte - _ = field - var zrsw uint32 - zrsw, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - return - } - for zrsw > 0 { - zrsw-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - return - } - switch msgp.UnsafeString(field) { - case "start": - z.start, bts, err = msgp.ReadUint16Bytes(bts) - if err != nil { - return - } - case "last": - z.length, bts, err = msgp.ReadUint16Bytes(bts) - z.length -= z.start - if err != nil { - return - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - return - } - } - } - o = bts - return -} - -// Deprecated: Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z interval16) Msgsize() (s int) { - s = 1 + 6 + msgp.Uint16Size + 5 + msgp.Uint16Size - return -} - -// Deprecated: DecodeMsg implements msgp.Decodable -func (z *runContainer16) DecodeMsg(dc *msgp.Reader) (err error) { - var field []byte - _ = field - var zdnj uint32 - zdnj, err = dc.ReadMapHeader() - if err != nil { - return - } - for zdnj > 0 { - zdnj-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - return - } - switch msgp.UnsafeString(field) { - case "iv": - var zobc uint32 - zobc, err = dc.ReadArrayHeader() - if err != nil { - return - } - if cap(z.iv) >= int(zobc) { - z.iv = (z.iv)[:zobc] - 
} else { - z.iv = make([]interval16, zobc) - } - for zxpk := range z.iv { - var zsnv uint32 - zsnv, err = dc.ReadMapHeader() - if err != nil { - return - } - for zsnv > 0 { - zsnv-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - return - } - switch msgp.UnsafeString(field) { - case "start": - z.iv[zxpk].start, err = dc.ReadUint16() - if err != nil { - return - } - case "last": - z.iv[zxpk].length, err = dc.ReadUint16() - z.iv[zxpk].length -= z.iv[zxpk].start - if err != nil { - return - } - default: - err = dc.Skip() - if err != nil { - return - } - } - } - } - case "card": - z.card, err = dc.ReadInt64() - if err != nil { - return - } - default: - err = dc.Skip() - if err != nil { - return - } - } - } - return -} - -// Deprecated: EncodeMsg implements msgp.Encodable -func (z *runContainer16) EncodeMsg(en *msgp.Writer) (err error) { - // map header, size 2 - // write "iv" - err = en.Append(0x82, 0xa2, 0x69, 0x76) - if err != nil { - return err - } - err = en.WriteArrayHeader(uint32(len(z.iv))) - if err != nil { - return - } - for zxpk := range z.iv { - // map header, size 2 - // write "start" - err = en.Append(0x82, 0xa5, 0x73, 0x74, 0x61, 0x72, 0x74) - if err != nil { - return err - } - err = en.WriteUint16(z.iv[zxpk].start) - if err != nil { - return - } - // write "last" - err = en.Append(0xa4, 0x6c, 0x61, 0x73, 0x74) - if err != nil { - return err - } - err = en.WriteUint16(z.iv[zxpk].last()) - if err != nil { - return - } - } - // write "card" - err = en.Append(0xa4, 0x63, 0x61, 0x72, 0x64) - if err != nil { - return err - } - err = en.WriteInt64(z.card) - if err != nil { - return - } - return -} - -// Deprecated: MarshalMsg implements msgp.Marshaler -func (z *runContainer16) MarshalMsg(b []byte) (o []byte, err error) { - o = msgp.Require(b, z.Msgsize()) - // map header, size 2 - // string "iv" - o = append(o, 0x82, 0xa2, 0x69, 0x76) - o = msgp.AppendArrayHeader(o, uint32(len(z.iv))) - for zxpk := range z.iv { - // map header, size 2 - // string "start" 
- o = append(o, 0x82, 0xa5, 0x73, 0x74, 0x61, 0x72, 0x74) - o = msgp.AppendUint16(o, z.iv[zxpk].start) - // string "last" - o = append(o, 0xa4, 0x6c, 0x61, 0x73, 0x74) - o = msgp.AppendUint16(o, z.iv[zxpk].last()) - } - // string "card" - o = append(o, 0xa4, 0x63, 0x61, 0x72, 0x64) - o = msgp.AppendInt64(o, z.card) - return -} - -// Deprecated: UnmarshalMsg implements msgp.Unmarshaler -func (z *runContainer16) UnmarshalMsg(bts []byte) (o []byte, err error) { - var field []byte - _ = field - var zkgt uint32 - zkgt, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - return - } - for zkgt > 0 { - zkgt-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - return - } - switch msgp.UnsafeString(field) { - case "iv": - var zema uint32 - zema, bts, err = msgp.ReadArrayHeaderBytes(bts) - if err != nil { - return - } - if cap(z.iv) >= int(zema) { - z.iv = (z.iv)[:zema] - } else { - z.iv = make([]interval16, zema) - } - for zxpk := range z.iv { - var zpez uint32 - zpez, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - return - } - for zpez > 0 { - zpez-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - return - } - switch msgp.UnsafeString(field) { - case "start": - z.iv[zxpk].start, bts, err = msgp.ReadUint16Bytes(bts) - if err != nil { - return - } - case "last": - z.iv[zxpk].length, bts, err = msgp.ReadUint16Bytes(bts) - z.iv[zxpk].length -= z.iv[zxpk].start - if err != nil { - return - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - return - } - } - } - } - case "card": - z.card, bts, err = msgp.ReadInt64Bytes(bts) - if err != nil { - return - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - return - } - } - } - o = bts - return -} - -// Deprecated: Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z *runContainer16) Msgsize() (s int) { - s = 1 + 3 + msgp.ArrayHeaderSize + (len(z.iv) * (12 + msgp.Uint16Size + msgp.Uint16Size)) + 5 + msgp.Int64Size - 
return -} - -// Deprecated: DecodeMsg implements msgp.Decodable -func (z *runIterator16) DecodeMsg(dc *msgp.Reader) (err error) { - var field []byte - _ = field - var zqke uint32 - zqke, err = dc.ReadMapHeader() - if err != nil { - return - } - for zqke > 0 { - zqke-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - return - } - switch msgp.UnsafeString(field) { - case "rc": - if dc.IsNil() { - err = dc.ReadNil() - if err != nil { - return - } - z.rc = nil - } else { - if z.rc == nil { - z.rc = new(runContainer16) - } - err = z.rc.DecodeMsg(dc) - if err != nil { - return - } - } - case "curIndex": - z.curIndex, err = dc.ReadInt64() - if err != nil { - return - } - case "curPosInIndex": - z.curPosInIndex, err = dc.ReadUint16() - if err != nil { - return - } - default: - err = dc.Skip() - if err != nil { - return - } - } - } - return -} - -// Deprecated: EncodeMsg implements msgp.Encodable -func (z *runIterator16) EncodeMsg(en *msgp.Writer) (err error) { - // map header, size 3 - // write "rc" - err = en.Append(0x83, 0xa2, 0x72, 0x63) - if err != nil { - return err - } - if z.rc == nil { - err = en.WriteNil() - if err != nil { - return - } - } else { - err = z.rc.EncodeMsg(en) - if err != nil { - return - } - } - // write "curIndex" - err = en.Append(0xa8, 0x63, 0x75, 0x72, 0x49, 0x6e, 0x64, 0x65, 0x78) - if err != nil { - return err - } - err = en.WriteInt64(z.curIndex) - if err != nil { - return - } - // write "curPosInIndex" - err = en.Append(0xad, 0x63, 0x75, 0x72, 0x50, 0x6f, 0x73, 0x49, 0x6e, 0x49, 0x6e, 0x64, 0x65, 0x78) - if err != nil { - return err - } - err = en.WriteUint16(z.curPosInIndex) - if err != nil { - return - } - return -} - -// Deprecated: MarshalMsg implements msgp.Marshaler -func (z *runIterator16) MarshalMsg(b []byte) (o []byte, err error) { - o = msgp.Require(b, z.Msgsize()) - // map header, size 3 - // string "rc" - o = append(o, 0x83, 0xa2, 0x72, 0x63) - if z.rc == nil { - o = msgp.AppendNil(o) - } else { - o, err = z.rc.MarshalMsg(o) 
- if err != nil { - return - } - } - // string "curIndex" - o = append(o, 0xa8, 0x63, 0x75, 0x72, 0x49, 0x6e, 0x64, 0x65, 0x78) - o = msgp.AppendInt64(o, z.curIndex) - // string "curPosInIndex" - o = append(o, 0xad, 0x63, 0x75, 0x72, 0x50, 0x6f, 0x73, 0x49, 0x6e, 0x49, 0x6e, 0x64, 0x65, 0x78) - o = msgp.AppendUint16(o, z.curPosInIndex) - return -} - -// Deprecated: UnmarshalMsg implements msgp.Unmarshaler -func (z *runIterator16) UnmarshalMsg(bts []byte) (o []byte, err error) { - var field []byte - _ = field - var zqyh uint32 - zqyh, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - return - } - for zqyh > 0 { - zqyh-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - return - } - switch msgp.UnsafeString(field) { - case "rc": - if msgp.IsNil(bts) { - bts, err = msgp.ReadNilBytes(bts) - if err != nil { - return - } - z.rc = nil - } else { - if z.rc == nil { - z.rc = new(runContainer16) - } - bts, err = z.rc.UnmarshalMsg(bts) - if err != nil { - return - } - } - case "curIndex": - z.curIndex, bts, err = msgp.ReadInt64Bytes(bts) - if err != nil { - return - } - case "curPosInIndex": - z.curPosInIndex, bts, err = msgp.ReadUint16Bytes(bts) - if err != nil { - return - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - return - } - } - } - o = bts - return -} - -// Deprecated: Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z *runIterator16) Msgsize() (s int) { - s = 1 + 3 - if z.rc == nil { - s += msgp.NilSize - } else { - s += z.rc.Msgsize() - } - s += 9 + msgp.Int64Size + 14 + msgp.Uint16Size - return -} - -// Deprecated: DecodeMsg implements msgp.Decodable -func (z *uint16Slice) DecodeMsg(dc *msgp.Reader) (err error) { - var zjpj uint32 - zjpj, err = dc.ReadArrayHeader() - if err != nil { - return - } - if cap((*z)) >= int(zjpj) { - (*z) = (*z)[:zjpj] - } else { - (*z) = make(uint16Slice, zjpj) - } - for zywj := range *z { - (*z)[zywj], err = dc.ReadUint16() - if err != nil { - 
return - } - } - return -} - -// Deprecated: EncodeMsg implements msgp.Encodable -func (z uint16Slice) EncodeMsg(en *msgp.Writer) (err error) { - err = en.WriteArrayHeader(uint32(len(z))) - if err != nil { - return - } - for zzpf := range z { - err = en.WriteUint16(z[zzpf]) - if err != nil { - return - } - } - return -} - -// Deprecated: MarshalMsg implements msgp.Marshaler -func (z uint16Slice) MarshalMsg(b []byte) (o []byte, err error) { - o = msgp.Require(b, z.Msgsize()) - o = msgp.AppendArrayHeader(o, uint32(len(z))) - for zzpf := range z { - o = msgp.AppendUint16(o, z[zzpf]) - } - return -} - -// Deprecated: UnmarshalMsg implements msgp.Unmarshaler -func (z *uint16Slice) UnmarshalMsg(bts []byte) (o []byte, err error) { - var zgmo uint32 - zgmo, bts, err = msgp.ReadArrayHeaderBytes(bts) - if err != nil { - return - } - if cap((*z)) >= int(zgmo) { - (*z) = (*z)[:zgmo] - } else { - (*z) = make(uint16Slice, zgmo) - } - for zrfe := range *z { - (*z)[zrfe], bts, err = msgp.ReadUint16Bytes(bts) - if err != nil { - return - } - } - o = bts - return -} - -// Deprecated: Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z uint16Slice) Msgsize() (s int) { - s = msgp.ArrayHeaderSize + (len(z) * (msgp.Uint16Size)) - return -} diff --git a/vendor/github.com/RoaringBitmap/roaring/serialization.go b/vendor/github.com/RoaringBitmap/roaring/serialization.go index 7b7ed29b0..70e3bbcc5 100644 --- a/vendor/github.com/RoaringBitmap/roaring/serialization.go +++ b/vendor/github.com/RoaringBitmap/roaring/serialization.go @@ -3,8 +3,6 @@ package roaring import ( "encoding/binary" "io" - - "github.com/tinylib/msgp/msgp" ) // writeTo for runContainer16 follows this @@ -19,16 +17,3 @@ func (b *runContainer16) writeTo(stream io.Writer) (int, error) { } return stream.Write(buf) } - -func (b *runContainer16) writeToMsgpack(stream io.Writer) (int, error) { - bts, err := b.MarshalMsg(nil) - if err != nil { - return 0, err - } - return 
stream.Write(bts) -} - -func (b *runContainer16) readFromMsgpack(stream io.Reader) (int, error) { - err := msgp.Decode(stream, b) - return 0, err -} diff --git a/vendor/github.com/RoaringBitmap/roaring/serialization_generic.go b/vendor/github.com/RoaringBitmap/roaring/serialization_generic.go index 4b9d9e3d4..90a336cda 100644 --- a/vendor/github.com/RoaringBitmap/roaring/serialization_generic.go +++ b/vendor/github.com/RoaringBitmap/roaring/serialization_generic.go @@ -1,4 +1,4 @@ -// +build !amd64,!386 appengine +// +build !amd64,!386,!arm,!arm64,!ppc64le,!mipsle,!mips64le,!mips64p32le,!wasm appengine package roaring diff --git a/vendor/github.com/RoaringBitmap/roaring/serialization_littleendian.go b/vendor/github.com/RoaringBitmap/roaring/serialization_littleendian.go index 818a06c80..221e173fe 100644 --- a/vendor/github.com/RoaringBitmap/roaring/serialization_littleendian.go +++ b/vendor/github.com/RoaringBitmap/roaring/serialization_littleendian.go @@ -1,8 +1,9 @@ -// +build 386 amd64,!appengine +// +build 386,!appengine amd64,!appengine arm,!appengine arm64,!appengine ppc64le,!appengine mipsle,!appengine mips64le,!appengine mips64p32le,!appengine wasm,!appengine package roaring import ( + "encoding/binary" "errors" "io" "reflect" @@ -132,3 +133,285 @@ func byteSliceAsInterval16Slice(slice []byte) (result []interval16) { // return result return } + +// FromBuffer creates a bitmap from its serialized version stored in buffer. +// It uses CRoaring's frozen bitmap format. +// +// The format specification is available here: +// https://github.com/RoaringBitmap/CRoaring/blob/2c867e9f9c9e2a3a7032791f94c4c7ae3013f6e0/src/roaring.c#L2756-L2783 +// +// The provided byte array (buf) is expected to be a constant. +// The function makes the best effort attempt not to copy data. +// Only little endian is supported. The function will err if it detects a big +// endian serialized file. 
+// You should take care not to modify buff as it will likely result in +// unexpected program behavior. +// If said buffer comes from a memory map, it's advisable to give it read +// only permissions, either at creation or by calling Mprotect from the +// golang.org/x/sys/unix package. +// +// Resulting bitmaps are effectively immutable in the following sense: +// a copy-on-write marker is used so that when you modify the resulting +// bitmap, copies of selected data (containers) are made. +// You should *not* change the copy-on-write status of the resulting +// bitmaps (SetCopyOnWrite). +// +// If buf becomes unavailable, then a bitmap created with +// FromBuffer would be effectively broken. Furthermore, any +// bitmap derived from this bitmap (e.g., via Or, And) might +// also be broken. Thus, before making buf unavailable, you should +// call CloneCopyOnWriteContainers on all such bitmaps. +// +func (rb *Bitmap) FrozenView(buf []byte) error { + return rb.highlowcontainer.frozenView(buf) +} + +/* Verbatim specification from CRoaring. + * + * FROZEN SERIALIZATION FORMAT DESCRIPTION + * + * -- (beginning must be aligned by 32 bytes) -- + * uint64_t[BITSET_CONTAINER_SIZE_IN_WORDS * num_bitset_containers] + * rle16_t[total number of rle elements in all run containers] + * uint16_t[total number of array elements in all array containers] + * uint16_t[num_containers] + * uint16_t[num_containers] + * uint8_t[num_containers] + *
uint32_t + * + *
is a 4-byte value which is a bit union of FROZEN_COOKIE (15 bits) + * and the number of containers (17 bits). + * + * stores number of elements for every container. + * Its meaning depends on container type. + * For array and bitset containers, this value is the container cardinality minus one. + * For run container, it is the number of rle_t elements (n_runs). + * + * ,, are flat arrays of elements of + * all containers of respective type. + * + * <*_data> and are kept close together because they are not accessed + * during deserilization. This may reduce IO in case of large mmaped bitmaps. + * All members have their native alignments during deserilization except
, + * which is not guaranteed to be aligned by 4 bytes. + */ +const FROZEN_COOKIE = 13766 + +var ( + FrozenBitmapInvalidCookie = errors.New("header does not contain the FROZEN_COOKIE") + FrozenBitmapBigEndian = errors.New("loading big endian frozen bitmaps is not supported") + FrozenBitmapIncomplete = errors.New("input buffer too small to contain a frozen bitmap") + FrozenBitmapOverpopulated = errors.New("too many containers") + FrozenBitmapUnexpectedData = errors.New("spurious data in input") + FrozenBitmapInvalidTypecode = errors.New("unrecognized typecode") + FrozenBitmapBufferTooSmall = errors.New("buffer too small") +) + +func (ra *roaringArray) frozenView(buf []byte) error { + if len(buf) < 4 { + return FrozenBitmapIncomplete + } + + headerBE := binary.BigEndian.Uint32(buf[len(buf)-4:]) + if headerBE & 0x7fff == FROZEN_COOKIE { + return FrozenBitmapBigEndian + } + + header := binary.LittleEndian.Uint32(buf[len(buf)-4:]) + buf = buf[:len(buf)-4] + + if header & 0x7fff != FROZEN_COOKIE { + return FrozenBitmapInvalidCookie + } + + nCont := int(header >> 15) + if nCont > (1 << 16) { + return FrozenBitmapOverpopulated + } + + // 1 byte per type, 2 bytes per key, 2 bytes per count. 
+ if len(buf) < 5*nCont { + return FrozenBitmapIncomplete + } + + types := buf[len(buf)-nCont:] + buf = buf[:len(buf)-nCont] + + counts := byteSliceAsUint16Slice(buf[len(buf)-2*nCont:]) + buf = buf[:len(buf)-2*nCont] + + keys := byteSliceAsUint16Slice(buf[len(buf)-2*nCont:]) + buf = buf[:len(buf)-2*nCont] + + nBitmap, nArray, nRun := uint64(0), uint64(0), uint64(0) + nArrayEl, nRunEl := uint64(0), uint64(0) + for i, t := range types { + switch (t) { + case 1: + nBitmap++ + case 2: + nArray++ + nArrayEl += uint64(counts[i])+1 + case 3: + nRun++ + nRunEl += uint64(counts[i]) + default: + return FrozenBitmapInvalidTypecode + } + } + + if uint64(len(buf)) < (1 << 13)*nBitmap + 4*nRunEl + 2*nArrayEl { + return FrozenBitmapIncomplete + } + + bitsetsArena := byteSliceAsUint64Slice(buf[:(1 << 13)*nBitmap]) + buf = buf[(1 << 13)*nBitmap:] + + runsArena := byteSliceAsInterval16Slice(buf[:4*nRunEl]) + buf = buf[4*nRunEl:] + + arraysArena := byteSliceAsUint16Slice(buf[:2*nArrayEl]) + buf = buf[2*nArrayEl:] + + if len(buf) != 0 { + return FrozenBitmapUnexpectedData + } + + // TODO: maybe arena_alloc all this. + containers := make([]container, nCont) + bitsets := make([]bitmapContainer, nBitmap) + arrays := make([]arrayContainer, nArray) + runs := make([]runContainer16, nRun) + needCOW := make([]bool, nCont) + + iBitset, iArray, iRun := uint64(0), uint64(0), uint64(0) + for i, t := range types { + needCOW[i] = true + + switch (t) { + case 1: + containers[i] = &bitsets[iBitset] + bitsets[iBitset].cardinality = int(counts[i])+1 + bitsets[iBitset].bitmap = bitsetsArena[:1024] + bitsetsArena = bitsetsArena[1024:] + iBitset++ + case 2: + containers[i] = &arrays[iArray] + sz := int(counts[i])+1 + arrays[iArray].content = arraysArena[:sz] + arraysArena = arraysArena[sz:] + iArray++ + case 3: + containers[i] = &runs[iRun] + runs[iRun].iv = runsArena[:counts[i]] + runsArena = runsArena[counts[i]:] + iRun++ + } + } + + // Not consuming the full input is a bug. 
+ if iBitset != nBitmap || len(bitsetsArena) != 0 || + iArray != nArray || len(arraysArena) != 0 || + iRun != nRun || len(runsArena) != 0 { + panic("we missed something") + } + + ra.keys = keys + ra.containers = containers + ra.needCopyOnWrite = needCOW + ra.copyOnWrite = true + + return nil +} + +func (bm *Bitmap) GetFrozenSizeInBytes() uint64 { + nBits, nArrayEl, nRunEl := uint64(0), uint64(0), uint64(0) + for _, c := range bm.highlowcontainer.containers { + switch v := c.(type) { + case *bitmapContainer: + nBits++ + case *arrayContainer: + nArrayEl += uint64(len(v.content)) + case *runContainer16: + nRunEl += uint64(len(v.iv)) + } + } + return 4 + 5*uint64(len(bm.highlowcontainer.containers)) + + (nBits << 13) + 2*nArrayEl + 4*nRunEl +} + +func (bm *Bitmap) Freeze() ([]byte, error) { + sz := bm.GetFrozenSizeInBytes() + buf := make([]byte, sz) + _, err := bm.FreezeTo(buf) + return buf, err +} + +func (bm *Bitmap) FreezeTo(buf []byte) (int, error) { + containers := bm.highlowcontainer.containers + nCont := len(containers) + + nBits, nArrayEl, nRunEl := 0, 0, 0 + for _, c := range containers { + switch v := c.(type) { + case *bitmapContainer: + nBits++ + case *arrayContainer: + nArrayEl += len(v.content) + case *runContainer16: + nRunEl += len(v.iv) + } + } + + serialSize := 4 + 5*nCont + (1 << 13)*nBits + 4*nRunEl + 2*nArrayEl + if len(buf) < serialSize { + return 0, FrozenBitmapBufferTooSmall + } + + bitsArena := byteSliceAsUint64Slice(buf[:(1 << 13)*nBits]) + buf = buf[(1 << 13)*nBits:] + + runsArena := byteSliceAsInterval16Slice(buf[:4*nRunEl]) + buf = buf[4*nRunEl:] + + arraysArena := byteSliceAsUint16Slice(buf[:2*nArrayEl]) + buf = buf[2*nArrayEl:] + + keys := byteSliceAsUint16Slice(buf[:2*nCont]) + buf = buf[2*nCont:] + + counts := byteSliceAsUint16Slice(buf[:2*nCont]) + buf = buf[2*nCont:] + + types := buf[:nCont] + buf = buf[nCont:] + + header := uint32(FROZEN_COOKIE|(nCont << 15)) + binary.LittleEndian.PutUint32(buf[:4], header) + + copy(keys, 
bm.highlowcontainer.keys[:]) + + for i, c := range containers { + switch v := c.(type) { + case *bitmapContainer: + copy(bitsArena, v.bitmap) + bitsArena = bitsArena[1024:] + counts[i] = uint16(v.cardinality-1) + types[i] = 1 + case *arrayContainer: + copy(arraysArena, v.content) + arraysArena = arraysArena[len(v.content):] + elems := len(v.content) + counts[i] = uint16(elems-1) + types[i] = 2 + case *runContainer16: + copy(runsArena, v.iv) + runs := len(v.iv) + runsArena = runsArena[runs:] + counts[i] = uint16(runs) + types[i] = 3 + } + } + + return serialSize, nil +} diff --git a/vendor/github.com/RoaringBitmap/roaring/setutil.go b/vendor/github.com/RoaringBitmap/roaring/setutil.go index 2fe815141..663c4fa37 100644 --- a/vendor/github.com/RoaringBitmap/roaring/setutil.go +++ b/vendor/github.com/RoaringBitmap/roaring/setutil.go @@ -135,66 +135,6 @@ func exclusiveUnion2by2(set1 []uint16, set2 []uint16, buffer []uint16) int { return pos } -func union2by2(set1 []uint16, set2 []uint16, buffer []uint16) int { - pos := 0 - k1 := 0 - k2 := 0 - if 0 == len(set2) { - buffer = buffer[:len(set1)] - copy(buffer, set1[:]) - return len(set1) - } - if 0 == len(set1) { - buffer = buffer[:len(set2)] - copy(buffer, set2[:]) - return len(set2) - } - s1 := set1[k1] - s2 := set2[k2] - buffer = buffer[:cap(buffer)] - for { - if s1 < s2 { - buffer[pos] = s1 - pos++ - k1++ - if k1 >= len(set1) { - copy(buffer[pos:], set2[k2:]) - pos += len(set2) - k2 - break - } - s1 = set1[k1] - } else if s1 == s2 { - buffer[pos] = s1 - pos++ - k1++ - k2++ - if k1 >= len(set1) { - copy(buffer[pos:], set2[k2:]) - pos += len(set2) - k2 - break - } - if k2 >= len(set2) { - copy(buffer[pos:], set1[k1:]) - pos += len(set1) - k1 - break - } - s1 = set1[k1] - s2 = set2[k2] - } else { // if (set1[k1]>set2[k2]) - buffer[pos] = s2 - pos++ - k2++ - if k2 >= len(set2) { - copy(buffer[pos:], set1[k1:]) - pos += len(set1) - k1 - break - } - s2 = set2[k2] - } - } - return pos -} - func union2by2Cardinality(set1 
[]uint16, set2 []uint16) int { pos := 0 k1 := 0 diff --git a/vendor/github.com/RoaringBitmap/roaring/setutil_arm64.go b/vendor/github.com/RoaringBitmap/roaring/setutil_arm64.go new file mode 100644 index 000000000..debca813c --- /dev/null +++ b/vendor/github.com/RoaringBitmap/roaring/setutil_arm64.go @@ -0,0 +1,6 @@ +// +build arm64,!gccgo,!appengine + +package roaring + +//go:noescape +func union2by2(set1 []uint16, set2 []uint16, buffer []uint16) (size int) diff --git a/vendor/github.com/RoaringBitmap/roaring/setutil_arm64.s b/vendor/github.com/RoaringBitmap/roaring/setutil_arm64.s new file mode 100644 index 000000000..e4f0f2047 --- /dev/null +++ b/vendor/github.com/RoaringBitmap/roaring/setutil_arm64.s @@ -0,0 +1,132 @@ +// +build arm64,!gccgo,!appengine + +#include "textflag.h" + + +// This implements union2by2 using golang's version of arm64 assembly +// The algorithm is very similar to the generic one, +// but makes better use of arm64 features so is notably faster. +// The basic algorithm structure is as follows: +// 1. If either set is empty, copy the other set into the buffer and return the length +// 2. Otherwise, load the first element of each set into a variable (s1 and s2). +// 3. a. Compare the values of s1 and s2. + // b. add the smaller one to the buffer. + // c. perform a bounds check before incrementing. + // If one set is finished, copy the rest of the other set over. + // d. update s1 and or s2 to the next value, continue loop. + // + // Past the fact of the algorithm, this code makes use of several arm64 features + // Condition Codes: + // arm64's CMP operation sets 4 bits that can be used for branching, + // rather than just true or false. + // As a consequence, a single comparison gives enough information to distinguish the three cases + // + // Post-increment pointers after load/store: + // Instructions like `MOVHU.P 2(R0), R6` + // increment the register by a specified amount, in this example 2. 
+ // Because uint16's are exactly 2 bytes and the length of the slices + // is part of the slice header, + // there is no need to separately track the index into the slice. + // Instead, the code can calculate the final read value and compare against that, + // using the post-increment reads to move the pointers along. + // + // TODO: CALL out to memmove once the list is exhausted. + // Right now it moves the necessary shorts so that the remaining count + // is a multiple of 4 and then copies 64 bits at a time. + +TEXT ·union2by2(SB), NOSPLIT, $0-80 + // R0, R1, and R2 for the pointers to the three slices + MOVD set1+0(FP), R0 + MOVD set2+24(FP), R1 + MOVD buffer+48(FP), R2 + + //R3 and R4 will be the values at which we will have finished reading set1 and set2. + // R3 should be R0 + 2 * set1_len+8(FP) + MOVD set1_len+8(FP), R3 + MOVD set2_len+32(FP), R4 + + ADD R3<<1, R0, R3 + ADD R4<<1, R1, R4 + + + //Rather than counting the number of elements added separately + //Save the starting register of buffer. 
+ MOVD buffer+48(FP), R5 + + // set1 is empty, just flush set2 + CMP R0, R3 + BEQ flush_right + + // set2 is empty, just flush set1 + CMP R1, R4 + BEQ flush_left + + // R6, R7 are the working space for s1 and s2 + MOVD ZR, R6 + MOVD ZR, R7 + + MOVHU.P 2(R0), R6 + MOVHU.P 2(R1), R7 +loop: + + CMP R6, R7 + BEQ pop_both // R6 == R7 + BLS pop_right // R6 > R7 +//pop_left: // R6 < R7 + MOVHU.P R6, 2(R2) + CMP R0, R3 + BEQ pop_then_flush_right + MOVHU.P 2(R0), R6 + JMP loop +pop_both: + MOVHU.P R6, 2(R2) //could also use R7, since they are equal + CMP R0, R3 + BEQ flush_right + CMP R1, R4 + BEQ flush_left + MOVHU.P 2(R0), R6 + MOVHU.P 2(R1), R7 + JMP loop +pop_right: + MOVHU.P R7, 2(R2) + CMP R1, R4 + BEQ pop_then_flush_left + MOVHU.P 2(R1), R7 + JMP loop + +pop_then_flush_right: + MOVHU.P R7, 2(R2) +flush_right: + MOVD R1, R0 + MOVD R4, R3 + JMP flush_left +pop_then_flush_left: + MOVHU.P R6, 2(R2) +flush_left: + CMP R0, R3 + BEQ return + //figure out how many bytes to slough off. Must be a multiple of two + SUB R0, R3, R4 + ANDS $6, R4 + BEQ long_flush //handles the 0 mod 8 case + SUBS $4, R4, R4 // since possible values are 2, 4, 6, this splits evenly + BLT pop_single // exactly the 2 case + MOVW.P 4(R0), R6 + MOVW.P R6, 4(R2) + BEQ long_flush // we're now aligned by 64 bits, as R4==4, otherwise 2 more +pop_single: + MOVHU.P 2(R0), R6 + MOVHU.P R6, 2(R2) +long_flush: + // at this point we know R3 - R0 is a multiple of 8. 
+ CMP R0, R3 + BEQ return + MOVD.P 8(R0), R6 + MOVD.P R6, 8(R2) + JMP long_flush +return: + // number of shorts written is (R5 - R2) >> 1 + SUB R5, R2 + LSR $1, R2, R2 + MOVD R2, size+72(FP) + RET diff --git a/vendor/github.com/RoaringBitmap/roaring/setutil_generic.go b/vendor/github.com/RoaringBitmap/roaring/setutil_generic.go new file mode 100644 index 000000000..9edcc9025 --- /dev/null +++ b/vendor/github.com/RoaringBitmap/roaring/setutil_generic.go @@ -0,0 +1,63 @@ +// +build !arm64 gccgo appengine + +package roaring + +func union2by2(set1 []uint16, set2 []uint16, buffer []uint16) int { + pos := 0 + k1 := 0 + k2 := 0 + if 0 == len(set2) { + buffer = buffer[:len(set1)] + copy(buffer, set1[:]) + return len(set1) + } + if 0 == len(set1) { + buffer = buffer[:len(set2)] + copy(buffer, set2[:]) + return len(set2) + } + s1 := set1[k1] + s2 := set2[k2] + buffer = buffer[:cap(buffer)] + for { + if s1 < s2 { + buffer[pos] = s1 + pos++ + k1++ + if k1 >= len(set1) { + copy(buffer[pos:], set2[k2:]) + pos += len(set2) - k2 + break + } + s1 = set1[k1] + } else if s1 == s2 { + buffer[pos] = s1 + pos++ + k1++ + k2++ + if k1 >= len(set1) { + copy(buffer[pos:], set2[k2:]) + pos += len(set2) - k2 + break + } + if k2 >= len(set2) { + copy(buffer[pos:], set1[k1:]) + pos += len(set1) - k1 + break + } + s1 = set1[k1] + s2 = set2[k2] + } else { // if (set1[k1]>set2[k2]) + buffer[pos] = s2 + pos++ + k2++ + if k2 >= len(set2) { + copy(buffer[pos:], set1[k1:]) + pos += len(set1) - k1 + break + } + s2 = set2[k2] + } + } + return pos +} diff --git a/vendor/github.com/RoaringBitmap/roaring/smat.go b/vendor/github.com/RoaringBitmap/roaring/smat.go index 9da475634..972cd244d 100644 --- a/vendor/github.com/RoaringBitmap/roaring/smat.go +++ b/vendor/github.com/RoaringBitmap/roaring/smat.go @@ -63,7 +63,7 @@ import ( "sort" "github.com/mschoch/smat" - "github.com/willf/bitset" + "github.com/bits-and-blooms/bitset" ) // fuzz test using state machine driven by byte stream. 
diff --git a/vendor/github.com/RoaringBitmap/roaring/util.go b/vendor/github.com/RoaringBitmap/roaring/util.go index 676303391..48b9d5a10 100644 --- a/vendor/github.com/RoaringBitmap/roaring/util.go +++ b/vendor/github.com/RoaringBitmap/roaring/util.go @@ -1,6 +1,7 @@ package roaring import ( + "math" "math/rand" "sort" ) @@ -15,7 +16,7 @@ const ( noOffsetThreshold = 4 // MaxUint32 is the largest uint32 value. - MaxUint32 = 4294967295 + MaxUint32 = math.MaxUint32 // MaxRange is One more than the maximum allowed bitmap bit index. For use as an upper // bound for ranges. @@ -23,7 +24,7 @@ const ( // MaxUint16 is the largest 16 bit unsigned int. // This is the largest value an interval16 can store. - MaxUint16 = 65535 + MaxUint16 = math.MaxUint16 // Compute wordSizeInBytes, the size of a word in bytes. _m = ^uint64(0) diff --git a/vendor/github.com/axiomhq/hyperloglog/.gitignore b/vendor/github.com/axiomhq/hyperloglog/.gitignore new file mode 100644 index 000000000..a1338d685 --- /dev/null +++ b/vendor/github.com/axiomhq/hyperloglog/.gitignore @@ -0,0 +1,14 @@ +# Binaries for programs and plugins +*.exe +*.dll +*.so +*.dylib + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 +.glide/ diff --git a/vendor/github.com/axiomhq/hyperloglog/LICENSE b/vendor/github.com/axiomhq/hyperloglog/LICENSE new file mode 100644 index 000000000..8436fdb43 --- /dev/null +++ b/vendor/github.com/axiomhq/hyperloglog/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2017 Axiom Inc. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/vendor/github.com/axiomhq/hyperloglog/README.md b/vendor/github.com/axiomhq/hyperloglog/README.md new file mode 100644 index 000000000..a132622b2 --- /dev/null +++ b/vendor/github.com/axiomhq/hyperloglog/README.md @@ -0,0 +1,47 @@ +![Hyperloglog Logo](https://axiom.co/static/oss-hyperloglog.jpg) + + --- + +[![GoDoc](https://godoc.org/github.com/axiomhq/hyperloglog?status.svg)](https://godoc.org/github.com/axiomhq/hyperloglog) [![Go Report Card](https://goreportcard.com/badge/github.com/axiomhq/hyperloglog)](https://goreportcard.com/report/github.com/axiomhq/hyperloglog) [![CircleCI](https://circleci.com/gh/axiomhq/hyperloglog/tree/master.svg?style=svg)](https://circleci.com/gh/axiomhq/hyperloglog/tree/master) + +An improved version of [HyperLogLog](https://en.wikipedia.org/wiki/HyperLogLog) for the count-distinct problem, approximating the number of distinct elements in a multiset **using 33-50% less space** than other usual HyperLogLog implementations. + +This work is based on ["Better with fewer bits: Improving the performance of cardinality estimation of large data streams - Qingjun Xiao, You Zhou, Shigang Chen"](http://cse.seu.edu.cn/PersonalPage/csqjxiao/csqjxiao_files/papers/INFOCOM17.pdf). + +## Implementation + +The core differences between this and other implementations are: +* **use metro hash** instead of xxhash +* **sparse representation** for lower cardinalities (like HyperLogLog++) +* **loglog-beta** for dynamic bias correction medium and high cardinalities. +* **4-bit register** instead of 5 (HLL) and 6 (HLL++), but most implementations use 1-byte registers out of convenience + +In general it borrows a lot from [InfluxData's fork](https://github.com/influxdata/influxdb/tree/master/pkg/estimator/hll) of [Clark Duvall's HyperLogLog++ implementation](https://github.com/clarkduvall/hyperloglog), but uses **50% less space**. 
+ +## Results +A direct comparison with the [HyperLogLog++ implementation used by InfluxDB](https://github.com/influxdata/influxdb/tree/master/pkg/estimator/hll) yielded the following results: + +| Exact | Axiom (8.2 KB) | Influx (16.39 KB) | +| --- | --- | --- | +| 10 | 10 (0.0% off) | 10 (0.0% off) | +| 50 | 50 (0.0% off) | 50 (0.0% off) | +| 250 | 250 (0.0% off) | 250 (0.0% off) | +| 1250 | 1249 (0.08% off) | 1249 (0.08% off) | +| 6250 | 6250 (0.0% off) | 6250 (0.0% off) | +| 31250 | **31008 (0.7744% off)** | 31565 (1.0080% off) | +| 156250 | **156013 (0.1517% off)** | 156652 (0.2573% off) | +| 781250 | **782364 (0.1426% off)** | 775988 (0.6735% off) | +| 3906250 | 3869332 (0.9451% off) | **3889909 (0.4183% off)** | +| 10000000 | **9952682 (0.4732% off)** |9889556 (1.1044% off) | + + +## Note +A big thank you to Prof. Shigang Chen and his team at the University of Florida who are actively conducting research around "Big Network Data". + +--- + +**An [Axiom](https://axiom.co) production.** + +Do you enjoy solving problems like these? If so, get in touch with us at [careers@axiom.co](mailto:careers@axiom.co)! 
+ + diff --git a/vendor/github.com/axiomhq/hyperloglog/compressed.go b/vendor/github.com/axiomhq/hyperloglog/compressed.go new file mode 100644 index 000000000..4b908be46 --- /dev/null +++ b/vendor/github.com/axiomhq/hyperloglog/compressed.go @@ -0,0 +1,180 @@ +package hyperloglog + +import "encoding/binary" + +// Original author of this file is github.com/clarkduvall/hyperloglog +type iterable interface { + decode(i int, last uint32) (uint32, int) + Len() int + Iter() *iterator +} + +type iterator struct { + i int + last uint32 + v iterable +} + +func (iter *iterator) Next() uint32 { + n, i := iter.v.decode(iter.i, iter.last) + iter.last = n + iter.i = i + return n +} + +func (iter *iterator) Peek() uint32 { + n, _ := iter.v.decode(iter.i, iter.last) + return n +} + +func (iter iterator) HasNext() bool { + return iter.i < iter.v.Len() +} + +type compressedList struct { + count uint32 + last uint32 + b variableLengthList +} + +func (v *compressedList) Clone() *compressedList { + if v == nil { + return nil + } + + newV := &compressedList{ + count: v.count, + last: v.last, + } + + newV.b = make(variableLengthList, len(v.b)) + copy(newV.b, v.b) + return newV +} + +func (v *compressedList) MarshalBinary() (data []byte, err error) { + // Marshal the variableLengthList + bdata, err := v.b.MarshalBinary() + if err != nil { + return nil, err + } + + // At least 4 bytes for the two fixed sized values plus the size of bdata. + data = make([]byte, 0, 4+4+len(bdata)) + + // Marshal the count and last values. + data = append(data, []byte{ + // Number of items in the list. + byte(v.count >> 24), + byte(v.count >> 16), + byte(v.count >> 8), + byte(v.count), + // The last item in the list. + byte(v.last >> 24), + byte(v.last >> 16), + byte(v.last >> 8), + byte(v.last), + }...) + + // Append the list + return append(data, bdata...), nil +} + +func (v *compressedList) UnmarshalBinary(data []byte) error { + if len(data) < 12 { + return ErrorTooShort + } + + // Set the count. 
+ v.count, data = binary.BigEndian.Uint32(data[:4]), data[4:] + + // Set the last value. + v.last, data = binary.BigEndian.Uint32(data[:4]), data[4:] + + // Set the list. + sz, data := binary.BigEndian.Uint32(data[:4]), data[4:] + v.b = make([]uint8, sz) + if uint32(len(data)) < sz { + return ErrorTooShort + } + for i := uint32(0); i < sz; i++ { + v.b[i] = data[i] + } + return nil +} + +func newCompressedList() *compressedList { + v := &compressedList{} + v.b = make(variableLengthList, 0) + return v +} + +func (v *compressedList) Len() int { + return len(v.b) +} + +func (v *compressedList) decode(i int, last uint32) (uint32, int) { + n, i := v.b.decode(i, last) + return n + last, i +} + +func (v *compressedList) Append(x uint32) { + v.count++ + v.b = v.b.Append(x - v.last) + v.last = x +} + +func (v *compressedList) Iter() *iterator { + return &iterator{0, 0, v} +} + +type variableLengthList []uint8 + +func (v variableLengthList) MarshalBinary() (data []byte, err error) { + // 4 bytes for the size of the list, and a byte for each element in the + // list. + data = make([]byte, 0, 4+v.Len()) + + // Length of the list. We only need 32 bits because the size of the set + // couldn't exceed that on 32 bit architectures. + sz := v.Len() + data = append(data, []byte{ + byte(sz >> 24), + byte(sz >> 16), + byte(sz >> 8), + byte(sz), + }...) + + // Marshal each element in the list. 
+ for i := 0; i < sz; i++ { + data = append(data, v[i]) + } + + return data, nil +} + +func (v variableLengthList) Len() int { + return len(v) +} + +func (v *variableLengthList) Iter() *iterator { + return &iterator{0, 0, v} +} + +func (v variableLengthList) decode(i int, last uint32) (uint32, int) { + var x uint32 + j := i + for ; v[j]&0x80 != 0; j++ { + x |= uint32(v[j]&0x7f) << (uint(j-i) * 7) + } + x |= uint32(v[j]) << (uint(j-i) * 7) + return x, j + 1 +} + +func (v variableLengthList) Append(x uint32) variableLengthList { + for x&0xffffff80 != 0 { + v = append(v, uint8((x&0x7f)|0x80)) + x >>= 7 + } + return append(v, uint8(x&0x7f)) +} diff --git a/vendor/github.com/axiomhq/hyperloglog/hyperloglog.go b/vendor/github.com/axiomhq/hyperloglog/hyperloglog.go new file mode 100644 index 000000000..826639158 --- /dev/null +++ b/vendor/github.com/axiomhq/hyperloglog/hyperloglog.go @@ -0,0 +1,424 @@ +package hyperloglog + +import ( + "encoding/binary" + "errors" + "fmt" + "math" + "sort" +) + +const ( + capacity = uint8(16) + pp = uint8(25) + mp = uint32(1) << pp + version = 1 +) + +// Sketch is a HyperLogLog data-structure for the count-distinct problem, +// approximating the number of distinct elements in a multiset. 
+type Sketch struct { + p uint8 + b uint8 + m uint32 + alpha float64 + tmpSet set + sparseList *compressedList + regs *registers +} + +// New returns a HyperLogLog Sketch with 2^14 registers (precision 14) +func New() *Sketch { + return New14() +} + +// New14 returns a HyperLogLog Sketch with 2^14 registers (precision 14) +func New14() *Sketch { + sk, _ := newSketch(14, true) + return sk +} + +// New16 returns a HyperLogLog Sketch with 2^16 registers (precision 16) +func New16() *Sketch { + sk, _ := newSketch(16, true) + return sk +} + +// NewNoSparse returns a HyperLogLog Sketch with 2^14 registers (precision 14) +// that will not use a sparse representation +func NewNoSparse() *Sketch { + sk, _ := newSketch(14, false) + return sk +} + +// New16NoSparse returns a HyperLogLog Sketch with 2^16 registers (precision 16) +// that will not use a sparse representation +func New16NoSparse() *Sketch { + sk, _ := newSketch(16, false) + return sk +} + +// newSketch returns a HyperLogLog Sketch with 2^precision registers +func newSketch(precision uint8, sparse bool) (*Sketch, error) { + if precision < 4 || precision > 18 { + return nil, fmt.Errorf("p has to be >= 4 and <= 18") + } + m := uint32(math.Pow(2, float64(precision))) + s := &Sketch{ + m: m, + p: precision, + alpha: alpha(float64(m)), + } + if sparse { + s.tmpSet = set{} + s.sparseList = newCompressedList() + } else { + s.regs = newRegisters(m) + } + return s, nil +} + +func (sk *Sketch) sparse() bool { + return sk.sparseList != nil +} + +// Clone returns a deep copy of sk. +func (sk *Sketch) Clone() *Sketch { + return &Sketch{ + b: sk.b, + p: sk.p, + m: sk.m, + alpha: sk.alpha, + tmpSet: sk.tmpSet.Clone(), + sparseList: sk.sparseList.Clone(), + regs: sk.regs.clone(), + } +} + +// Converts to normal if the sparse list is too large. 
+func (sk *Sketch) maybeToNormal() { + if uint32(len(sk.tmpSet))*100 > sk.m { + sk.mergeSparse() + if uint32(sk.sparseList.Len()) > sk.m { + sk.toNormal() + } + } +} + +// Merge takes another Sketch and combines it with Sketch h. +// If Sketch h is using the sparse Sketch, it will be converted +// to the normal Sketch. +func (sk *Sketch) Merge(other *Sketch) error { + if other == nil { + // Nothing to do + return nil + } + cpOther := other.Clone() + + if sk.p != cpOther.p { + return errors.New("precisions must be equal") + } + + if sk.sparse() && other.sparse() { + for k := range other.tmpSet { + sk.tmpSet.add(k) + } + for iter := other.sparseList.Iter(); iter.HasNext(); { + sk.tmpSet.add(iter.Next()) + } + sk.maybeToNormal() + return nil + } + + if sk.sparse() { + sk.toNormal() + } + + if cpOther.sparse() { + for k := range cpOther.tmpSet { + i, r := decodeHash(k, cpOther.p, pp) + sk.insert(i, r) + } + + for iter := cpOther.sparseList.Iter(); iter.HasNext(); { + i, r := decodeHash(iter.Next(), cpOther.p, pp) + sk.insert(i, r) + } + } else { + if sk.b < cpOther.b { + sk.regs.rebase(cpOther.b - sk.b) + sk.b = cpOther.b + } else { + cpOther.regs.rebase(sk.b - cpOther.b) + cpOther.b = sk.b + } + + for i, v := range cpOther.regs.tailcuts { + v1 := v.get(0) + if v1 > sk.regs.get(uint32(i)*2) { + sk.regs.set(uint32(i)*2, v1) + } + v2 := v.get(1) + if v2 > sk.regs.get(1+uint32(i)*2) { + sk.regs.set(1+uint32(i)*2, v2) + } + } + } + return nil +} + +// Convert from sparse Sketch to dense Sketch. 
+func (sk *Sketch) toNormal() { + if len(sk.tmpSet) > 0 { + sk.mergeSparse() + } + + sk.regs = newRegisters(sk.m) + for iter := sk.sparseList.Iter(); iter.HasNext(); { + i, r := decodeHash(iter.Next(), sk.p, pp) + sk.insert(i, r) + } + + sk.tmpSet = nil + sk.sparseList = nil +} + +func (sk *Sketch) insert(i uint32, r uint8) bool { + changed := false + if r-sk.b >= capacity { + //overflow + db := sk.regs.min() + if db > 0 { + sk.b += db + sk.regs.rebase(db) + changed = true + } + } + if r > sk.b { + val := r - sk.b + if c1 := capacity - 1; c1 < val { + val = c1 + } + + if val > sk.regs.get(i) { + sk.regs.set(i, val) + changed = true + } + } + return changed +} + +// Insert adds element e to sketch +func (sk *Sketch) Insert(e []byte) bool { + x := hash(e) + return sk.InsertHash(x) +} + +// InsertHash adds hash x to sketch +func (sk *Sketch) InsertHash(x uint64) bool { + if sk.sparse() { + changed := sk.tmpSet.add(encodeHash(x, sk.p, pp)) + if !changed { + return false + } + if uint32(len(sk.tmpSet))*100 > sk.m/2 { + sk.mergeSparse() + if uint32(sk.sparseList.Len()) > sk.m/2 { + sk.toNormal() + } + } + return true + } else { + i, r := getPosVal(x, sk.p) + return sk.insert(uint32(i), r) + } +} + +// Estimate returns the cardinality of the Sketch +func (sk *Sketch) Estimate() uint64 { + if sk.sparse() { + sk.mergeSparse() + return uint64(linearCount(mp, mp-sk.sparseList.count)) + } + + sum, ez := sk.regs.sumAndZeros(sk.b) + m := float64(sk.m) + var est float64 + + var beta func(float64) float64 + if sk.p < 16 { + beta = beta14 + } else { + beta = beta16 + } + + if sk.b == 0 { + est = (sk.alpha * m * (m - ez) / (sum + beta(ez))) + } else { + est = (sk.alpha * m * m / sum) + } + + return uint64(est + 0.5) +} + +func (sk *Sketch) mergeSparse() { + if len(sk.tmpSet) == 0 { + return + } + + keys := make(uint64Slice, 0, len(sk.tmpSet)) + for k := range sk.tmpSet { + keys = append(keys, k) + } + sort.Sort(keys) + + newList := newCompressedList() + for iter, i := 
sk.sparseList.Iter(), 0; iter.HasNext() || i < len(keys); { + if !iter.HasNext() { + newList.Append(keys[i]) + i++ + continue + } + + if i >= len(keys) { + newList.Append(iter.Next()) + continue + } + + x1, x2 := iter.Peek(), keys[i] + if x1 == x2 { + newList.Append(iter.Next()) + i++ + } else if x1 > x2 { + newList.Append(x2) + i++ + } else { + newList.Append(iter.Next()) + } + } + + sk.sparseList = newList + sk.tmpSet = set{} +} + +// MarshalBinary implements the encoding.BinaryMarshaler interface. +func (sk *Sketch) MarshalBinary() (data []byte, err error) { + // Marshal a version marker. + data = append(data, version) + // Marshal p. + data = append(data, sk.p) + // Marshal b + data = append(data, sk.b) + + if sk.sparse() { + // It's using the sparse Sketch. + data = append(data, byte(1)) + + // Add the tmp_set + tsdata, err := sk.tmpSet.MarshalBinary() + if err != nil { + return nil, err + } + data = append(data, tsdata...) + + // Add the sparse Sketch + sdata, err := sk.sparseList.MarshalBinary() + if err != nil { + return nil, err + } + return append(data, sdata...), nil + } + + // It's using the dense Sketch. + data = append(data, byte(0)) + + // Add the dense sketch Sketch. + sz := len(sk.regs.tailcuts) + data = append(data, []byte{ + byte(sz >> 24), + byte(sz >> 16), + byte(sz >> 8), + byte(sz), + }...) + + // Marshal each element in the list. + for i := 0; i < len(sk.regs.tailcuts); i++ { + data = append(data, byte(sk.regs.tailcuts[i])) + } + + return data, nil +} + +// ErrorTooShort is an error that UnmarshalBinary try to parse too short +// binary. +var ErrorTooShort = errors.New("too short binary") + +// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. +func (sk *Sketch) UnmarshalBinary(data []byte) error { + if len(data) < 8 { + return ErrorTooShort + } + + // Unmarshal version. We may need this in the future if we make + // non-compatible changes. + _ = data[0] + + // Unmarshal p. + p := data[1] + + // Unmarshal b. 
+ sk.b = data[2] + + // Determine if we need a sparse Sketch + sparse := data[3] == byte(1) + + // Make a newSketch Sketch if the precision doesn't match or if the Sketch was used + if sk.p != p || sk.regs != nil || len(sk.tmpSet) > 0 || (sk.sparseList != nil && sk.sparseList.Len() > 0) { + newh, err := newSketch(p, sparse) + if err != nil { + return err + } + newh.b = sk.b + *sk = *newh + } + + // h is now initialised with the correct p. We just need to fill the + // rest of the details out. + if sparse { + // Using the sparse Sketch. + + // Unmarshal the tmp_set. + tssz := binary.BigEndian.Uint32(data[4:8]) + sk.tmpSet = make(map[uint32]struct{}, tssz) + + // We need to unmarshal tssz values in total, and each value requires us + // to read 4 bytes. + tsLastByte := int((tssz * 4) + 8) + for i := 8; i < tsLastByte; i += 4 { + k := binary.BigEndian.Uint32(data[i : i+4]) + sk.tmpSet[k] = struct{}{} + } + + // Unmarshal the sparse Sketch. + return sk.sparseList.UnmarshalBinary(data[tsLastByte:]) + } + + // Using the dense Sketch. 
+ sk.sparseList = nil + sk.tmpSet = nil + dsz := binary.BigEndian.Uint32(data[4:8]) + sk.regs = newRegisters(dsz * 2) + data = data[8:] + + for i, val := range data { + sk.regs.tailcuts[i] = reg(val) + if uint8(sk.regs.tailcuts[i]<<4>>4) > 0 { + sk.regs.nz-- + } + if uint8(sk.regs.tailcuts[i]>>4) > 0 { + sk.regs.nz-- + } + } + + return nil +} diff --git a/vendor/github.com/axiomhq/hyperloglog/registers.go b/vendor/github.com/axiomhq/hyperloglog/registers.go new file mode 100644 index 000000000..19bb5d47f --- /dev/null +++ b/vendor/github.com/axiomhq/hyperloglog/registers.go @@ -0,0 +1,114 @@ +package hyperloglog + +import ( + "math" +) + +type reg uint8 +type tailcuts []reg + +type registers struct { + tailcuts + nz uint32 +} + +func (r *reg) set(offset, val uint8) bool { + var isZero bool + if offset == 0 { + isZero = *r < 16 + tmpVal := uint8((*r) << 4 >> 4) + *r = reg(tmpVal | (val << 4)) + } else { + isZero = *r&0x0f == 0 + tmpVal := uint8((*r) >> 4 << 4) + *r = reg(tmpVal | val) + } + return isZero +} + +func (r *reg) get(offset uint8) uint8 { + if offset == 0 { + return uint8((*r) >> 4) + } + return uint8((*r) << 4 >> 4) +} + +func newRegisters(size uint32) *registers { + return ®isters{ + tailcuts: make(tailcuts, size/2), + nz: size, + } +} + +func (rs *registers) clone() *registers { + if rs == nil { + return nil + } + tc := make([]reg, len(rs.tailcuts)) + copy(tc, rs.tailcuts) + return ®isters{ + tailcuts: tc, + nz: rs.nz, + } +} + +func (rs *registers) rebase(delta uint8) { + nz := uint32(len(rs.tailcuts)) * 2 + for i := range rs.tailcuts { + for j := uint8(0); j < 2; j++ { + val := rs.tailcuts[i].get(j) + if val >= delta { + rs.tailcuts[i].set(j, val-delta) + if val-delta > 0 { + nz-- + } + } + } + } + rs.nz = nz +} + +func (rs *registers) set(i uint32, val uint8) { + offset, index := uint8(i)&1, i/2 + if rs.tailcuts[index].set(offset, val) { + rs.nz-- + } +} + +func (rs *registers) get(i uint32) uint8 { + offset, index := uint8(i)&1, i/2 + return 
rs.tailcuts[index].get(offset) +} + +func (rs *registers) sumAndZeros(base uint8) (res, ez float64) { + for _, r := range rs.tailcuts { + for j := uint8(0); j < 2; j++ { + v := float64(base + r.get(j)) + if v == 0 { + ez++ + } + res += 1.0 / math.Pow(2.0, v) + } + } + rs.nz = uint32(ez) + return res, ez +} + +func (rs *registers) min() uint8 { + if rs.nz > 0 { + return 0 + } + min := uint8(math.MaxUint8) + for _, r := range rs.tailcuts { + if r == 0 || min == 0 { + return 0 + } + if val := uint8(r << 4 >> 4); val < min { + min = val + } + if val := uint8(r >> 4); val < min { + min = val + } + } + return min +} diff --git a/vendor/github.com/axiomhq/hyperloglog/sparse.go b/vendor/github.com/axiomhq/hyperloglog/sparse.go new file mode 100644 index 000000000..8c457d327 --- /dev/null +++ b/vendor/github.com/axiomhq/hyperloglog/sparse.go @@ -0,0 +1,92 @@ +package hyperloglog + +import ( + "math/bits" +) + +func getIndex(k uint32, p, pp uint8) uint32 { + if k&1 == 1 { + return bextr32(k, 32-p, p) + } + return bextr32(k, pp-p+1, p) +} + +// Encode a hash to be used in the sparse representation. +func encodeHash(x uint64, p, pp uint8) uint32 { + idx := uint32(bextr(x, 64-pp, pp)) + if bextr(x, 64-pp, pp-p) == 0 { + zeros := bits.LeadingZeros64((bextr(x, 0, 64-pp)<> 24), + byte(sl >> 16), + byte(sl >> 8), + byte(sl), + }...) + + // Marshal each element in the set. + for k := range s { + data = append(data, []byte{ + byte(k >> 24), + byte(k >> 16), + byte(k >> 8), + byte(k), + }...) 
+ } + + return data, nil +} + +type uint64Slice []uint32 + +func (p uint64Slice) Len() int { return len(p) } +func (p uint64Slice) Less(i, j int) bool { return p[i] < p[j] } +func (p uint64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } diff --git a/vendor/github.com/axiomhq/hyperloglog/utils.go b/vendor/github.com/axiomhq/hyperloglog/utils.go new file mode 100644 index 000000000..58cd80cee --- /dev/null +++ b/vendor/github.com/axiomhq/hyperloglog/utils.go @@ -0,0 +1,70 @@ +package hyperloglog + +import ( + "math" + "math/bits" + + metro "github.com/dgryski/go-metro" +) + +var hash = hashFunc + +func beta14(ez float64) float64 { + zl := math.Log(ez + 1) + return -0.370393911*ez + + 0.070471823*zl + + 0.17393686*math.Pow(zl, 2) + + 0.16339839*math.Pow(zl, 3) + + -0.09237745*math.Pow(zl, 4) + + 0.03738027*math.Pow(zl, 5) + + -0.005384159*math.Pow(zl, 6) + + 0.00042419*math.Pow(zl, 7) +} + +func beta16(ez float64) float64 { + zl := math.Log(ez + 1) + return -0.37331876643753059*ez + + -1.41704077448122989*zl + + 0.40729184796612533*math.Pow(zl, 2) + + 1.56152033906584164*math.Pow(zl, 3) + + -0.99242233534286128*math.Pow(zl, 4) + + 0.26064681399483092*math.Pow(zl, 5) + + -0.03053811369682807*math.Pow(zl, 6) + + 0.00155770210179105*math.Pow(zl, 7) +} + +func alpha(m float64) float64 { + switch m { + case 16: + return 0.673 + case 32: + return 0.697 + case 64: + return 0.709 + } + return 0.7213 / (1 + 1.079/m) +} + +func getPosVal(x uint64, p uint8) (uint64, uint8) { + i := bextr(x, 64-p, p) // {x63,...,x64-p} + w := x<

> start) & ((1 << length) - 1) +} + +func bextr32(v uint32, start, length uint8) uint32 { + return (v >> start) & ((1 << length) - 1) +} + +func hashFunc(e []byte) uint64 { + return metro.Hash64(e, 1337) +} diff --git a/vendor/github.com/willf/bitset/.gitignore b/vendor/github.com/bits-and-blooms/bitset/.gitignore similarity index 100% rename from vendor/github.com/willf/bitset/.gitignore rename to vendor/github.com/bits-and-blooms/bitset/.gitignore diff --git a/vendor/github.com/willf/bitset/.travis.yml b/vendor/github.com/bits-and-blooms/bitset/.travis.yml similarity index 100% rename from vendor/github.com/willf/bitset/.travis.yml rename to vendor/github.com/bits-and-blooms/bitset/.travis.yml diff --git a/vendor/github.com/willf/bitset/LICENSE b/vendor/github.com/bits-and-blooms/bitset/LICENSE similarity index 100% rename from vendor/github.com/willf/bitset/LICENSE rename to vendor/github.com/bits-and-blooms/bitset/LICENSE diff --git a/vendor/github.com/willf/bitset/README.md b/vendor/github.com/bits-and-blooms/bitset/README.md similarity index 70% rename from vendor/github.com/willf/bitset/README.md rename to vendor/github.com/bits-and-blooms/bitset/README.md index 6c62b20c6..97e83071e 100644 --- a/vendor/github.com/willf/bitset/README.md +++ b/vendor/github.com/bits-and-blooms/bitset/README.md @@ -2,10 +2,9 @@ *Go language library to map between non-negative integers and boolean values* -[![Master Build Status](https://secure.travis-ci.org/willf/bitset.png?branch=master)](https://travis-ci.org/willf/bitset?branch=master) -[![Master Coverage Status](https://coveralls.io/repos/willf/bitset/badge.svg?branch=master&service=github)](https://coveralls.io/github/willf/bitset?branch=master) +[![Test](https://github.com/bits-and-blooms/bitset/workflows/Test/badge.svg)](https://github.com/willf/bitset/actions?query=workflow%3ATest) [![Go Report Card](https://goreportcard.com/badge/github.com/willf/bitset)](https://goreportcard.com/report/github.com/willf/bitset) 
-[![GoDoc](https://godoc.org/github.com/willf/bitset?status.svg)](http://godoc.org/github.com/willf/bitset) +[![PkgGoDev](https://pkg.go.dev/badge/github.com/bits-and-blooms/bitset?tab=doc)](https://pkg.go.dev/github.com/bits-and-blooms/bitset?tab=doc) ## Description @@ -30,7 +29,7 @@ import ( "fmt" "math/rand" - "github.com/willf/bitset" + "github.com/bits-and-blooms/bitset" ) func main() { @@ -63,8 +62,11 @@ func main() { As an alternative to BitSets, one should check out the 'big' package, which provides a (less set-theoretical) view of bitsets. -Godoc documentation is at: https://godoc.org/github.com/willf/bitset +Package documentation is at: https://pkg.go.dev/github.com/bits-and-blooms/bitset?tab=doc +## Memory Usage + +The memory usage of a bitset using N bits is at least N/8 bytes. The number of bits in a bitset is at least as large as one plus the greatest bit index you have accessed. Thus it is possible to run out of memory while using a bitset. If you have lots of bits, you might prefer compressed bitsets, like the [Roaring bitmaps](http://roaringbitmap.org) and its [Go implementation](https://github.com/RoaringBitmap/roaring). ## Implementation Note @@ -75,22 +77,17 @@ It is possible that a later version will match the `math/bits` return signature ## Installation ```bash -go get github.com/willf/bitset +go get github.com/bits-and-blooms/bitset ``` ## Contributing If you wish to contribute to this project, please branch and issue a pull request against master ("[GitHub Flow](https://guides.github.com/introduction/flow/)") -This project include a Makefile that allows you to test and build the project with simple commands. -To see all available options: -```bash -make help -``` - ## Running all tests -Before committing the code, please check if it passes all tests using (note: this will install some dependencies): +Before committing the code, please check if it passes tests, has adequate coverage, etc. 
```bash -make qa +go test +go test -cover ``` diff --git a/vendor/github.com/bits-and-blooms/bitset/azure-pipelines.yml b/vendor/github.com/bits-and-blooms/bitset/azure-pipelines.yml new file mode 100644 index 000000000..f9b295918 --- /dev/null +++ b/vendor/github.com/bits-and-blooms/bitset/azure-pipelines.yml @@ -0,0 +1,39 @@ +# Go +# Build your Go project. +# Add steps that test, save build artifacts, deploy, and more: +# https://docs.microsoft.com/azure/devops/pipelines/languages/go + +trigger: +- master + +pool: + vmImage: 'Ubuntu-16.04' + +variables: + GOBIN: '$(GOPATH)/bin' # Go binaries path + GOROOT: '/usr/local/go1.11' # Go installation path + GOPATH: '$(system.defaultWorkingDirectory)/gopath' # Go workspace path + modulePath: '$(GOPATH)/src/github.com/$(build.repository.name)' # Path to the module's code + +steps: +- script: | + mkdir -p '$(GOBIN)' + mkdir -p '$(GOPATH)/pkg' + mkdir -p '$(modulePath)' + shopt -s extglob + shopt -s dotglob + mv !(gopath) '$(modulePath)' + echo '##vso[task.prependpath]$(GOBIN)' + echo '##vso[task.prependpath]$(GOROOT)/bin' + displayName: 'Set up the Go workspace' + +- script: | + go version + go get -v -t -d ./... + if [ -f Gopkg.toml ]; then + curl https://raw.githubusercontent.com/golang/dep/master/install.sh | sh + dep ensure + fi + go build -v . + workingDirectory: '$(modulePath)' + displayName: 'Get dependencies, then build' diff --git a/vendor/github.com/willf/bitset/bitset.go b/vendor/github.com/bits-and-blooms/bitset/bitset.go similarity index 86% rename from vendor/github.com/willf/bitset/bitset.go rename to vendor/github.com/bits-and-blooms/bitset/bitset.go index 32044f5c8..d688806a5 100644 --- a/vendor/github.com/willf/bitset/bitset.go +++ b/vendor/github.com/bits-and-blooms/bitset/bitset.go @@ -129,7 +129,8 @@ func Cap() uint { return ^uint(0) } -// Len returns the length of the BitSet in words +// Len returns the number of bits in the BitSet. +// Note the difference to method Count, see example. 
func (b *BitSet) Len() uint { return b.length } @@ -137,6 +138,9 @@ func (b *BitSet) Len() uint { // extendSetMaybe adds additional words to incorporate new bits if needed func (b *BitSet) extendSetMaybe(i uint) { if i >= b.length { // if we need more bits, make 'em + if i >= Cap() { + panic("You are exceeding the capacity") + } nsize := wordsNeeded(i + 1) if b.set == nil { b.set = make([]uint64, nsize) @@ -159,7 +163,12 @@ func (b *BitSet) Test(i uint) bool { return b.set[i>>log2WordSize]&(1<<(i&(wordSize-1))) != 0 } -// Set bit i to 1 +// Set bit i to 1, the capacity of the bitset is automatically +// increased accordingly. +// If i>= Cap(), this function will panic. +// Warning: using a very large value for 'i' +// may lead to a memory shortage and a panic: the caller is responsible +// for providing sensible parameters in line with their memory capacity. func (b *BitSet) Set(i uint) *BitSet { b.extendSetMaybe(i) b.set[i>>log2WordSize] |= 1 << (i & (wordSize - 1)) @@ -175,7 +184,11 @@ func (b *BitSet) Clear(i uint) *BitSet { return b } -// SetTo sets bit i to value +// SetTo sets bit i to value. +// If i>= Cap(), this function will panic. +// Warning: using a very large value for 'i' +// may lead to a memory shortage and a panic: the caller is responsible +// for providing sensible parameters in line with their memory capacity. func (b *BitSet) SetTo(i uint, value bool) *BitSet { if value { return b.Set(i) @@ -183,7 +196,11 @@ func (b *BitSet) SetTo(i uint, value bool) *BitSet { return b.Clear(i) } -// Flip bit at i +// Flip bit at i. +// If i>= Cap(), this function will panic. +// Warning: using a very large value for 'i' +// may lead to a memory shortage and a panic: the caller is responsible +// for providing sensible parameters in line with their memory capacity. 
func (b *BitSet) Flip(i uint) *BitSet { if i >= b.length { return b.Set(i) @@ -192,26 +209,72 @@ func (b *BitSet) Flip(i uint) *BitSet { return b } -// Shrink shrinks BitSet to desired length in bits. It clears all bits > length -// and reduces the size and length of the set. +// FlipRange bit in [start, end). +// If end>= Cap(), this function will panic. +// Warning: using a very large value for 'end' +// may lead to a memory shortage and a panic: the caller is responsible +// for providing sensible parameters in line with their memory capacity. +func (b *BitSet) FlipRange(start, end uint) *BitSet { + if start >= end { + return b + } + + b.extendSetMaybe(end - 1) + var startWord uint = start >> log2WordSize + var endWord uint = end >> log2WordSize + b.set[startWord] ^= ^(^uint64(0) << (start & (wordSize - 1))) + for i := startWord; i < endWord; i++ { + b.set[i] = ^b.set[i] + } + b.set[endWord] ^= ^uint64(0) >> (-end & (wordSize - 1)) + return b +} + +// Shrink shrinks BitSet so that the provided value is the last possible +// set value. It clears all bits > the provided index and reduces the size +// and length of the set. +// +// Note that the parameter value is not the new length in bits: it is the +// maximal value that can be stored in the bitset after the function call. +// The new length in bits is the parameter value + 1. Thus it is not possible +// to use this function to set the length to 0, the minimal value of the length +// after this function call is 1. // // A new slice is allocated to store the new bits, so you may see an increase in // memory usage until the GC runs. Normally this should not be a problem, but if you // have an extremely large BitSet its important to understand that the old BitSet will // remain in memory until the GC frees it. 
-func (b *BitSet) Shrink(length uint) *BitSet { - idx := wordsNeeded(length + 1) +func (b *BitSet) Shrink(lastbitindex uint) *BitSet { + length := lastbitindex + 1 + idx := wordsNeeded(length) if idx > len(b.set) { return b } shrunk := make([]uint64, idx) copy(shrunk, b.set[:idx]) b.set = shrunk - b.length = length + 1 - b.set[idx-1] &= (allBits >> (uint64(64) - uint64(length&(wordSize-1)) - 1)) + b.length = length + b.set[idx-1] &= (allBits >> (uint64(64) - uint64(length&(wordSize-1)))) return b } +// Compact shrinks BitSet to so that we preserve all set bits, while minimizing +// memory usage. Compact calls Shrink. +func (b *BitSet) Compact() *BitSet { + idx := len(b.set) - 1 + for ; idx >= 0 && b.set[idx] == 0; idx-- { + } + newlength := uint((idx + 1) << log2WordSize) + if newlength >= b.length { + return b // nothing to do + } + if newlength > 0 { + return b.Shrink(newlength - 1) + } + // We preserve one word + return b.Shrink(63) +} + // InsertAt takes an index which indicates where a bit should be // inserted. Then it shifts all the bits in the set to the left by 1, starting // from the given index position, and sets the index position to 0. @@ -322,6 +385,9 @@ func (b *BitSet) DeleteAt(i uint) *BitSet { // including possibly the current index // along with an error code (true = valid, false = no set bit found) // for i,e := v.NextSet(0); e; i,e = v.NextSet(i + 1) {...} +// +// Users concerned with performance may want to use NextSetMany to +// retrieve several values at once. func (b *BitSet) NextSet(i uint) (uint, bool) { x := int(i >> log2WordSize) if x >= len(b.set) { @@ -357,6 +423,14 @@ func (b *BitSet) NextSet(i uint) (uint, bool) { // j += 1 // } // +// +// It is possible to retrieve all set bits as follow: +// +// indices := make([]uint, bitmap.Count()) +// bitmap.NextSetMany(0, indices) +// +// However if bitmap.Count() is large, it might be preferable to +// use several calls to NextSetMany, for performance reasons. 
func (b *BitSet) NextSetMany(i uint, buffer []uint) (uint, []uint) { myanswer := buffer capacity := cap(buffer) @@ -465,7 +539,8 @@ func (b *BitSet) Copy(c *BitSet) (count uint) { return } -// Count (number of set bits) +// Count (number of set bits). +// Also known as "popcount" or "population count". func (b *BitSet) Count() uint { if b != nil && b.set != nil { return uint(popcntSlice(b.set)) @@ -473,12 +548,12 @@ func (b *BitSet) Count() uint { return 0 } -// Equal tests the equvalence of two BitSets. +// Equal tests the equivalence of two BitSets. // False if they are of different sizes, otherwise true // only if all the same bits are set func (b *BitSet) Equal(c *BitSet) bool { - if c == nil { - return false + if c == nil || b == nil { + return c == b } if b.length != c.length { return false @@ -726,7 +801,7 @@ func (b *BitSet) All() bool { return b.Count() == b.length } -// None returns true if no bit is set, false otherwise. Retursn true for +// None returns true if no bit is set, false otherwise. Returns true for // empty sets. 
func (b *BitSet) None() bool { panicIfNull(b) @@ -807,7 +882,7 @@ func (b *BitSet) ReadFrom(stream io.Reader) (int64, error) { newset := New(uint(length)) if uint64(newset.length) != length { - return 0, errors.New("Unmarshalling error: type mismatch") + return 0, errors.New("unmarshalling error: type mismatch") } // Read remaining bytes as set diff --git a/vendor/github.com/willf/bitset/popcnt.go b/vendor/github.com/bits-and-blooms/bitset/popcnt.go similarity index 100% rename from vendor/github.com/willf/bitset/popcnt.go rename to vendor/github.com/bits-and-blooms/bitset/popcnt.go diff --git a/vendor/github.com/willf/bitset/popcnt_19.go b/vendor/github.com/bits-and-blooms/bitset/popcnt_19.go similarity index 100% rename from vendor/github.com/willf/bitset/popcnt_19.go rename to vendor/github.com/bits-and-blooms/bitset/popcnt_19.go diff --git a/vendor/github.com/willf/bitset/popcnt_amd64.go b/vendor/github.com/bits-and-blooms/bitset/popcnt_amd64.go similarity index 100% rename from vendor/github.com/willf/bitset/popcnt_amd64.go rename to vendor/github.com/bits-and-blooms/bitset/popcnt_amd64.go diff --git a/vendor/github.com/willf/bitset/popcnt_amd64.s b/vendor/github.com/bits-and-blooms/bitset/popcnt_amd64.s similarity index 100% rename from vendor/github.com/willf/bitset/popcnt_amd64.s rename to vendor/github.com/bits-and-blooms/bitset/popcnt_amd64.s diff --git a/vendor/github.com/willf/bitset/popcnt_generic.go b/vendor/github.com/bits-and-blooms/bitset/popcnt_generic.go similarity index 100% rename from vendor/github.com/willf/bitset/popcnt_generic.go rename to vendor/github.com/bits-and-blooms/bitset/popcnt_generic.go diff --git a/vendor/github.com/willf/bitset/trailing_zeros_18.go b/vendor/github.com/bits-and-blooms/bitset/trailing_zeros_18.go similarity index 100% rename from vendor/github.com/willf/bitset/trailing_zeros_18.go rename to vendor/github.com/bits-and-blooms/bitset/trailing_zeros_18.go diff --git 
a/vendor/github.com/willf/bitset/trailing_zeros_19.go b/vendor/github.com/bits-and-blooms/bitset/trailing_zeros_19.go similarity index 100% rename from vendor/github.com/willf/bitset/trailing_zeros_19.go rename to vendor/github.com/bits-and-blooms/bitset/trailing_zeros_19.go diff --git a/vendor/github.com/blevesearch/bleve/v2/.travis.yml b/vendor/github.com/blevesearch/bleve/v2/.travis.yml deleted file mode 100644 index 7b7297afe..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/.travis.yml +++ /dev/null @@ -1,25 +0,0 @@ -sudo: false - -language: go - -go: - - "1.12.x" - - "1.13.x" - - "1.14.x" - -script: - - go get golang.org/x/tools/cmd/cover - - go get github.com/mattn/goveralls - - go get github.com/kisielk/errcheck - - go get -u github.com/FiloSottile/gvt - - gvt restore - - go test -race -v $(go list ./... | grep -v vendor/) - - go vet $(go list ./... | grep -v vendor/) - - go test ./test -v -indexType scorch - - errcheck -ignorepkg fmt $(go list ./... | grep -v vendor/); - - docs/project-code-coverage.sh - - docs/build_children.sh - -notifications: - email: - - marty.schoch@gmail.com diff --git a/vendor/github.com/blevesearch/bleve/v2/CONTRIBUTING.md b/vendor/github.com/blevesearch/bleve/v2/CONTRIBUTING.md deleted file mode 100644 index 5ebf3d65b..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/CONTRIBUTING.md +++ /dev/null @@ -1,16 +0,0 @@ -# Contributing to Bleve - -We look forward to your contributions, but ask that you first review these guidelines. - -### Sign the CLA - -As Bleve is a Couchbase project we require contributors accept the [Couchbase Contributor License Agreement](http://review.couchbase.org/static/individual_agreement.html). To sign this agreement log into the Couchbase [code review tool](http://review.couchbase.org/). The Bleve project does not use this code review tool but it is still used to track acceptance of the contributor license agreements. 
- -### Submitting a Pull Request - -All types of contributions are welcome, but please keep the following in mind: - -- If you're planning a large change, you should really discuss it in a github issue or on the google group first. This helps avoid duplicate effort and spending time on something that may not be merged. -- Existing tests should continue to pass, new tests for the contribution are nice to have. -- All code should have gone through `go fmt` -- All code should pass `go vet` diff --git a/vendor/github.com/blevesearch/bleve/v2/README.md b/vendor/github.com/blevesearch/bleve/v2/README.md deleted file mode 100644 index 30b1ecb5a..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/README.md +++ /dev/null @@ -1,68 +0,0 @@ -# ![bleve](docs/bleve.png) bleve - -[![Tests](https://github.com/blevesearch/bleve/workflows/Tests/badge.svg?branch=master&event=push)](https://github.com/blevesearch/bleve/actions?query=workflow%3ATests+event%3Apush+branch%3Amaster) -[![Coverage Status](https://coveralls.io/repos/github/blevesearch/bleve/badge.svg?branch=master)](https://coveralls.io/github/blevesearch/bleve?branch=master) -[![GoDoc](https://godoc.org/github.com/blevesearch/bleve?status.svg)](https://godoc.org/github.com/blevesearch/bleve) -[![Join the chat at https://gitter.im/blevesearch/bleve](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/blevesearch/bleve?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) -[![codebeat](https://codebeat.co/badges/38a7cbc9-9cf5-41c0-a315-0746178230f4)](https://codebeat.co/projects/github-com-blevesearch-bleve) -[![Go Report Card](https://goreportcard.com/badge/blevesearch/bleve)](https://goreportcard.com/report/blevesearch/bleve) -[![Sourcegraph](https://sourcegraph.com/github.com/blevesearch/bleve/-/badge.svg)](https://sourcegraph.com/github.com/blevesearch/bleve?badge) -[![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0) - 
-modern text indexing in go - [blevesearch.com](http://www.blevesearch.com/) - -## Features - -* Index any go data structure (including JSON) -* Intelligent defaults backed up by powerful configuration -* Supported field types: - * Text, Numeric, Date -* Supported query types: - * Term, Phrase, Match, Match Phrase, Prefix - * Conjunction, Disjunction, Boolean - * Numeric Range, Date Range - * Simple query [syntax](http://www.blevesearch.com/docs/Query-String-Query/) for human entry -* tf-idf Scoring -* Search result match highlighting -* Supports Aggregating Facets: - * Terms Facet - * Numeric Range Facet - * Date Range Facet - -## Discussion - -Discuss usage and development of bleve in the [google group](https://groups.google.com/forum/#!forum/bleve). - -## Indexing - -```go -message := struct{ - Id string - From string - Body string -}{ - Id: "example", - From: "marty.schoch@gmail.com", - Body: "bleve indexing is easy", -} - -mapping := bleve.NewIndexMapping() -index, err := bleve.New("example.bleve", mapping) -if err != nil { - panic(err) -} -index.Index(message.Id, message) -``` - -## Querying - -```go -index, _ := bleve.Open("example.bleve") -query := bleve.NewQueryStringQuery("bleve") -searchRequest := bleve.NewSearchRequest(query) -searchResult, _ := index.Search(searchRequest) -``` - -## License - -Apache License Version 2.0 diff --git a/vendor/github.com/blevesearch/bleve/v2/analysis/analyzer/standard/standard.go b/vendor/github.com/blevesearch/bleve/v2/analysis/analyzer/standard/standard.go deleted file mode 100644 index 80a481be0..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/analysis/analyzer/standard/standard.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package standard - -import ( - "github.com/blevesearch/bleve/v2/analysis" - "github.com/blevesearch/bleve/v2/analysis/lang/en" - "github.com/blevesearch/bleve/v2/analysis/token/lowercase" - "github.com/blevesearch/bleve/v2/analysis/tokenizer/unicode" - "github.com/blevesearch/bleve/v2/registry" -) - -const Name = "standard" - -func AnalyzerConstructor(config map[string]interface{}, cache *registry.Cache) (*analysis.Analyzer, error) { - tokenizer, err := cache.TokenizerNamed(unicode.Name) - if err != nil { - return nil, err - } - toLowerFilter, err := cache.TokenFilterNamed(lowercase.Name) - if err != nil { - return nil, err - } - stopEnFilter, err := cache.TokenFilterNamed(en.StopName) - if err != nil { - return nil, err - } - rv := analysis.Analyzer{ - Tokenizer: tokenizer, - TokenFilters: []analysis.TokenFilter{ - toLowerFilter, - stopEnFilter, - }, - } - return &rv, nil -} - -func init() { - registry.RegisterAnalyzer(Name, AnalyzerConstructor) -} diff --git a/vendor/github.com/blevesearch/bleve/v2/analysis/datetime/flexible/flexible.go b/vendor/github.com/blevesearch/bleve/v2/analysis/datetime/flexible/flexible.go deleted file mode 100644 index 0eba074cd..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/analysis/datetime/flexible/flexible.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package flexible - -import ( - "fmt" - "time" - - "github.com/blevesearch/bleve/v2/analysis" - "github.com/blevesearch/bleve/v2/registry" -) - -const Name = "flexiblego" - -type DateTimeParser struct { - layouts []string -} - -func New(layouts []string) *DateTimeParser { - return &DateTimeParser{ - layouts: layouts, - } -} - -func (p *DateTimeParser) ParseDateTime(input string) (time.Time, error) { - for _, layout := range p.layouts { - rv, err := time.Parse(layout, input) - if err == nil { - return rv, nil - } - } - return time.Time{}, analysis.ErrInvalidDateTime -} - -func DateTimeParserConstructor(config map[string]interface{}, cache *registry.Cache) (analysis.DateTimeParser, error) { - layouts, ok := config["layouts"].([]interface{}) - if !ok { - return nil, fmt.Errorf("must specify layouts") - } - var layoutStrs []string - for _, layout := range layouts { - layoutStr, ok := layout.(string) - if ok { - layoutStrs = append(layoutStrs, layoutStr) - } - } - return New(layoutStrs), nil -} - -func init() { - registry.RegisterDateTimeParser(Name, DateTimeParserConstructor) -} diff --git a/vendor/github.com/blevesearch/bleve/v2/analysis/datetime/optional/optional.go b/vendor/github.com/blevesearch/bleve/v2/analysis/datetime/optional/optional.go deleted file mode 100644 index 196aa25cb..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/analysis/datetime/optional/optional.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package optional - -import ( - "time" - - "github.com/blevesearch/bleve/v2/analysis" - "github.com/blevesearch/bleve/v2/analysis/datetime/flexible" - "github.com/blevesearch/bleve/v2/registry" -) - -const Name = "dateTimeOptional" - -const rfc3339NoTimezone = "2006-01-02T15:04:05" -const rfc3339NoTimezoneNoT = "2006-01-02 15:04:05" -const rfc3339NoTime = "2006-01-02" - -var layouts = []string{ - time.RFC3339Nano, - time.RFC3339, - rfc3339NoTimezone, - rfc3339NoTimezoneNoT, - rfc3339NoTime, -} - -func DateTimeParserConstructor(config map[string]interface{}, cache *registry.Cache) (analysis.DateTimeParser, error) { - return flexible.New(layouts), nil -} - -func init() { - registry.RegisterDateTimeParser(Name, DateTimeParserConstructor) -} diff --git a/vendor/github.com/blevesearch/bleve/v2/analysis/freq.go b/vendor/github.com/blevesearch/bleve/v2/analysis/freq.go deleted file mode 100644 index a0fd1a416..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/analysis/freq.go +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package analysis - -import ( - index "github.com/blevesearch/bleve_index_api" -) - -func TokenFrequency(tokens TokenStream, arrayPositions []uint64, options index.FieldIndexingOptions) index.TokenFrequencies { - rv := make(map[string]*index.TokenFreq, len(tokens)) - - if options.IncludeTermVectors() { - tls := make([]index.TokenLocation, len(tokens)) - tlNext := 0 - - for _, token := range tokens { - tls[tlNext] = index.TokenLocation{ - ArrayPositions: arrayPositions, - Start: token.Start, - End: token.End, - Position: token.Position, - } - - curr, ok := rv[string(token.Term)] - if ok { - curr.Locations = append(curr.Locations, &tls[tlNext]) - } else { - curr = &index.TokenFreq{ - Term: token.Term, - Locations: []*index.TokenLocation{&tls[tlNext]}, - } - rv[string(token.Term)] = curr - } - - if !options.SkipFreqNorm() { - curr.SetFrequency(curr.Frequency() + 1) - } - - tlNext++ - } - } else { - for _, token := range tokens { - curr, exists := rv[string(token.Term)] - if !exists { - curr = &index.TokenFreq{ - Term: token.Term, - } - rv[string(token.Term)] = curr - } - - if !options.SkipFreqNorm() { - curr.SetFrequency(curr.Frequency() + 1) - } - } - } - - return rv -} diff --git a/vendor/github.com/blevesearch/bleve/v2/analysis/lang/en/analyzer_en.go b/vendor/github.com/blevesearch/bleve/v2/analysis/lang/en/analyzer_en.go deleted file mode 100644 index 7a4ae5866..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/analysis/lang/en/analyzer_en.go +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package en implements an analyzer with reasonable defaults for processing -// English text. -// -// It strips possessive suffixes ('s), transforms tokens to lower case, -// removes stopwords from a built-in list, and applies porter stemming. -// -// The built-in stopwords list is defined in EnglishStopWords. -package en - -import ( - "github.com/blevesearch/bleve/v2/analysis" - "github.com/blevesearch/bleve/v2/registry" - - "github.com/blevesearch/bleve/v2/analysis/token/lowercase" - "github.com/blevesearch/bleve/v2/analysis/token/porter" - "github.com/blevesearch/bleve/v2/analysis/tokenizer/unicode" -) - -const AnalyzerName = "en" - -func AnalyzerConstructor(config map[string]interface{}, cache *registry.Cache) (*analysis.Analyzer, error) { - tokenizer, err := cache.TokenizerNamed(unicode.Name) - if err != nil { - return nil, err - } - possEnFilter, err := cache.TokenFilterNamed(PossessiveName) - if err != nil { - return nil, err - } - toLowerFilter, err := cache.TokenFilterNamed(lowercase.Name) - if err != nil { - return nil, err - } - stopEnFilter, err := cache.TokenFilterNamed(StopName) - if err != nil { - return nil, err - } - stemmerEnFilter, err := cache.TokenFilterNamed(porter.Name) - if err != nil { - return nil, err - } - rv := analysis.Analyzer{ - Tokenizer: tokenizer, - TokenFilters: []analysis.TokenFilter{ - possEnFilter, - toLowerFilter, - stopEnFilter, - stemmerEnFilter, - }, - } - return 
&rv, nil -} - -func init() { - registry.RegisterAnalyzer(AnalyzerName, AnalyzerConstructor) -} diff --git a/vendor/github.com/blevesearch/bleve/v2/analysis/lang/en/stop_filter_en.go b/vendor/github.com/blevesearch/bleve/v2/analysis/lang/en/stop_filter_en.go deleted file mode 100644 index a3f91d226..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/analysis/lang/en/stop_filter_en.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package en - -import ( - "github.com/blevesearch/bleve/v2/analysis" - "github.com/blevesearch/bleve/v2/analysis/token/stop" - "github.com/blevesearch/bleve/v2/registry" -) - -func StopTokenFilterConstructor(config map[string]interface{}, cache *registry.Cache) (analysis.TokenFilter, error) { - tokenMap, err := cache.TokenMapNamed(StopName) - if err != nil { - return nil, err - } - return stop.NewStopTokensFilter(tokenMap), nil -} - -func init() { - registry.RegisterTokenFilter(StopName, StopTokenFilterConstructor) -} diff --git a/vendor/github.com/blevesearch/bleve/v2/builder.go b/vendor/github.com/blevesearch/bleve/v2/builder.go deleted file mode 100644 index dbb7e3ed4..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/builder.go +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright (c) 2019 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package bleve - -import ( - "encoding/json" - "fmt" - - "github.com/blevesearch/bleve/v2/document" - "github.com/blevesearch/bleve/v2/index/scorch" - "github.com/blevesearch/bleve/v2/mapping" - index "github.com/blevesearch/bleve_index_api" -) - -type builderImpl struct { - b index.IndexBuilder - m mapping.IndexMapping -} - -func (b *builderImpl) Index(id string, data interface{}) error { - if id == "" { - return ErrorEmptyID - } - - doc := document.NewDocument(id) - err := b.m.MapDocument(doc, data) - if err != nil { - return err - } - err = b.b.Index(doc) - return err -} - -func (b *builderImpl) Close() error { - return b.b.Close() -} - -func newBuilder(path string, mapping mapping.IndexMapping, config map[string]interface{}) (Builder, error) { - if path == "" { - return nil, fmt.Errorf("builder requires path") - } - - err := mapping.Validate() - if err != nil { - return nil, err - } - - if config == nil { - config = map[string]interface{}{} - } - - // the builder does not have an API to interact with internal storage - // however we can pass k/v pairs through the config - mappingBytes, err := json.Marshal(mapping) - if err != nil { - return nil, err - } - config["internal"] = map[string][]byte{ - string(mappingInternalKey): mappingBytes, - } - - // do not use real config, as these are options for the builder, - // not the resulting index - meta := newIndexMeta(scorch.Name, scorch.Name, map[string]interface{}{}) - err = meta.Save(path) - if err != nil { - return nil, err - } - - config["path"] = indexStorePath(path) - - b, err := 
scorch.NewBuilder(config) - if err != nil { - return nil, err - } - rv := &builderImpl{ - b: b, - m: mapping, - } - - return rv, nil -} diff --git a/vendor/github.com/blevesearch/bleve/v2/config.go b/vendor/github.com/blevesearch/bleve/v2/config.go deleted file mode 100644 index 0622b359d..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/config.go +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package bleve - -import ( - "expvar" - "io/ioutil" - "log" - "time" - - "github.com/blevesearch/bleve/v2/index/scorch" - "github.com/blevesearch/bleve/v2/index/upsidedown/store/gtreap" - "github.com/blevesearch/bleve/v2/registry" - "github.com/blevesearch/bleve/v2/search/highlight/highlighter/html" - index "github.com/blevesearch/bleve_index_api" -) - -var bleveExpVar = expvar.NewMap("bleve") - -type configuration struct { - Cache *registry.Cache - DefaultHighlighter string - DefaultKVStore string - DefaultMemKVStore string - DefaultIndexType string - SlowSearchLogThreshold time.Duration - analysisQueue *index.AnalysisQueue -} - -func (c *configuration) SetAnalysisQueueSize(n int) { - if c.analysisQueue != nil { - c.analysisQueue.Close() - } - c.analysisQueue = index.NewAnalysisQueue(n) -} - -func (c *configuration) Shutdown() { - c.SetAnalysisQueueSize(0) -} - -func newConfiguration() *configuration { - return &configuration{ - Cache: registry.NewCache(), - analysisQueue: 
index.NewAnalysisQueue(4), - } -} - -// Config contains library level configuration -var Config *configuration - -func init() { - bootStart := time.Now() - - // build the default configuration - Config = newConfiguration() - - // set the default highlighter - Config.DefaultHighlighter = html.Name - - // default kv store - Config.DefaultKVStore = "" - - // default mem only kv store - Config.DefaultMemKVStore = gtreap.Name - - // default index - Config.DefaultIndexType = scorch.Name - - bootDuration := time.Since(bootStart) - bleveExpVar.Add("bootDuration", int64(bootDuration)) - indexStats = NewIndexStats() - bleveExpVar.Set("indexes", indexStats) - - initDisk() -} - -var logger = log.New(ioutil.Discard, "bleve", log.LstdFlags) - -// SetLog sets the logger used for logging -// by default log messages are sent to ioutil.Discard -func SetLog(l *log.Logger) { - logger = l -} diff --git a/vendor/github.com/blevesearch/bleve/v2/doc.go b/vendor/github.com/blevesearch/bleve/v2/doc.go deleted file mode 100644 index d54af7c9a..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/doc.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/* -Package bleve is a library for indexing and searching text. 
- -Example Opening New Index, Indexing Data - - message := struct{ - Id: "example" - From: "marty.schoch@gmail.com", - Body: "bleve indexing is easy", - } - - mapping := bleve.NewIndexMapping() - index, _ := bleve.New("example.bleve", mapping) - index.Index(message.Id, message) - -Example Opening Existing Index, Searching Data - - index, _ := bleve.Open("example.bleve") - query := bleve.NewQueryStringQuery("bleve") - searchRequest := bleve.NewSearchRequest(query) - searchResult, _ := index.Search(searchRequest) - -*/ -package bleve diff --git a/vendor/github.com/blevesearch/bleve/v2/document/document.go b/vendor/github.com/blevesearch/bleve/v2/document/document.go deleted file mode 100644 index 1a6050f0a..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/document/document.go +++ /dev/null @@ -1,130 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package document - -import ( - "fmt" - "reflect" - - "github.com/blevesearch/bleve/v2/size" - index "github.com/blevesearch/bleve_index_api" -) - -var reflectStaticSizeDocument int - -func init() { - var d Document - reflectStaticSizeDocument = int(reflect.TypeOf(d).Size()) -} - -type Document struct { - id string `json:"id"` - Fields []Field `json:"fields"` - CompositeFields []*CompositeField -} - -func NewDocument(id string) *Document { - return &Document{ - id: id, - Fields: make([]Field, 0), - CompositeFields: make([]*CompositeField, 0), - } -} - -func (d *Document) Size() int { - sizeInBytes := reflectStaticSizeDocument + size.SizeOfPtr + - len(d.id) - - for _, entry := range d.Fields { - sizeInBytes += entry.Size() - } - - for _, entry := range d.CompositeFields { - sizeInBytes += entry.Size() - } - - return sizeInBytes -} - -func (d *Document) AddField(f Field) *Document { - switch f := f.(type) { - case *CompositeField: - d.CompositeFields = append(d.CompositeFields, f) - default: - d.Fields = append(d.Fields, f) - } - return d -} - -func (d *Document) GoString() string { - fields := "" - for i, field := range d.Fields { - if i != 0 { - fields += ", " - } - fields += fmt.Sprintf("%#v", field) - } - compositeFields := "" - for i, field := range d.CompositeFields { - if i != 0 { - compositeFields += ", " - } - compositeFields += fmt.Sprintf("%#v", field) - } - return fmt.Sprintf("&document.Document{ID:%s, Fields: %s, CompositeFields: %s}", d.ID(), fields, compositeFields) -} - -func (d *Document) NumPlainTextBytes() uint64 { - rv := uint64(0) - for _, field := range d.Fields { - rv += field.NumPlainTextBytes() - } - for _, compositeField := range d.CompositeFields { - for _, field := range d.Fields { - if compositeField.includesField(field.Name()) { - rv += field.NumPlainTextBytes() - } - } - } - return rv -} - -func (d *Document) ID() string { - return d.id -} - -func (d *Document) SetID(id string) { - d.id = id -} - -func (d *Document) AddIDField() { - 
d.AddField(NewTextFieldCustom("_id", nil, []byte(d.ID()), index.IndexField|index.StoreField, nil)) -} - -func (d *Document) VisitFields(visitor index.FieldVisitor) { - for _, f := range d.Fields { - visitor(f) - } -} - -func (d *Document) VisitComposite(visitor index.CompositeFieldVisitor) { - for _, f := range d.CompositeFields { - visitor(f) - } -} - -func (d *Document) HasComposite() bool { - return len(d.CompositeFields) > 0 -} diff --git a/vendor/github.com/blevesearch/bleve/v2/document/field.go b/vendor/github.com/blevesearch/bleve/v2/document/field.go deleted file mode 100644 index eb104e2df..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/document/field.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package document - -import ( - index "github.com/blevesearch/bleve_index_api" -) - -type Field interface { - // Name returns the path of the field from the root DocumentMapping. - // A root field path is "field", a subdocument field is "parent.field". - Name() string - // ArrayPositions returns the intermediate document and field indices - // required to resolve the field value in the document. For example, if the - // field path is "doc1.doc2.field" where doc1 and doc2 are slices or - // arrays, ArrayPositions returns 2 indices used to resolve "doc2" value in - // "doc1", then "field" in "doc2". 
- ArrayPositions() []uint64 - Options() index.FieldIndexingOptions - Analyze() - Value() []byte - - // NumPlainTextBytes should return the number of plain text bytes - // that this field represents - this is a common metric for tracking - // the rate of indexing - NumPlainTextBytes() uint64 - - Size() int - - EncodedFieldType() byte - AnalyzedLength() int - AnalyzedTokenFrequencies() index.TokenFrequencies -} diff --git a/vendor/github.com/blevesearch/bleve/v2/document/field_boolean.go b/vendor/github.com/blevesearch/bleve/v2/document/field_boolean.go deleted file mode 100644 index fdf3cc0e5..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/document/field_boolean.go +++ /dev/null @@ -1,137 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package document - -import ( - "fmt" - "reflect" - - "github.com/blevesearch/bleve/v2/analysis" - "github.com/blevesearch/bleve/v2/size" - index "github.com/blevesearch/bleve_index_api" -) - -var reflectStaticSizeBooleanField int - -func init() { - var f BooleanField - reflectStaticSizeBooleanField = int(reflect.TypeOf(f).Size()) -} - -const DefaultBooleanIndexingOptions = index.StoreField | index.IndexField | index.DocValues - -type BooleanField struct { - name string - arrayPositions []uint64 - options index.FieldIndexingOptions - value []byte - numPlainTextBytes uint64 - length int - frequencies index.TokenFrequencies -} - -func (b *BooleanField) Size() int { - return reflectStaticSizeBooleanField + size.SizeOfPtr + - len(b.name) + - len(b.arrayPositions)*size.SizeOfUint64 + - len(b.value) -} - -func (b *BooleanField) Name() string { - return b.name -} - -func (b *BooleanField) ArrayPositions() []uint64 { - return b.arrayPositions -} - -func (b *BooleanField) Options() index.FieldIndexingOptions { - return b.options -} - -func (b *BooleanField) Analyze() { - tokens := make(analysis.TokenStream, 0) - tokens = append(tokens, &analysis.Token{ - Start: 0, - End: len(b.value), - Term: b.value, - Position: 1, - Type: analysis.Boolean, - }) - - b.length = len(tokens) - b.frequencies = analysis.TokenFrequency(tokens, b.arrayPositions, b.options) -} - -func (b *BooleanField) Value() []byte { - return b.value -} - -func (b *BooleanField) Boolean() (bool, error) { - if len(b.value) == 1 { - return b.value[0] == 'T', nil - } - return false, fmt.Errorf("boolean field has %d bytes", len(b.value)) -} - -func (b *BooleanField) GoString() string { - return fmt.Sprintf("&document.BooleanField{Name:%s, Options: %s, Value: %s}", b.name, b.options, b.value) -} - -func (b *BooleanField) NumPlainTextBytes() uint64 { - return b.numPlainTextBytes -} - -func (b *BooleanField) EncodedFieldType() byte { - return 'b' -} - -func (b *BooleanField) AnalyzedLength() int { - return b.length 
-} - -func (b *BooleanField) AnalyzedTokenFrequencies() index.TokenFrequencies { - return b.frequencies -} - -func NewBooleanFieldFromBytes(name string, arrayPositions []uint64, value []byte) *BooleanField { - return &BooleanField{ - name: name, - arrayPositions: arrayPositions, - value: value, - options: DefaultNumericIndexingOptions, - numPlainTextBytes: uint64(len(value)), - } -} - -func NewBooleanField(name string, arrayPositions []uint64, b bool) *BooleanField { - return NewBooleanFieldWithIndexingOptions(name, arrayPositions, b, DefaultNumericIndexingOptions) -} - -func NewBooleanFieldWithIndexingOptions(name string, arrayPositions []uint64, b bool, options index.FieldIndexingOptions) *BooleanField { - numPlainTextBytes := 5 - v := []byte("F") - if b { - numPlainTextBytes = 4 - v = []byte("T") - } - return &BooleanField{ - name: name, - arrayPositions: arrayPositions, - value: v, - options: options, - numPlainTextBytes: uint64(numPlainTextBytes), - } -} diff --git a/vendor/github.com/blevesearch/bleve/v2/document/field_composite.go b/vendor/github.com/blevesearch/bleve/v2/document/field_composite.go deleted file mode 100644 index 8c47643f5..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/document/field_composite.go +++ /dev/null @@ -1,135 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package document - -import ( - "reflect" - - "github.com/blevesearch/bleve/v2/size" - index "github.com/blevesearch/bleve_index_api" -) - -var reflectStaticSizeCompositeField int - -func init() { - var cf CompositeField - reflectStaticSizeCompositeField = int(reflect.TypeOf(cf).Size()) -} - -const DefaultCompositeIndexingOptions = index.IndexField - -type CompositeField struct { - name string - includedFields map[string]bool - excludedFields map[string]bool - defaultInclude bool - options index.FieldIndexingOptions - totalLength int - compositeFrequencies index.TokenFrequencies -} - -func NewCompositeField(name string, defaultInclude bool, include []string, exclude []string) *CompositeField { - return NewCompositeFieldWithIndexingOptions(name, defaultInclude, include, exclude, DefaultCompositeIndexingOptions) -} - -func NewCompositeFieldWithIndexingOptions(name string, defaultInclude bool, include []string, exclude []string, options index.FieldIndexingOptions) *CompositeField { - rv := &CompositeField{ - name: name, - options: options, - defaultInclude: defaultInclude, - includedFields: make(map[string]bool, len(include)), - excludedFields: make(map[string]bool, len(exclude)), - compositeFrequencies: make(index.TokenFrequencies), - } - - for _, i := range include { - rv.includedFields[i] = true - } - for _, e := range exclude { - rv.excludedFields[e] = true - } - - return rv -} - -func (c *CompositeField) Size() int { - sizeInBytes := reflectStaticSizeCompositeField + size.SizeOfPtr + - len(c.name) - - for k, _ := range c.includedFields { - sizeInBytes += size.SizeOfString + len(k) + size.SizeOfBool - } - - for k, _ := range c.excludedFields { - sizeInBytes += size.SizeOfString + len(k) + size.SizeOfBool - } - - return sizeInBytes -} - -func (c *CompositeField) Name() string { - return c.name -} - -func (c *CompositeField) ArrayPositions() []uint64 { - return []uint64{} -} - -func (c *CompositeField) Options() index.FieldIndexingOptions { - return c.options -} - 
-func (c *CompositeField) Analyze() { -} - -func (c *CompositeField) Value() []byte { - return []byte{} -} - -func (c *CompositeField) NumPlainTextBytes() uint64 { - return 0 -} - -func (c *CompositeField) includesField(field string) bool { - shouldInclude := c.defaultInclude - _, fieldShouldBeIncluded := c.includedFields[field] - if fieldShouldBeIncluded { - shouldInclude = true - } - _, fieldShouldBeExcluded := c.excludedFields[field] - if fieldShouldBeExcluded { - shouldInclude = false - } - return shouldInclude -} - -func (c *CompositeField) Compose(field string, length int, freq index.TokenFrequencies) { - if c.includesField(field) { - c.totalLength += length - c.compositeFrequencies.MergeAll(field, freq) - } -} - -func (c *CompositeField) EncodedFieldType() byte { - return 'c' -} - -func (c *CompositeField) AnalyzedLength() int { - return c.totalLength -} - -func (c *CompositeField) AnalyzedTokenFrequencies() index.TokenFrequencies { - return c.compositeFrequencies -} diff --git a/vendor/github.com/blevesearch/bleve/v2/document/field_datetime.go b/vendor/github.com/blevesearch/bleve/v2/document/field_datetime.go deleted file mode 100644 index 650640550..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/document/field_datetime.go +++ /dev/null @@ -1,173 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package document - -import ( - "fmt" - "math" - "reflect" - "time" - - "github.com/blevesearch/bleve/v2/analysis" - "github.com/blevesearch/bleve/v2/numeric" - "github.com/blevesearch/bleve/v2/size" - index "github.com/blevesearch/bleve_index_api" -) - -var reflectStaticSizeDateTimeField int - -func init() { - var f DateTimeField - reflectStaticSizeDateTimeField = int(reflect.TypeOf(f).Size()) -} - -const DefaultDateTimeIndexingOptions = index.StoreField | index.IndexField | index.DocValues -const DefaultDateTimePrecisionStep uint = 4 - -var MinTimeRepresentable = time.Unix(0, math.MinInt64) -var MaxTimeRepresentable = time.Unix(0, math.MaxInt64) - -type DateTimeField struct { - name string - arrayPositions []uint64 - options index.FieldIndexingOptions - value numeric.PrefixCoded - numPlainTextBytes uint64 - length int - frequencies index.TokenFrequencies -} - -func (n *DateTimeField) Size() int { - return reflectStaticSizeDateTimeField + size.SizeOfPtr + - len(n.name) + - len(n.arrayPositions)*size.SizeOfUint64 -} - -func (n *DateTimeField) Name() string { - return n.name -} - -func (n *DateTimeField) ArrayPositions() []uint64 { - return n.arrayPositions -} - -func (n *DateTimeField) Options() index.FieldIndexingOptions { - return n.options -} - -func (n *DateTimeField) EncodedFieldType() byte { - return 'd' -} - -func (n *DateTimeField) AnalyzedLength() int { - return n.length -} - -func (n *DateTimeField) AnalyzedTokenFrequencies() index.TokenFrequencies { - return n.frequencies -} - -func (n *DateTimeField) Analyze() { - tokens := make(analysis.TokenStream, 0) - tokens = append(tokens, &analysis.Token{ - Start: 0, - End: len(n.value), - Term: n.value, - Position: 1, - Type: analysis.DateTime, - }) - - original, err := n.value.Int64() - if err == nil { - - shift := DefaultDateTimePrecisionStep - for shift < 64 { - shiftEncoded, err := numeric.NewPrefixCodedInt64(original, shift) - if err != nil { - break - } - token := analysis.Token{ - Start: 0, - End: 
len(shiftEncoded), - Term: shiftEncoded, - Position: 1, - Type: analysis.DateTime, - } - tokens = append(tokens, &token) - shift += DefaultDateTimePrecisionStep - } - } - - n.length = len(tokens) - n.frequencies = analysis.TokenFrequency(tokens, n.arrayPositions, n.options) -} - -func (n *DateTimeField) Value() []byte { - return n.value -} - -func (n *DateTimeField) DateTime() (time.Time, error) { - i64, err := n.value.Int64() - if err != nil { - return time.Time{}, err - } - return time.Unix(0, i64).UTC(), nil -} - -func (n *DateTimeField) GoString() string { - return fmt.Sprintf("&document.DateField{Name:%s, Options: %s, Value: %s}", n.name, n.options, n.value) -} - -func (n *DateTimeField) NumPlainTextBytes() uint64 { - return n.numPlainTextBytes -} - -func NewDateTimeFieldFromBytes(name string, arrayPositions []uint64, value []byte) *DateTimeField { - return &DateTimeField{ - name: name, - arrayPositions: arrayPositions, - value: value, - options: DefaultDateTimeIndexingOptions, - numPlainTextBytes: uint64(len(value)), - } -} - -func NewDateTimeField(name string, arrayPositions []uint64, dt time.Time) (*DateTimeField, error) { - return NewDateTimeFieldWithIndexingOptions(name, arrayPositions, dt, DefaultDateTimeIndexingOptions) -} - -func NewDateTimeFieldWithIndexingOptions(name string, arrayPositions []uint64, dt time.Time, options index.FieldIndexingOptions) (*DateTimeField, error) { - if canRepresent(dt) { - dtInt64 := dt.UnixNano() - prefixCoded := numeric.MustNewPrefixCodedInt64(dtInt64, 0) - return &DateTimeField{ - name: name, - arrayPositions: arrayPositions, - value: prefixCoded, - options: options, - // not correct, just a place holder until we revisit how fields are - // represented and can fix this better - numPlainTextBytes: uint64(8), - }, nil - } - return nil, fmt.Errorf("cannot represent %s in this type", dt) -} - -func canRepresent(dt time.Time) bool { - if dt.Before(MinTimeRepresentable) || dt.After(MaxTimeRepresentable) { - return false - } - 
return true -} diff --git a/vendor/github.com/blevesearch/bleve/v2/document/field_geopoint.go b/vendor/github.com/blevesearch/bleve/v2/document/field_geopoint.go deleted file mode 100644 index 89de4e3bf..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/document/field_geopoint.go +++ /dev/null @@ -1,166 +0,0 @@ -// Copyright (c) 2017 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package document - -import ( - "fmt" - "reflect" - - "github.com/blevesearch/bleve/v2/analysis" - "github.com/blevesearch/bleve/v2/geo" - "github.com/blevesearch/bleve/v2/numeric" - "github.com/blevesearch/bleve/v2/size" - index "github.com/blevesearch/bleve_index_api" -) - -var reflectStaticSizeGeoPointField int - -func init() { - var f GeoPointField - reflectStaticSizeGeoPointField = int(reflect.TypeOf(f).Size()) -} - -var GeoPrecisionStep uint = 9 - -type GeoPointField struct { - name string - arrayPositions []uint64 - options index.FieldIndexingOptions - value numeric.PrefixCoded - numPlainTextBytes uint64 - length int - frequencies index.TokenFrequencies -} - -func (n *GeoPointField) Size() int { - return reflectStaticSizeGeoPointField + size.SizeOfPtr + - len(n.name) + - len(n.arrayPositions)*size.SizeOfUint64 -} - -func (n *GeoPointField) Name() string { - return n.name -} - -func (n *GeoPointField) ArrayPositions() []uint64 { - return n.arrayPositions -} - -func (n *GeoPointField) Options() index.FieldIndexingOptions { - return n.options -} - -func 
(n *GeoPointField) EncodedFieldType() byte { - return 'g' -} - -func (n *GeoPointField) AnalyzedLength() int { - return n.length -} - -func (n *GeoPointField) AnalyzedTokenFrequencies() index.TokenFrequencies { - return n.frequencies -} - -func (n *GeoPointField) Analyze() { - tokens := make(analysis.TokenStream, 0) - tokens = append(tokens, &analysis.Token{ - Start: 0, - End: len(n.value), - Term: n.value, - Position: 1, - Type: analysis.Numeric, - }) - - original, err := n.value.Int64() - if err == nil { - - shift := GeoPrecisionStep - for shift < 64 { - shiftEncoded, err := numeric.NewPrefixCodedInt64(original, shift) - if err != nil { - break - } - token := analysis.Token{ - Start: 0, - End: len(shiftEncoded), - Term: shiftEncoded, - Position: 1, - Type: analysis.Numeric, - } - tokens = append(tokens, &token) - shift += GeoPrecisionStep - } - } - - n.length = len(tokens) - n.frequencies = analysis.TokenFrequency(tokens, n.arrayPositions, n.options) -} - -func (n *GeoPointField) Value() []byte { - return n.value -} - -func (n *GeoPointField) Lon() (float64, error) { - i64, err := n.value.Int64() - if err != nil { - return 0.0, err - } - return geo.MortonUnhashLon(uint64(i64)), nil -} - -func (n *GeoPointField) Lat() (float64, error) { - i64, err := n.value.Int64() - if err != nil { - return 0.0, err - } - return geo.MortonUnhashLat(uint64(i64)), nil -} - -func (n *GeoPointField) GoString() string { - return fmt.Sprintf("&document.GeoPointField{Name:%s, Options: %s, Value: %s}", n.name, n.options, n.value) -} - -func (n *GeoPointField) NumPlainTextBytes() uint64 { - return n.numPlainTextBytes -} - -func NewGeoPointFieldFromBytes(name string, arrayPositions []uint64, value []byte) *GeoPointField { - return &GeoPointField{ - name: name, - arrayPositions: arrayPositions, - value: value, - options: DefaultNumericIndexingOptions, - numPlainTextBytes: uint64(len(value)), - } -} - -func NewGeoPointField(name string, arrayPositions []uint64, lon, lat float64) 
*GeoPointField { - return NewGeoPointFieldWithIndexingOptions(name, arrayPositions, lon, lat, DefaultNumericIndexingOptions) -} - -func NewGeoPointFieldWithIndexingOptions(name string, arrayPositions []uint64, lon, lat float64, options index.FieldIndexingOptions) *GeoPointField { - mhash := geo.MortonHash(lon, lat) - prefixCoded := numeric.MustNewPrefixCodedInt64(int64(mhash), 0) - return &GeoPointField{ - name: name, - arrayPositions: arrayPositions, - value: prefixCoded, - options: options, - // not correct, just a place holder until we revisit how fields are - // represented and can fix this better - numPlainTextBytes: uint64(8), - } -} diff --git a/vendor/github.com/blevesearch/bleve/v2/document/field_numeric.go b/vendor/github.com/blevesearch/bleve/v2/document/field_numeric.go deleted file mode 100644 index a54b082b4..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/document/field_numeric.go +++ /dev/null @@ -1,159 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package document - -import ( - "fmt" - "reflect" - - "github.com/blevesearch/bleve/v2/analysis" - "github.com/blevesearch/bleve/v2/numeric" - "github.com/blevesearch/bleve/v2/size" - index "github.com/blevesearch/bleve_index_api" -) - -var reflectStaticSizeNumericField int - -func init() { - var f NumericField - reflectStaticSizeNumericField = int(reflect.TypeOf(f).Size()) -} - -const DefaultNumericIndexingOptions = index.StoreField | index.IndexField | index.DocValues - -const DefaultPrecisionStep uint = 4 - -type NumericField struct { - name string - arrayPositions []uint64 - options index.FieldIndexingOptions - value numeric.PrefixCoded - numPlainTextBytes uint64 - length int - frequencies index.TokenFrequencies -} - -func (n *NumericField) Size() int { - return reflectStaticSizeNumericField + size.SizeOfPtr + - len(n.name) + - len(n.arrayPositions)*size.SizeOfPtr -} - -func (n *NumericField) Name() string { - return n.name -} - -func (n *NumericField) ArrayPositions() []uint64 { - return n.arrayPositions -} - -func (n *NumericField) Options() index.FieldIndexingOptions { - return n.options -} - -func (n *NumericField) EncodedFieldType() byte { - return 'n' -} - -func (n *NumericField) AnalyzedLength() int { - return n.length -} - -func (n *NumericField) AnalyzedTokenFrequencies() index.TokenFrequencies { - return n.frequencies -} - -func (n *NumericField) Analyze() { - tokens := make(analysis.TokenStream, 0) - tokens = append(tokens, &analysis.Token{ - Start: 0, - End: len(n.value), - Term: n.value, - Position: 1, - Type: analysis.Numeric, - }) - - original, err := n.value.Int64() - if err == nil { - - shift := DefaultPrecisionStep - for shift < 64 { - shiftEncoded, err := numeric.NewPrefixCodedInt64(original, shift) - if err != nil { - break - } - token := analysis.Token{ - Start: 0, - End: len(shiftEncoded), - Term: shiftEncoded, - Position: 1, - Type: analysis.Numeric, - } - tokens = append(tokens, &token) - shift += DefaultPrecisionStep - } - } - - 
n.length = len(tokens) - n.frequencies = analysis.TokenFrequency(tokens, n.arrayPositions, n.options) -} - -func (n *NumericField) Value() []byte { - return n.value -} - -func (n *NumericField) Number() (float64, error) { - i64, err := n.value.Int64() - if err != nil { - return 0.0, err - } - return numeric.Int64ToFloat64(i64), nil -} - -func (n *NumericField) GoString() string { - return fmt.Sprintf("&document.NumericField{Name:%s, Options: %s, Value: %s}", n.name, n.options, n.value) -} - -func (n *NumericField) NumPlainTextBytes() uint64 { - return n.numPlainTextBytes -} - -func NewNumericFieldFromBytes(name string, arrayPositions []uint64, value []byte) *NumericField { - return &NumericField{ - name: name, - arrayPositions: arrayPositions, - value: value, - options: DefaultNumericIndexingOptions, - numPlainTextBytes: uint64(len(value)), - } -} - -func NewNumericField(name string, arrayPositions []uint64, number float64) *NumericField { - return NewNumericFieldWithIndexingOptions(name, arrayPositions, number, DefaultNumericIndexingOptions) -} - -func NewNumericFieldWithIndexingOptions(name string, arrayPositions []uint64, number float64, options index.FieldIndexingOptions) *NumericField { - numberInt64 := numeric.Float64ToInt64(number) - prefixCoded := numeric.MustNewPrefixCodedInt64(numberInt64, 0) - return &NumericField{ - name: name, - arrayPositions: arrayPositions, - value: prefixCoded, - options: options, - // not correct, just a place holder until we revisit how fields are - // represented and can fix this better - numPlainTextBytes: uint64(8), - } -} diff --git a/vendor/github.com/blevesearch/bleve/v2/document/field_text.go b/vendor/github.com/blevesearch/bleve/v2/document/field_text.go deleted file mode 100644 index 924de532c..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/document/field_text.go +++ /dev/null @@ -1,157 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package document - -import ( - "fmt" - "reflect" - - "github.com/blevesearch/bleve/v2/analysis" - "github.com/blevesearch/bleve/v2/size" - index "github.com/blevesearch/bleve_index_api" -) - -var reflectStaticSizeTextField int - -func init() { - var f TextField - reflectStaticSizeTextField = int(reflect.TypeOf(f).Size()) -} - -const DefaultTextIndexingOptions = index.IndexField | index.DocValues - -type TextField struct { - name string - arrayPositions []uint64 - options index.FieldIndexingOptions - analyzer *analysis.Analyzer - value []byte - numPlainTextBytes uint64 - length int - frequencies index.TokenFrequencies -} - -func (t *TextField) Size() int { - return reflectStaticSizeTextField + size.SizeOfPtr + - len(t.name) + - len(t.arrayPositions)*size.SizeOfUint64 + - len(t.value) -} - -func (t *TextField) Name() string { - return t.name -} - -func (t *TextField) ArrayPositions() []uint64 { - return t.arrayPositions -} - -func (t *TextField) Options() index.FieldIndexingOptions { - return t.options -} - -func (t *TextField) EncodedFieldType() byte { - return 't' -} - -func (t *TextField) AnalyzedLength() int { - return t.length -} - -func (t *TextField) AnalyzedTokenFrequencies() index.TokenFrequencies { - return t.frequencies -} - -func (t *TextField) Analyze() { - var tokens analysis.TokenStream - if t.analyzer != nil { - bytesToAnalyze := t.Value() - if t.options.IsStored() { - // need to copy - 
bytesCopied := make([]byte, len(bytesToAnalyze)) - copy(bytesCopied, bytesToAnalyze) - bytesToAnalyze = bytesCopied - } - tokens = t.analyzer.Analyze(bytesToAnalyze) - } else { - tokens = analysis.TokenStream{ - &analysis.Token{ - Start: 0, - End: len(t.value), - Term: t.value, - Position: 1, - Type: analysis.AlphaNumeric, - }, - } - } - t.length = len(tokens) // number of tokens in this doc field - t.frequencies = analysis.TokenFrequency(tokens, t.arrayPositions, t.options) -} - -func (t *TextField) Analyzer() *analysis.Analyzer { - return t.analyzer -} - -func (t *TextField) Value() []byte { - return t.value -} - -func (t *TextField) Text() string { - return string(t.value) -} - -func (t *TextField) GoString() string { - return fmt.Sprintf("&document.TextField{Name:%s, Options: %s, Analyzer: %v, Value: %s, ArrayPositions: %v}", t.name, t.options, t.analyzer, t.value, t.arrayPositions) -} - -func (t *TextField) NumPlainTextBytes() uint64 { - return t.numPlainTextBytes -} - -func NewTextField(name string, arrayPositions []uint64, value []byte) *TextField { - return NewTextFieldWithIndexingOptions(name, arrayPositions, value, DefaultTextIndexingOptions) -} - -func NewTextFieldWithIndexingOptions(name string, arrayPositions []uint64, value []byte, options index.FieldIndexingOptions) *TextField { - return &TextField{ - name: name, - arrayPositions: arrayPositions, - options: options, - value: value, - numPlainTextBytes: uint64(len(value)), - } -} - -func NewTextFieldWithAnalyzer(name string, arrayPositions []uint64, value []byte, analyzer *analysis.Analyzer) *TextField { - return &TextField{ - name: name, - arrayPositions: arrayPositions, - options: DefaultTextIndexingOptions, - analyzer: analyzer, - value: value, - numPlainTextBytes: uint64(len(value)), - } -} - -func NewTextFieldCustom(name string, arrayPositions []uint64, value []byte, options index.FieldIndexingOptions, analyzer *analysis.Analyzer) *TextField { - return &TextField{ - name: name, - arrayPositions: 
arrayPositions, - options: options, - analyzer: analyzer, - value: value, - numPlainTextBytes: uint64(len(value)), - } -} diff --git a/vendor/github.com/blevesearch/bleve/v2/error.go b/vendor/github.com/blevesearch/bleve/v2/error.go deleted file mode 100644 index 7dd21194c..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/error.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package bleve - -// Constant Error values which can be compared to determine the type of error -const ( - ErrorIndexPathExists Error = iota - ErrorIndexPathDoesNotExist - ErrorIndexMetaMissing - ErrorIndexMetaCorrupt - ErrorIndexClosed - ErrorAliasMulti - ErrorAliasEmpty - ErrorUnknownIndexType - ErrorEmptyID - ErrorIndexReadInconsistency -) - -// Error represents a more strongly typed bleve error for detecting -// and handling specific types of errors. 
-type Error int - -func (e Error) Error() string { - return errorMessages[e] -} - -var errorMessages = map[Error]string{ - ErrorIndexPathExists: "cannot create new index, path already exists", - ErrorIndexPathDoesNotExist: "cannot open index, path does not exist", - ErrorIndexMetaMissing: "cannot open index, metadata missing", - ErrorIndexMetaCorrupt: "cannot open index, metadata corrupt", - ErrorIndexClosed: "index is closed", - ErrorAliasMulti: "cannot perform single index operation on multiple index alias", - ErrorAliasEmpty: "cannot perform operation on empty alias", - ErrorUnknownIndexType: "unknown index type", - ErrorEmptyID: "document ID cannot be empty", - ErrorIndexReadInconsistency: "index read inconsistency detected", -} diff --git a/vendor/github.com/blevesearch/bleve/v2/geo/parse.go b/vendor/github.com/blevesearch/bleve/v2/geo/parse.go deleted file mode 100644 index 8286805ff..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/geo/parse.go +++ /dev/null @@ -1,181 +0,0 @@ -// Copyright (c) 2017 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package geo - -import ( - "reflect" - "strconv" - "strings" -) - -// ExtractGeoPoint takes an arbitrary interface{} and tries it's best to -// interpret it is as geo point. 
Supported formats: -// Container: -// slice length 2 (GeoJSON) -// first element lon, second element lat -// string (coordinates separated by comma, or a geohash) -// first element lat, second element lon -// map[string]interface{} -// exact keys lat and lon or lng -// struct -// w/exported fields case-insensitive match on lat and lon or lng -// struct -// satisfying Later and Loner or Lnger interfaces -// -// in all cases values must be some sort of numeric-like thing: int/uint/float -func ExtractGeoPoint(thing interface{}) (lon, lat float64, success bool) { - var foundLon, foundLat bool - - thingVal := reflect.ValueOf(thing) - if !thingVal.IsValid() { - return lon, lat, false - } - - thingTyp := thingVal.Type() - - // is it a slice - if thingVal.Kind() == reflect.Slice { - // must be length 2 - if thingVal.Len() == 2 { - first := thingVal.Index(0) - if first.CanInterface() { - firstVal := first.Interface() - lon, foundLon = extractNumericVal(firstVal) - } - second := thingVal.Index(1) - if second.CanInterface() { - secondVal := second.Interface() - lat, foundLat = extractNumericVal(secondVal) - } - } - } - - // is it a string - if thingVal.Kind() == reflect.String { - geoStr := thingVal.Interface().(string) - if strings.Contains(geoStr, ",") { - // geo point with coordinates split by comma - points := strings.Split(geoStr, ",") - for i, point := range points { - // trim any leading or trailing white spaces - points[i] = strings.TrimSpace(point) - } - if len(points) == 2 { - var err error - lat, err = strconv.ParseFloat(points[0], 64) - if err == nil { - foundLat = true - } - lon, err = strconv.ParseFloat(points[1], 64) - if err == nil { - foundLon = true - } - } - } else { - // geohash - if len(geoStr) <= geoHashMaxLength { - lat, lon = DecodeGeoHash(geoStr) - foundLat = true - foundLon = true - } - } - } - - // is it a map - if l, ok := thing.(map[string]interface{}); ok { - if lval, ok := l["lon"]; ok { - lon, foundLon = extractNumericVal(lval) - } else if 
lval, ok := l["lng"]; ok { - lon, foundLon = extractNumericVal(lval) - } - if lval, ok := l["lat"]; ok { - lat, foundLat = extractNumericVal(lval) - } - } - - // now try reflection on struct fields - if thingVal.Kind() == reflect.Struct { - for i := 0; i < thingVal.NumField(); i++ { - fieldName := thingTyp.Field(i).Name - if strings.HasPrefix(strings.ToLower(fieldName), "lon") { - if thingVal.Field(i).CanInterface() { - fieldVal := thingVal.Field(i).Interface() - lon, foundLon = extractNumericVal(fieldVal) - } - } - if strings.HasPrefix(strings.ToLower(fieldName), "lng") { - if thingVal.Field(i).CanInterface() { - fieldVal := thingVal.Field(i).Interface() - lon, foundLon = extractNumericVal(fieldVal) - } - } - if strings.HasPrefix(strings.ToLower(fieldName), "lat") { - if thingVal.Field(i).CanInterface() { - fieldVal := thingVal.Field(i).Interface() - lat, foundLat = extractNumericVal(fieldVal) - } - } - } - } - - // last hope, some interfaces - // lon - if l, ok := thing.(loner); ok { - lon = l.Lon() - foundLon = true - } else if l, ok := thing.(lnger); ok { - lon = l.Lng() - foundLon = true - } - // lat - if l, ok := thing.(later); ok { - lat = l.Lat() - foundLat = true - } - - return lon, lat, foundLon && foundLat -} - -// extract numeric value (if possible) and returns a float64 -func extractNumericVal(v interface{}) (float64, bool) { - val := reflect.ValueOf(v) - if !val.IsValid() { - return 0, false - } - typ := val.Type() - switch typ.Kind() { - case reflect.Float32, reflect.Float64: - return val.Float(), true - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return float64(val.Int()), true - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - return float64(val.Uint()), true - } - - return 0, false -} - -// various support interfaces which can be used to find lat/lon -type loner interface { - Lon() float64 -} - -type later interface { - Lat() float64 -} - -type lnger interface { - Lng() float64 
-} diff --git a/vendor/github.com/blevesearch/bleve/v2/go.mod b/vendor/github.com/blevesearch/bleve/v2/go.mod deleted file mode 100644 index a1a6b5977..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/go.mod +++ /dev/null @@ -1,29 +0,0 @@ -module github.com/blevesearch/bleve/v2 - -go 1.13 - -require ( - github.com/RoaringBitmap/roaring v0.4.23 - github.com/blevesearch/bleve_index_api v1.0.0 - github.com/blevesearch/go-porterstemmer v1.0.3 - github.com/blevesearch/scorch_segment_api/v2 v2.0.1 - github.com/blevesearch/segment v0.9.0 - github.com/blevesearch/snowballstem v0.9.0 - github.com/blevesearch/upsidedown_store_api v1.0.1 - github.com/blevesearch/vellum v1.0.3 - github.com/blevesearch/zapx/v11 v11.2.0 - github.com/blevesearch/zapx/v12 v12.2.0 - github.com/blevesearch/zapx/v13 v13.2.0 - github.com/blevesearch/zapx/v14 v14.2.0 - github.com/blevesearch/zapx/v15 v15.2.0 - github.com/couchbase/moss v0.1.0 - github.com/golang/protobuf v1.3.2 - github.com/kljensen/snowball v0.6.0 - github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563 - github.com/spf13/cobra v0.0.5 - github.com/steveyen/gtreap v0.1.0 - github.com/syndtr/goleveldb v1.0.0 - github.com/willf/bitset v1.1.10 - go.etcd.io/bbolt v1.3.5 - golang.org/x/text v0.3.0 -) diff --git a/vendor/github.com/blevesearch/bleve/v2/go.sum b/vendor/github.com/blevesearch/bleve/v2/go.sum deleted file mode 100644 index e1e8edd18..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/go.sum +++ /dev/null @@ -1,128 +0,0 @@ -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/RoaringBitmap/roaring v0.4.23 h1:gpyfd12QohbqhFO4NVDUdoPOCXsyahYRQhINmlHxKeo= -github.com/RoaringBitmap/roaring v0.4.23/go.mod h1:D0gp8kJQgE1A4LQ5wFLggQEyvDi06Mq5mKs52e1TwOo= -github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= -github.com/blevesearch/bleve_index_api v1.0.0 h1:Ds3XeuTxjXCkG6pgIwWDRyooJKNIuOKemnN0N0IkhTU= 
-github.com/blevesearch/bleve_index_api v1.0.0/go.mod h1:fiwKS0xLEm+gBRgv5mumf0dhgFr2mDgZah1pqv1c1M4= -github.com/blevesearch/go-porterstemmer v1.0.3 h1:GtmsqID0aZdCSNiY8SkuPJ12pD4jI+DdXTAn4YRcHCo= -github.com/blevesearch/go-porterstemmer v1.0.3/go.mod h1:angGc5Ht+k2xhJdZi511LtmxuEf0OVpvUUNrwmM1P7M= -github.com/blevesearch/mmap-go v1.0.2 h1:JtMHb+FgQCTTYIhtMvimw15dJwu1Y5lrZDMOFXVWPk0= -github.com/blevesearch/mmap-go v1.0.2/go.mod h1:ol2qBqYaOUsGdm7aRMRrYGgPvnwLe6Y+7LMvAB5IbSA= -github.com/blevesearch/scorch_segment_api/v2 v2.0.1 h1:fd+hPtZ8GsbqPK1HslGp7Vhoik4arZteA/IsCEgOisw= -github.com/blevesearch/scorch_segment_api/v2 v2.0.1/go.mod h1:lq7yK2jQy1yQjtjTfU931aVqz7pYxEudHaDwOt1tXfU= -github.com/blevesearch/segment v0.9.0 h1:5lG7yBCx98or7gK2cHMKPukPZ/31Kag7nONpoBt22Ac= -github.com/blevesearch/segment v0.9.0/go.mod h1:9PfHYUdQCgHktBgvtUOF4x+pc4/l8rdH0u5spnW85UQ= -github.com/blevesearch/snowballstem v0.9.0 h1:lMQ189YspGP6sXvZQ4WZ+MLawfV8wOmPoD/iWeNXm8s= -github.com/blevesearch/snowballstem v0.9.0/go.mod h1:PivSj3JMc8WuaFkTSRDW2SlrulNWPl4ABg1tC/hlgLs= -github.com/blevesearch/upsidedown_store_api v1.0.1 h1:1SYRwyoFLwG3sj0ed89RLtM15amfX2pXlYbFOnF8zNU= -github.com/blevesearch/upsidedown_store_api v1.0.1/go.mod h1:MQDVGpHZrpe3Uy26zJBf/a8h0FZY6xJbthIMm8myH2Q= -github.com/blevesearch/vellum v1.0.3 h1:U86G41A7CtXNzzpIJHM8lSTUqz1Mp8U870TkcdCzZc8= -github.com/blevesearch/vellum v1.0.3/go.mod h1:2u5ax02KeDuNWu4/C+hVQMD6uLN4txH1JbtpaDNLJRo= -github.com/blevesearch/zapx/v11 v11.2.0 h1:GBkCJYsyj3eIU4+aiLPxoMz1PYvDbQZl/oXHIBZIP60= -github.com/blevesearch/zapx/v11 v11.2.0/go.mod h1:gN/a0alGw1FZt/YGTo1G6Z6XpDkeOfujX5exY9sCQQM= -github.com/blevesearch/zapx/v12 v12.2.0 h1:dyRcSoZVO1jktL4UpGkCEF1AYa3xhKPirh4/N+Va+Ww= -github.com/blevesearch/zapx/v12 v12.2.0/go.mod h1:fdjwvCwWWwJW/EYTYGtAp3gBA0geCYGLcVTtJEZnY6A= -github.com/blevesearch/zapx/v13 v13.2.0 h1:mUqbaqQABp8nBE4t4q2qMyHCCq4sykoV8r7aJk4ih3s= -github.com/blevesearch/zapx/v13 v13.2.0/go.mod 
h1:o5rAy/lRS5JpAbITdrOHBS/TugWYbkcYZTz6VfEinAQ= -github.com/blevesearch/zapx/v14 v14.2.0 h1:UsfRqvM9RJxKNKrkR1U7aYc1cv9MWx719fsAjbF6joI= -github.com/blevesearch/zapx/v14 v14.2.0/go.mod h1:GNgZusc1p4ot040cBQMRGEZobvwjCquiEKYh1xLFK9g= -github.com/blevesearch/zapx/v15 v15.2.0 h1:ZpibwcrrOaeslkOw3sJ7npP7KDgRHI/DkACjKTqFwyM= -github.com/blevesearch/zapx/v15 v15.2.0/go.mod h1:MmQceLpWfME4n1WrBFIwplhWmaQbQqLQARpaKUEOs/A= -github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= -github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/couchbase/ghistogram v0.1.0 h1:b95QcQTCzjTUocDXp/uMgSNQi8oj1tGwnJ4bODWZnps= -github.com/couchbase/ghistogram v0.1.0/go.mod h1:s1Jhy76zqfEecpNWJfWUiKZookAFaiGOEoyzgHt9i7k= -github.com/couchbase/moss v0.1.0 h1:HCL+xxHUwmOaL44kMM/gU08OW6QGCui1WVFO58bjhNI= -github.com/couchbase/moss v0.1.0/go.mod h1:9MaHIaRuy9pvLPUJxB8sh8OrLfyDczECVL37grCIubs= -github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/glycerine/go-unsnap-stream v0.0.0-20181221182339-f9677308dec2 h1:Ujru1hufTHVb++eG6OuNDKMxZnGIvF6o/u8q/8h2+I4= -github.com/glycerine/go-unsnap-stream v0.0.0-20181221182339-f9677308dec2/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE= -github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31 h1:gclg6gY70GLy3PbkQ1AERPfmLMMagS60DKF78eWwLn8= -github.com/glycerine/goconvey 
v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= -github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/gopherjs/gopherjs v0.0.0-20190910122728-9d188e94fb99 h1:twflg0XRTjwKpxb/jFExr4HGq6on2dEOmnL6FV+fgPw= -github.com/gopherjs/gopherjs v0.0.0-20190910122728-9d188e94fb99/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= -github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= -github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= -github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/kljensen/snowball v0.6.0 h1:6DZLCcZeL0cLfodx+Md4/OLC6b/bfurWUOUGs1ydfOU= -github.com/kljensen/snowball v0.6.0/go.mod h1:27N7E8fVU5H68RlUmnWwZCfxgt4POBJfENGMvNRhldw= -github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mschoch/smat 
v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg= -github.com/mschoch/smat v0.2.0 h1:8imxQsjDm8yFEAVBe7azKmKSgzSkZXDuKkSq9374khM= -github.com/mschoch/smat v0.2.0/go.mod h1:kc9mz7DoBKqDyiRL7VZN8KvXQMWeTaVnttLRXOlotKw= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs= -github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/gomega v1.4.3 h1:RE1xgDvH7imwFD45h+u2SgIfERHlS2yNG4DObb5BSKU= -github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/philhofer/fwd v1.0.0 h1:UbZqGr5Y38ApvM/V/jEljVxwocdweyH+vmYvRPBnbqQ= -github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563 h1:dY6ETXrvDG7Sa4vE8ZQG4yqWg6UnOcbqTAahkV813vQ= -github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= -github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= -github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s= -github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= -github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= -github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= -github.com/spf13/pflag v1.0.3/go.mod 
h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= -github.com/steveyen/gtreap v0.1.0 h1:CjhzTa274PyJLJuMZwIzCO1PfC00oRa8d1Kc78bFXJM= -github.com/steveyen/gtreap v0.1.0/go.mod h1:kl/5J7XbrOmlIbYIXdRHDDE5QxHqpk0cmkT7Z4dM9/Y= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/syndtr/goleveldb v1.0.0 h1:fBdIW9lB4Iz0n9khmH8w27SJ3QEJ7+IgjPEwGSZiFdE= -github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ= -github.com/tinylib/msgp v1.1.0 h1:9fQd+ICuRIu/ue4vxJZu6/LzxN0HwMds2nq/0cFvxHU= -github.com/tinylib/msgp v1.1.0/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= -github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= -github.com/willf/bitset v1.1.10 h1:NotGKqX0KwQ72NUzqrjZq5ipPNDQex9lo3WpaS8L2sc= -github.com/willf/bitset v1.1.10/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= -github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= -go.etcd.io/bbolt v1.3.5 h1:XAzx9gjCb0Rxj7EoqcClPD1d5ZBxZJk0jbuoPHenBt0= -go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= -golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd h1:nTDtHvHSdCn1m6ITfMRqtOd/9+7a3s8RBNOZ3eYZzJA= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 
-golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181221143128-b4a75ba826a6/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5 h1:LfCXLvNmTYH9kEmVgqbnsWfruoXZIrh4YBgqVHtDvw0= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/blevesearch/bleve/v2/index.go b/vendor/github.com/blevesearch/bleve/v2/index.go deleted file mode 100644 index e08271e51..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/index.go +++ /dev/null @@ -1,308 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package bleve - -import ( - "context" - "github.com/blevesearch/bleve/v2/index/upsidedown" - - "github.com/blevesearch/bleve/v2/document" - "github.com/blevesearch/bleve/v2/mapping" - "github.com/blevesearch/bleve/v2/size" - index "github.com/blevesearch/bleve_index_api" -) - -// A Batch groups together multiple Index and Delete -// operations you would like performed at the same -// time. The Batch structure is NOT thread-safe. -// You should only perform operations on a batch -// from a single thread at a time. Once batch -// execution has started, you may not modify it. -type Batch struct { - index Index - internal *index.Batch - - lastDocSize uint64 - totalSize uint64 -} - -// Index adds the specified index operation to the -// batch. NOTE: the bleve Index is not updated -// until the batch is executed. 
-func (b *Batch) Index(id string, data interface{}) error { - if id == "" { - return ErrorEmptyID - } - doc := document.NewDocument(id) - err := b.index.Mapping().MapDocument(doc, data) - if err != nil { - return err - } - b.internal.Update(doc) - - b.lastDocSize = uint64(doc.Size() + - len(id) + size.SizeOfString) // overhead from internal - b.totalSize += b.lastDocSize - - return nil -} - -func (b *Batch) LastDocSize() uint64 { - return b.lastDocSize -} - -func (b *Batch) TotalDocsSize() uint64 { - return b.totalSize -} - -// IndexAdvanced adds the specified index operation to the -// batch which skips the mapping. NOTE: the bleve Index is not updated -// until the batch is executed. -func (b *Batch) IndexAdvanced(doc *document.Document) (err error) { - if doc.ID() == "" { - return ErrorEmptyID - } - b.internal.Update(doc) - return nil -} - -// Delete adds the specified delete operation to the -// batch. NOTE: the bleve Index is not updated until -// the batch is executed. -func (b *Batch) Delete(id string) { - if id != "" { - b.internal.Delete(id) - } -} - -// SetInternal adds the specified set internal -// operation to the batch. NOTE: the bleve Index is -// not updated until the batch is executed. -func (b *Batch) SetInternal(key, val []byte) { - b.internal.SetInternal(key, val) -} - -// DeleteInternal adds the specified delete internal -// operation to the batch. NOTE: the bleve Index is -// not updated until the batch is executed. -func (b *Batch) DeleteInternal(key []byte) { - b.internal.DeleteInternal(key) -} - -// Size returns the total number of operations inside the batch -// including normal index operations and internal operations. -func (b *Batch) Size() int { - return len(b.internal.IndexOps) + len(b.internal.InternalOps) -} - -// String prints a user friendly string representation of what -// is inside this batch. 
-func (b *Batch) String() string { - return b.internal.String() -} - -// Reset returns a Batch to the empty state so that it can -// be re-used in the future. -func (b *Batch) Reset() { - b.internal.Reset() - b.lastDocSize = 0 - b.totalSize = 0 -} - -func (b *Batch) Merge(o *Batch) { - if o != nil && o.internal != nil { - b.internal.Merge(o.internal) - if o.LastDocSize() > 0 { - b.lastDocSize = o.LastDocSize() - } - b.totalSize = uint64(b.internal.TotalDocSize()) - } -} - -func (b *Batch) SetPersistedCallback(f index.BatchCallback) { - b.internal.SetPersistedCallback(f) -} - -func (b *Batch) PersistedCallback() index.BatchCallback { - return b.internal.PersistedCallback() -} - -// An Index implements all the indexing and searching -// capabilities of bleve. An Index can be created -// using the New() and Open() methods. -// -// Index() takes an input value, deduces a DocumentMapping for its type, -// assigns string paths to its fields or values then applies field mappings on -// them. -// -// The DocumentMapping used to index a value is deduced by the following rules: -// 1) If value implements mapping.bleveClassifier interface, resolve the mapping -// from BleveType(). -// 2) If value implements mapping.Classifier interface, resolve the mapping -// from Type(). -// 3) If value has a string field or value at IndexMapping.TypeField. -// (defaulting to "_type"), use it to resolve the mapping. Fields addressing -// is described below. -// 4) If IndexMapping.DefaultType is registered, return it. -// 5) Return IndexMapping.DefaultMapping. -// -// Each field or nested field of the value is identified by a string path, then -// mapped to one or several FieldMappings which extract the result for analysis. -// -// Struct values fields are identified by their "json:" tag, or by their name. -// Nested fields are identified by prefixing with their parent identifier, -// separated by a dot. -// -// Map values entries are identified by their string key. 
Entries not indexed -// by strings are ignored. Entry values are identified recursively like struct -// fields. -// -// Slice and array values are identified by their field name. Their elements -// are processed sequentially with the same FieldMapping. -// -// String, float64 and time.Time values are identified by their field name. -// Other types are ignored. -// -// Each value identifier is decomposed in its parts and recursively address -// SubDocumentMappings in the tree starting at the root DocumentMapping. If a -// mapping is found, all its FieldMappings are applied to the value. If no -// mapping is found and the root DocumentMapping is dynamic, default mappings -// are used based on value type and IndexMapping default configurations. -// -// Finally, mapped values are analyzed, indexed or stored. See -// FieldMapping.Analyzer to know how an analyzer is resolved for a given field. -// -// Examples: -// -// type Date struct { -// Day string `json:"day"` -// Month string -// Year string -// } -// -// type Person struct { -// FirstName string `json:"first_name"` -// LastName string -// BirthDate Date `json:"birth_date"` -// } -// -// A Person value FirstName is mapped by the SubDocumentMapping at -// "first_name". Its LastName is mapped by the one at "LastName". The day of -// BirthDate is mapped to the SubDocumentMapping "day" of the root -// SubDocumentMapping "birth_date". It will appear as the "birth_date.day" -// field in the index. The month is mapped to "birth_date.Month". -type Index interface { - // Index analyzes, indexes or stores mapped data fields. Supplied - // identifier is bound to analyzed data and will be retrieved by search - // requests. See Index interface documentation for details about mapping - // rules. - Index(id string, data interface{}) error - Delete(id string) error - - NewBatch() *Batch - Batch(b *Batch) error - - // Document returns specified document or nil if the document is not - // indexed or stored. 
- Document(id string) (index.Document, error) - // DocCount returns the number of documents in the index. - DocCount() (uint64, error) - - Search(req *SearchRequest) (*SearchResult, error) - SearchInContext(ctx context.Context, req *SearchRequest) (*SearchResult, error) - - Fields() ([]string, error) - - FieldDict(field string) (index.FieldDict, error) - FieldDictRange(field string, startTerm []byte, endTerm []byte) (index.FieldDict, error) - FieldDictPrefix(field string, termPrefix []byte) (index.FieldDict, error) - - Close() error - - Mapping() mapping.IndexMapping - - Stats() *IndexStat - StatsMap() map[string]interface{} - - GetInternal(key []byte) ([]byte, error) - SetInternal(key, val []byte) error - DeleteInternal(key []byte) error - - // Name returns the name of the index (by default this is the path) - Name() string - // SetName lets you assign your own logical name to this index - SetName(string) - - // Advanced returns the internal index implementation - Advanced() (index.Index, error) -} - -// New index at the specified path, must not exist. -// The provided mapping will be used for all -// Index/Search operations. -func New(path string, mapping mapping.IndexMapping) (Index, error) { - return newIndexUsing(path, mapping, Config.DefaultIndexType, Config.DefaultKVStore, nil) -} - -// NewMemOnly creates a memory-only index. -// The contents of the index is NOT persisted, -// and will be lost once closed. -// The provided mapping will be used for all -// Index/Search operations. -func NewMemOnly(mapping mapping.IndexMapping) (Index, error) { - return newIndexUsing("", mapping, upsidedown.Name, Config.DefaultMemKVStore, nil) -} - -// NewUsing creates index at the specified path, -// which must not already exist. -// The provided mapping will be used for all -// Index/Search operations. -// The specified index type will be used. -// The specified kvstore implementation will be used -// and the provided kvconfig will be passed to its -// constructor. 
Note that currently the values of kvconfig must -// be able to be marshaled and unmarshaled using the encoding/json library (used -// when reading/writing the index metadata file). -func NewUsing(path string, mapping mapping.IndexMapping, indexType string, kvstore string, kvconfig map[string]interface{}) (Index, error) { - return newIndexUsing(path, mapping, indexType, kvstore, kvconfig) -} - -// Open index at the specified path, must exist. -// The mapping used when it was created will be used for all Index/Search operations. -func Open(path string) (Index, error) { - return openIndexUsing(path, nil) -} - -// OpenUsing opens index at the specified path, must exist. -// The mapping used when it was created will be used for all Index/Search operations. -// The provided runtimeConfig can override settings -// persisted when the kvstore was created. -func OpenUsing(path string, runtimeConfig map[string]interface{}) (Index, error) { - return openIndexUsing(path, runtimeConfig) -} - -// Builder is a limited interface, used to build indexes in an offline mode. -// Items cannot be updated or deleted, and the caller MUST ensure a document is -// indexed only once. -type Builder interface { - Index(id string, data interface{}) error - Close() error -} - -// NewBuilder creates a builder, which will build an index at the specified path, -// using the specified mapping and options. -func NewBuilder(path string, mapping mapping.IndexMapping, config map[string]interface{}) (Builder, error) { - return newBuilder(path, mapping, config) -} diff --git a/vendor/github.com/blevesearch/bleve/v2/index/scorch/README.md b/vendor/github.com/blevesearch/bleve/v2/index/scorch/README.md deleted file mode 100644 index 9794aed70..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/index/scorch/README.md +++ /dev/null @@ -1,367 +0,0 @@ -# scorch - -## Definitions - -Batch -- A collection of Documents to mutate in the index. - -Document -- Has a unique identifier (arbitrary bytes). 
-- Is comprised of a list of fields. - -Field -- Has a name (string). -- Has a type (text, number, date, geopoint). -- Has a value (depending on type). -- Can be indexed, stored, or both. -- If indexed, can be analyzed. --m If indexed, can optionally store term vectors. - -## Scope - -Scorch *MUST* implement the bleve.index API without requiring any changes to this API. - -Scorch *MAY* introduce new interfaces, which can be discovered to allow use of new capabilities not in the current API. - -## Implementation - -The scorch implementation starts with the concept of a segmented index. - -A segment is simply a slice, subset, or portion of the entire index. A segmented index is one which is composed of one or more segments. Although segments are created in a particular order, knowing this ordering is not required to achieve correct semantics when querying. Because there is no ordering, this means that when searching an index, you can (and should) search all the segments concurrently. - -### Internal Wrapper - -In order to accommodate the existing APIs while also improving the implementation, the scorch implementation includes some wrapper functionality that must be described. - -#### \_id field - -In scorch, field 0 is prearranged to be named \_id. All documents have a value for this field, which is the documents external identifier. In this version the field *MUST* be both indexed AND stored. The scorch wrapper adds this field, as it will not be present in the Document from the calling bleve code. - -NOTE: If a document already contains a field \_id, it will be replaced. If this is problematic, the caller must ensure such a scenario does not happen. 
- -### Proposed Structures - -``` -type Segment interface { - - Dictionary(field string) TermDictionary - -} - -type TermDictionary interface { - - PostingsList(term string, excluding PostingsList) PostingsList - -} - -type PostingsList interface { - - Next() Posting - - And(other PostingsList) PostingsList - Or(other PostingsList) PostingsList - -} - -type Posting interface { - Number() uint64 - - Frequency() uint64 - Norm() float64 - - Locations() Locations -} - -type Locations interface { - Start() uint64 - End() uint64 - Pos() uint64 - ArrayPositions() ... -} - -type DeletedDocs { - -} - -type SegmentSnapshot struct { - segment Segment - deleted PostingsList -} - -type IndexSnapshot struct { - segment []SegmentSnapshot -} -``` -**What about errors?** -**What about memory mgmnt or context?** -**Postings List separate iterator to separate stateful from stateless** -### Mutating the Index - -The bleve.index API has methods for directly making individual mutations (Update/Delete/SetInternal/DeleteInternal), however for this first implementation, we assume that all of these calls can simply be turned into a Batch of size 1. This may be highly inefficient, but it will be correct. This decision is made based on the fact that Couchbase FTS always uses Batches. - -NOTE: As a side-effect of this decision, it should be clear that performance tuning may depend on the batch size, which may in-turn require changes in FTS. - -From this point forward, only Batch mutations will be discussed. - -Sequence of Operations: - -1. For each document in the batch, search through all existing segments. The goal is to build up a per-segment bitset which tells us which documents in that segment are obsoleted by the addition of the new segment we're currently building. NOTE: we're not ready for this change to take effect yet, so rather than this operation mutating anything, they simply return bitsets, which we can apply later. 
Logically, this is something like: - - ``` - foreach segment { - dict := segment.Dictionary("\_id") - postings := empty postings list - foreach docID { - postings = postings.Or(dict.PostingsList(docID, nil)) - } - } - ``` - - NOTE: it is illustrated above as nested for loops, but some or all of these could be concurrently. The end result is that for each segment, we have (possibly empty) bitset. - -2. Also concurrent with 1, the documents in the batch are analyzed. This analysis proceeds using the existing analyzer pool. - -3. (after 2 completes) Analyzed documents are fed into a function which builds a new Segment representing this information. - -4. We now have everything we need to update the state of the system to include this new snapshot. - - - Acquire a lock - - Create a new IndexSnapshot - - For each SegmentSnapshot in the IndexSnapshot, take the deleted PostingsList and OR it with the new postings list for this Segment. Construct a new SegmentSnapshot for the segment using this new deleted PostingsList. Append this SegmentSnapshot to the IndexSnapshot. - - Create a new SegmentSnapshot wrapping our new segment with nil deleted docs. 
- - Append the new SegmentSnapshot to the IndexSnapshot - - Release the lock - -An ASCII art example: - ``` - 0 - Empty Index - - No segments - - IndexSnapshot - segments [] - deleted [] - - - 1 - Index Batch [ A B C ] - - segment 0 - numbers [ 1 2 3 ] - \_id [ A B C ] - - IndexSnapshot - segments [ 0 ] - deleted [ nil ] - - - 2 - Index Batch [ B' ] - - segment 0 1 - numbers [ 1 2 3 ] [ 1 ] - \_id [ A B C ] [ B ] - - Compute bitset segment-0-deleted-by-1: - [ 0 1 0 ] - - OR it with previous (nil) (call it 0-1) - [ 0 1 0 ] - - IndexSnapshot - segments [ 0 1 ] - deleted [ 0-1 nil ] - - 3 - Index Batch [ C' ] - - segment 0 1 2 - numbers [ 1 2 3 ] [ 1 ] [ 1 ] - \_id [ A B C ] [ B ] [ C ] - - Compute bitset segment-0-deleted-by-2: - [ 0 0 1 ] - - OR it with previous ([ 0 1 0 ]) (call it 0-12) - [ 0 1 1 ] - - Compute bitset segment-1-deleted-by-2: - [ 0 ] - - OR it with previous (nil) - still just nil - - - IndexSnapshot - segments [ 0 1 2 ] - deleted [ 0-12 nil nil ] - ``` - -**is there opportunity to stop early when doc is found in one segment** -**also, more efficient way to find bits for long lists of ids?** - -### Searching - -In the bleve.index API all searching starts by getting an IndexReader, which represents a snapshot of the index at a point in time. - -As described in the section above, our index implementation maintains a pointer to the current IndexSnapshot. When a caller gets an IndexReader, they get a copy of this pointer, and can use it as long as they like. The IndexSnapshot contains SegmentSnapshots, which only contain pointers to immutable segments. The deleted posting lists associated with a segment change over time, but the particular deleted posting list in YOUR snapshot is immutable. This gives a stable view of the data. - -#### Term Search - -Term search is the only searching primitive exposed in today's bleve.index API. 
This ultimately could limit our ability to take advantage of the indexing improvements, but it also means it will be easier to get a first version of this working. - -A term search for term T in field F will look something like this: - -``` - searchResultPostings = empty - foreach segment { - dict := segment.Dictionary(F) - segmentResultPostings = dict.PostingsList(T, segmentSnapshotDeleted) - // make segmentLocal numbers into global numbers, and flip bits in searchResultPostings - } -``` - -The searchResultPostings will be a new implementation of the TermFieldReader inteface. - -As a reminder this interface is: - -``` -// TermFieldReader is the interface exposing the enumeration of documents -// containing a given term in a given field. Documents are returned in byte -// lexicographic order over their identifiers. -type TermFieldReader interface { - // Next returns the next document containing the term in this field, or nil - // when it reaches the end of the enumeration. The preAlloced TermFieldDoc - // is optional, and when non-nil, will be used instead of allocating memory. - Next(preAlloced *TermFieldDoc) (*TermFieldDoc, error) - - // Advance resets the enumeration at specified document or its immediate - // follower. - Advance(ID IndexInternalID, preAlloced *TermFieldDoc) (*TermFieldDoc, error) - - // Count returns the number of documents contains the term in this field. - Count() uint64 - Close() error -} -``` - -At first glance this appears problematic, we have no way to return documents in order of their identifiers. But it turns out the wording of this perhaps too strong, or a bit ambiguous. Originally, this referred to the external identifiers, but with the introduction of a distinction between internal/external identifiers, returning them in order of their internal identifiers is also acceptable. 
**ASIDE**: the reason for this is that most callers just use Next() and literally don't care what the order is, they could be in any order and it would be fine. There is only one search that cares and that is the ConjunctionSearcher, which relies on Next/Advance having very specific semantics. Later in this document we will have a proposal to split into multiple interfaces: - -- The weakest interface, only supports Next() no ordering at all. -- Ordered, supporting Advance() -- And/Or'able capable of internally efficiently doing these ops with like interfaces (if not capable then can always fall back to external walking) - -But, the good news is that we don't even have to do that for our first implementation. As long as the global numbers we use for internal identifiers are consistent within this IndexSnapshot, then Next() will be ordered by ascending document number, and Advance() will still work correctly. - -NOTE: there is another place where we rely on the ordering of these hits, and that is in the "\_id" sort order. Previously this was the natural order, and a NOOP for the collector, now it must be implemented by actually sorting on the "\_id" field. We probably should introduce at least a marker interface to detect this. - -An ASCII art example: - -``` -Let's start with the IndexSnapshot we ended with earlier: - -3 - Index Batch [ C' ] - - segment 0 1 2 - numbers [ 1 2 3 ] [ 1 ] [ 1 ] - \_id [ A B C ] [ B ] [ C ] - - Compute bitset segment-0-deleted-by-2: - [ 0 0 1 ] - - OR it with previous ([ 0 1 0 ]) (call it 0-12) - [ 0 1 1 ] - -Compute bitset segment-1-deleted-by-2: - [ 0 0 0 ] - -OR it with previous (nil) - still just nil - - - IndexSnapshot - segments [ 0 1 2 ] - deleted [ 0-12 nil nil ] - -Now let's search for the term 'cat' in the field 'desc' and let's assume that Document C (both versions) would match it. 
- -Concurrently: - - - Segment 0 - - Get Term Dictionary For Field 'desc' - - From it get Postings List for term 'cat' EXCLUDING 0-12 - - raw segment matches [ 0 0 1 ] but excluding [ 0 1 1 ] gives [ 0 0 0 ] - - Segment 1 - - Get Term Dictionary For Field 'desc' - - From it get Postings List for term 'cat' excluding nil - - [ 0 ] - - Segment 2 - - Get Term Dictionary For Field 'desc' - - From it get Postings List for term 'cat' excluding nil - - [ 1 ] - -Map local bitsets into global number space (global meaning cross-segment but still unique to this snapshot) - -IndexSnapshot already should have mapping something like: -0 - Offset 0 -1 - Offset 3 (because segment 0 had 3 docs) -2 - Offset 4 (because segment 1 had 1 doc) - -This maps to search result bitset: - -[ 0 0 0 0 1] - -Caller would call Next() and get doc number 5 (assuming 1 based indexing for now) - -Caller could then ask to get term locations, stored fields, external doc ID for document number 5. Internally in the IndexSnapshot, we can now convert that back, and realize doc number 5 comes from segment 2, 5-4=1 so we're looking for doc number 1 in segment 2. That happens to be C... - -``` - -#### Future improvements - -In the future, interfaces to detect these non-serially operating TermFieldReaders could expose their own And() and Or() up to the higher level Conjunction/Disjunction searchers. Doing this alone offers some win, but also means there would be greater burden on the Searcher code rewriting logical expressions for maximum performance. - -Another related topic is that of peak memory usage. With serially operating TermFieldReaders it was necessary to start them all at the same time and operate in unison. However, with these non-serially operating TermFieldReaders we have the option of doing a few at a time, consolidating them, dispoting the intermediaries, and then doing a few more. For very complex queries with many clauses this could reduce peak memory usage. 
- - -### Memory Tracking - -All segments must be able to produce two statistics, an estimate of their explicit memory usage, and their actual size on disk (if any). For in-memory segments, disk usage could be zero, and the memory usage represents the entire information content. For mmap-based disk segments, the memory could be as low as the size of tracking structure itself (say just a few pointers). - -This would allow the implementation to throttle or block incoming mutations when a threshold memory usage has (or would be) exceeded. - -### Persistence - -Obviously, we want to support (but maybe not require) asynchronous persistence of segments. My expectation is that segments are initially built in memory. At some point they are persisted to disk. This poses some interesting challenges. - -At runtime, the state of an index (it's IndexSnapshot) is not only the contents of the segments, but also the bitmasks of deleted documents. These bitmasks indirectly encode an ordering in which the segments were added. The reason is that the bitmasks encode which items have been obsoleted by other (subsequent or more future) segments. In the runtime implementation we compute bitmask deltas and then merge them at the same time we bring the new segment in. One idea is that we could take a similar approach on disk. When we persist a segment, we persist the bitmask deltas of segments known to exist at that time, and eventually these can get merged up into a base segment deleted bitmask. - -This also relates to the topic rollback, addressed next... - - -### Rollback - -One desirable property in the Couchbase ecosystem is the ability to rollback to some previous (though typically not long ago) state. One idea for keeping this property in this design is to protect some of the most recent segments from merging. Then, if necessary, they could be "undone" to reveal previous states of the system. In these scenarios "undone" has to properly undo the deleted bitmasks on the other segments. 
Again, the current thinking is that rather than "undo" anything, it could be work that was deferred in the first place, thus making it easier to logically undo. - -Another possibly related approach would be to tie this into our existing snapshot mechanism. Perhaps simulating a slow reader (holding onto index snapshots) for some period of time, can be the mechanism to achieve the desired end goal. - - -### Internal Storage - -The bleve.index API has support for "internal storage". The ability to store information under a separate name space. - -This is not used for high volume storage, so it is tempting to think we could just put a small k/v store alongside the rest of the index. But, the reality is that this storage is used to maintain key information related to the rollback scenario. Because of this, its crucial that ordering and overwriting of key/value pairs correspond with actual segment persistence in the index. Based on this, I believe its important to put the internal key/value pairs inside the segments themselves. But, this also means that they must follow a similar "deleted" bitmask approach to obsolete values in older segments. But, this also seems to substantially increase the complexity of the solution because of the separate name space, it would appear to require its own bitmask. Further keys aren't numeric, which then implies yet another mapping from internal key to number, etc. - -More thought is required here. - -### Merging - -The segmented index approach requires merging to prevent the number of segments from growing too large. - -Recent experience with LSMs has taught us that having the correct merge strategy can make a huge difference in the overall performance of the system. In particular, a simple merge strategy which merges segments too aggressively can lead to high write amplification and unnecessarily rendering cached data useless. - -A few simple principles have been identified. 
- -- Roughly we merge multiple smaller segments into a single larger one. -- The larger a segment gets the less likely we should be to ever merge it. -- Segments with large numbers of deleted/obsoleted items are good candidates as the merge will result in a space savings. -- Segments with all items deleted/obsoleted can be dropped. - -Merging of a segment should be able to proceed even if that segment is held by an ongoing snapshot, it should only delay the removal of it. diff --git a/vendor/github.com/blevesearch/bleve/v2/index/scorch/builder.go b/vendor/github.com/blevesearch/bleve/v2/index/scorch/builder.go deleted file mode 100644 index 3311bd02e..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/index/scorch/builder.go +++ /dev/null @@ -1,333 +0,0 @@ -// Copyright (c) 2019 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package scorch - -import ( - "fmt" - "io/ioutil" - "os" - "sync" - - "github.com/RoaringBitmap/roaring" - index "github.com/blevesearch/bleve_index_api" - segment "github.com/blevesearch/scorch_segment_api/v2" - bolt "go.etcd.io/bbolt" -) - -const DefaultBuilderBatchSize = 1000 -const DefaultBuilderMergeMax = 10 - -type Builder struct { - m sync.Mutex - segCount uint64 - path string - buildPath string - segPaths []string - batchSize int - mergeMax int - batch *index.Batch - internal map[string][]byte - segPlugin SegmentPlugin -} - -func NewBuilder(config map[string]interface{}) (*Builder, error) { - path, ok := config["path"].(string) - if !ok { - return nil, fmt.Errorf("must specify path") - } - - buildPathPrefix, _ := config["buildPathPrefix"].(string) - buildPath, err := ioutil.TempDir(buildPathPrefix, "scorch-offline-build") - if err != nil { - return nil, err - } - - rv := &Builder{ - path: path, - buildPath: buildPath, - mergeMax: DefaultBuilderMergeMax, - batchSize: DefaultBuilderBatchSize, - batch: index.NewBatch(), - segPlugin: defaultSegmentPlugin, - } - - err = rv.parseConfig(config) - if err != nil { - return nil, fmt.Errorf("error parsing builder config: %v", err) - } - - return rv, nil -} - -func (o *Builder) parseConfig(config map[string]interface{}) (err error) { - if v, ok := config["mergeMax"]; ok { - var t int - if t, err = parseToInteger(v); err != nil { - return fmt.Errorf("mergeMax parse err: %v", err) - } - if t > 0 { - o.mergeMax = t - } - } - - if v, ok := config["batchSize"]; ok { - var t int - if t, err = parseToInteger(v); err != nil { - return fmt.Errorf("batchSize parse err: %v", err) - } - if t > 0 { - o.batchSize = t - } - } - - if v, ok := config["internal"]; ok { - if vinternal, ok := v.(map[string][]byte); ok { - o.internal = vinternal - } - } - - forcedSegmentType, forcedSegmentVersion, err := configForceSegmentTypeVersion(config) - if err != nil { - return err - } - if forcedSegmentType != "" && forcedSegmentVersion != 0 { - 
segPlugin, err := chooseSegmentPlugin(forcedSegmentType, - uint32(forcedSegmentVersion)) - if err != nil { - return err - } - o.segPlugin = segPlugin - } - - return nil -} - -// Index will place the document into the index. -// It is invalid to index the same document multiple times. -func (o *Builder) Index(doc index.Document) error { - o.m.Lock() - defer o.m.Unlock() - - o.batch.Update(doc) - - return o.maybeFlushBatchLOCKED(o.batchSize) -} - -func (o *Builder) maybeFlushBatchLOCKED(moreThan int) error { - if len(o.batch.IndexOps) >= moreThan { - defer o.batch.Reset() - return o.executeBatchLOCKED(o.batch) - } - return nil -} - -func (o *Builder) executeBatchLOCKED(batch *index.Batch) (err error) { - analysisResults := make([]index.Document, 0, len(batch.IndexOps)) - for _, doc := range batch.IndexOps { - if doc != nil { - // insert _id field - doc.AddIDField() - // perform analysis directly - analyze(doc) - analysisResults = append(analysisResults, doc) - } - } - - seg, _, err := o.segPlugin.New(analysisResults) - if err != nil { - return fmt.Errorf("error building segment base: %v", err) - } - - filename := zapFileName(o.segCount) - o.segCount++ - path := o.buildPath + string(os.PathSeparator) + filename - - if segUnpersisted, ok := seg.(segment.UnpersistedSegment); ok { - err = segUnpersisted.Persist(path) - if err != nil { - return fmt.Errorf("error persisting segment base to %s: %v", path, err) - } - - o.segPaths = append(o.segPaths, path) - return nil - } - - return fmt.Errorf("new segment does not implement unpersisted: %T", seg) -} - -func (o *Builder) doMerge() error { - // as long as we have more than 1 segment, keep merging - for len(o.segPaths) > 1 { - - // merge the next number of segments into one new one - // or, if there are fewer than remaining, merge them all - mergeCount := o.mergeMax - if mergeCount > len(o.segPaths) { - mergeCount = len(o.segPaths) - } - - mergePaths := o.segPaths[0:mergeCount] - o.segPaths = o.segPaths[mergeCount:] - - // 
open each of the segments to be merged - mergeSegs := make([]segment.Segment, 0, mergeCount) - - // closeOpenedSegs attempts to close all opened - // segments even if an error occurs, in which case - // the first error is returned - closeOpenedSegs := func() error { - var err error - for _, seg := range mergeSegs { - clErr := seg.Close() - if clErr != nil && err == nil { - err = clErr - } - } - return err - } - - for _, mergePath := range mergePaths { - seg, err := o.segPlugin.Open(mergePath) - if err != nil { - _ = closeOpenedSegs() - return fmt.Errorf("error opening segment (%s) for merge: %v", mergePath, err) - } - mergeSegs = append(mergeSegs, seg) - } - - // do the merge - mergedSegPath := o.buildPath + string(os.PathSeparator) + zapFileName(o.segCount) - drops := make([]*roaring.Bitmap, mergeCount) - _, _, err := o.segPlugin.Merge(mergeSegs, drops, mergedSegPath, nil, nil) - if err != nil { - _ = closeOpenedSegs() - return fmt.Errorf("error merging segments (%v): %v", mergePaths, err) - } - o.segCount++ - o.segPaths = append(o.segPaths, mergedSegPath) - - // close segments opened for merge - err = closeOpenedSegs() - if err != nil { - return fmt.Errorf("error closing opened segments: %v", err) - } - - // remove merged segments - for _, mergePath := range mergePaths { - err = os.RemoveAll(mergePath) - if err != nil { - return fmt.Errorf("error removing segment %s after merge: %v", mergePath, err) - } - } - } - - return nil -} - -func (o *Builder) Close() error { - o.m.Lock() - defer o.m.Unlock() - - // see if there is a partial batch - err := o.maybeFlushBatchLOCKED(1) - if err != nil { - return fmt.Errorf("error flushing batch before close: %v", err) - } - - // perform all the merging - err = o.doMerge() - if err != nil { - return fmt.Errorf("error while merging: %v", err) - } - - // ensure the store path exists - err = os.MkdirAll(o.path, 0700) - if err != nil { - return err - } - - // move final segment into place - // segment id 2 is chosen to match the 
behavior of a scorch - // index which indexes a single batch of data - finalSegPath := o.path + string(os.PathSeparator) + zapFileName(2) - err = os.Rename(o.segPaths[0], finalSegPath) - if err != nil { - return fmt.Errorf("error moving final segment into place: %v", err) - } - - // remove the buildPath, as it is no longer needed - err = os.RemoveAll(o.buildPath) - if err != nil { - return fmt.Errorf("error removing build path: %v", err) - } - - // prepare wrapping - seg, err := o.segPlugin.Open(finalSegPath) - if err != nil { - return fmt.Errorf("error opening final segment") - } - - // create a segment snapshot for this segment - ss := &SegmentSnapshot{ - segment: seg, - } - is := &IndexSnapshot{ - epoch: 3, // chosen to match scorch behavior when indexing a single batch - segment: []*SegmentSnapshot{ss}, - creator: "scorch-builder", - internal: o.internal, - } - - // create the root bolt - rootBoltPath := o.path + string(os.PathSeparator) + "root.bolt" - rootBolt, err := bolt.Open(rootBoltPath, 0600, nil) - if err != nil { - return err - } - - // start a write transaction - tx, err := rootBolt.Begin(true) - if err != nil { - return err - } - - // fill the root bolt with this fake index snapshot - _, _, err = prepareBoltSnapshot(is, tx, o.path, o.segPlugin) - if err != nil { - _ = tx.Rollback() - _ = rootBolt.Close() - return fmt.Errorf("error preparing bolt snapshot in root.bolt: %v", err) - } - - // commit bolt data - err = tx.Commit() - if err != nil { - _ = rootBolt.Close() - return fmt.Errorf("error committing bolt tx in root.bolt: %v", err) - } - - // close bolt - err = rootBolt.Close() - if err != nil { - return fmt.Errorf("error closing root.bolt: %v", err) - } - - // close final segment - err = seg.Close() - if err != nil { - return fmt.Errorf("error closing final segment: %v", err) - } - return nil -} diff --git a/vendor/github.com/blevesearch/bleve/v2/index/scorch/event.go b/vendor/github.com/blevesearch/bleve/v2/index/scorch/event.go deleted file mode 
100644 index 8f3fc1914..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/index/scorch/event.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright (c) 2018 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package scorch - -import "time" - -// RegistryAsyncErrorCallbacks should be treated as read-only after -// process init()'ialization. -var RegistryAsyncErrorCallbacks = map[string]func(error){} - -// RegistryEventCallbacks should be treated as read-only after -// process init()'ialization. -var RegistryEventCallbacks = map[string]func(Event){} - -// Event represents the information provided in an OnEvent() callback. -type Event struct { - Kind EventKind - Scorch *Scorch - Duration time.Duration -} - -// EventKind represents an event code for OnEvent() callbacks. -type EventKind int - -// EventKindCloseStart is fired when a Scorch.Close() has begun. -var EventKindCloseStart = EventKind(1) - -// EventKindClose is fired when a scorch index has been fully closed. -var EventKindClose = EventKind(2) - -// EventKindMergerProgress is fired when the merger has completed a -// round of merge processing. -var EventKindMergerProgress = EventKind(3) - -// EventKindPersisterProgress is fired when the persister has completed -// a round of persistence processing. -var EventKindPersisterProgress = EventKind(4) - -// EventKindBatchIntroductionStart is fired when Batch() is invoked which -// introduces a new segment. 
-var EventKindBatchIntroductionStart = EventKind(5) - -// EventKindBatchIntroduction is fired when Batch() completes. -var EventKindBatchIntroduction = EventKind(6) - -// EventKindMergeTaskIntroductionStart is fired when the merger is about to -// start the introduction of merged segment from a single merge task. -var EventKindMergeTaskIntroductionStart = EventKind(7) - -// EventKindMergeTaskIntroduction is fired when the merger has completed -// the introduction of merged segment from a single merge task. -var EventKindMergeTaskIntroduction = EventKind(8) diff --git a/vendor/github.com/blevesearch/bleve/v2/index/scorch/int.go b/vendor/github.com/blevesearch/bleve/v2/index/scorch/int.go deleted file mode 100644 index 4fa6d7f71..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/index/scorch/int.go +++ /dev/null @@ -1,92 +0,0 @@ -// Copyright 2014 The Cockroach Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -// implied. See the License for the specific language governing -// permissions and limitations under the License. - -// This code originated from: -// https://github.com/cockroachdb/cockroach/blob/2dd65dde5d90c157f4b93f92502ca1063b904e1d/pkg/util/encoding/encoding.go - -// Modified to not use pkg/errors - -package scorch - -import "fmt" - -const ( - // intMin is chosen such that the range of int tags does not overlap the - // ascii character set that is frequently used in testing. - intMin = 0x80 // 128 - intMaxWidth = 8 - intZero = intMin + intMaxWidth // 136 - intSmall = intMax - intZero - intMaxWidth // 109 - // intMax is the maximum int tag value. 
- intMax = 0xfd // 253 -) - -// encodeUvarintAscending encodes the uint64 value using a variable length -// (length-prefixed) representation. The length is encoded as a single -// byte indicating the number of encoded bytes (-8) to follow. See -// EncodeVarintAscending for rationale. The encoded bytes are appended to the -// supplied buffer and the final buffer is returned. -func encodeUvarintAscending(b []byte, v uint64) []byte { - switch { - case v <= intSmall: - return append(b, intZero+byte(v)) - case v <= 0xff: - return append(b, intMax-7, byte(v)) - case v <= 0xffff: - return append(b, intMax-6, byte(v>>8), byte(v)) - case v <= 0xffffff: - return append(b, intMax-5, byte(v>>16), byte(v>>8), byte(v)) - case v <= 0xffffffff: - return append(b, intMax-4, byte(v>>24), byte(v>>16), byte(v>>8), byte(v)) - case v <= 0xffffffffff: - return append(b, intMax-3, byte(v>>32), byte(v>>24), byte(v>>16), byte(v>>8), - byte(v)) - case v <= 0xffffffffffff: - return append(b, intMax-2, byte(v>>40), byte(v>>32), byte(v>>24), byte(v>>16), - byte(v>>8), byte(v)) - case v <= 0xffffffffffffff: - return append(b, intMax-1, byte(v>>48), byte(v>>40), byte(v>>32), byte(v>>24), - byte(v>>16), byte(v>>8), byte(v)) - default: - return append(b, intMax, byte(v>>56), byte(v>>48), byte(v>>40), byte(v>>32), - byte(v>>24), byte(v>>16), byte(v>>8), byte(v)) - } -} - -// decodeUvarintAscending decodes a varint encoded uint64 from the input -// buffer. The remainder of the input buffer and the decoded uint64 -// are returned. 
-func decodeUvarintAscending(b []byte) ([]byte, uint64, error) { - if len(b) == 0 { - return nil, 0, fmt.Errorf("insufficient bytes to decode uvarint value") - } - length := int(b[0]) - intZero - b = b[1:] // skip length byte - if length <= intSmall { - return b, uint64(length), nil - } - length -= intSmall - if length < 0 || length > 8 { - return nil, 0, fmt.Errorf("invalid uvarint length of %d", length) - } else if len(b) < length { - return nil, 0, fmt.Errorf("insufficient bytes to decode uvarint value: %q", b) - } - var v uint64 - // It is faster to range over the elements in a slice than to index - // into the slice on each loop iteration. - for _, t := range b[:length] { - v = (v << 8) | uint64(t) - } - return b[length:], v, nil -} diff --git a/vendor/github.com/blevesearch/bleve/v2/index/scorch/introducer.go b/vendor/github.com/blevesearch/bleve/v2/index/scorch/introducer.go deleted file mode 100644 index 8516d411d..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/index/scorch/introducer.go +++ /dev/null @@ -1,449 +0,0 @@ -// Copyright (c) 2017 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package scorch - -import ( - "fmt" - "sync/atomic" - - "github.com/RoaringBitmap/roaring" - index "github.com/blevesearch/bleve_index_api" - segment "github.com/blevesearch/scorch_segment_api/v2" -) - -type segmentIntroduction struct { - id uint64 - data segment.Segment - obsoletes map[uint64]*roaring.Bitmap - ids []string - internal map[string][]byte - - applied chan error - persisted chan error - persistedCallback index.BatchCallback -} - -type persistIntroduction struct { - persisted map[uint64]segment.Segment - applied notificationChan -} - -type epochWatcher struct { - epoch uint64 - notifyCh notificationChan -} - -func (s *Scorch) introducerLoop() { - var epochWatchers []*epochWatcher -OUTER: - for { - atomic.AddUint64(&s.stats.TotIntroduceLoop, 1) - - select { - case <-s.closeCh: - break OUTER - - case epochWatcher := <-s.introducerNotifier: - epochWatchers = append(epochWatchers, epochWatcher) - - case nextMerge := <-s.merges: - s.introduceMerge(nextMerge) - - case next := <-s.introductions: - err := s.introduceSegment(next) - if err != nil { - continue OUTER - } - - case persist := <-s.persists: - s.introducePersist(persist) - - } - - var epochCurr uint64 - s.rootLock.RLock() - if s.root != nil { - epochCurr = s.root.epoch - } - s.rootLock.RUnlock() - var epochWatchersNext []*epochWatcher - for _, w := range epochWatchers { - if w.epoch < epochCurr { - close(w.notifyCh) - } else { - epochWatchersNext = append(epochWatchersNext, w) - } - } - epochWatchers = epochWatchersNext - } - - s.asyncTasks.Done() -} - -func (s *Scorch) introduceSegment(next *segmentIntroduction) error { - atomic.AddUint64(&s.stats.TotIntroduceSegmentBeg, 1) - defer atomic.AddUint64(&s.stats.TotIntroduceSegmentEnd, 1) - - s.rootLock.RLock() - root := s.root - root.AddRef() - s.rootLock.RUnlock() - - defer func() { _ = root.DecRef() }() - - nsegs := len(root.segment) - - // prepare new index snapshot - newSnapshot := &IndexSnapshot{ - parent: s, - segment: make([]*SegmentSnapshot, 0, 
nsegs+1), - offsets: make([]uint64, 0, nsegs+1), - internal: make(map[string][]byte, len(root.internal)), - refs: 1, - creator: "introduceSegment", - } - - // iterate through current segments - var running uint64 - var docsToPersistCount, memSegments, fileSegments uint64 - for i := range root.segment { - // see if optimistic work included this segment - delta, ok := next.obsoletes[root.segment[i].id] - if !ok { - var err error - delta, err = root.segment[i].segment.DocNumbers(next.ids) - if err != nil { - next.applied <- fmt.Errorf("error computing doc numbers: %v", err) - close(next.applied) - _ = newSnapshot.DecRef() - return err - } - } - - newss := &SegmentSnapshot{ - id: root.segment[i].id, - segment: root.segment[i].segment, - cachedDocs: root.segment[i].cachedDocs, - creator: root.segment[i].creator, - } - - // apply new obsoletions - if root.segment[i].deleted == nil { - newss.deleted = delta - } else { - newss.deleted = roaring.Or(root.segment[i].deleted, delta) - } - if newss.deleted.IsEmpty() { - newss.deleted = nil - } - - // check for live size before copying - if newss.LiveSize() > 0 { - newSnapshot.segment = append(newSnapshot.segment, newss) - root.segment[i].segment.AddRef() - newSnapshot.offsets = append(newSnapshot.offsets, running) - running += newss.segment.Count() - } - - if isMemorySegment(root.segment[i]) { - docsToPersistCount += root.segment[i].Count() - memSegments++ - } else { - fileSegments++ - } - } - - atomic.StoreUint64(&s.stats.TotItemsToPersist, docsToPersistCount) - atomic.StoreUint64(&s.stats.TotMemorySegmentsAtRoot, memSegments) - atomic.StoreUint64(&s.stats.TotFileSegmentsAtRoot, fileSegments) - - // append new segment, if any, to end of the new index snapshot - if next.data != nil { - newSegmentSnapshot := &SegmentSnapshot{ - id: next.id, - segment: next.data, // take ownership of next.data's ref-count - cachedDocs: &cachedDocs{cache: nil}, - creator: "introduceSegment", - } - newSnapshot.segment = append(newSnapshot.segment, 
newSegmentSnapshot) - newSnapshot.offsets = append(newSnapshot.offsets, running) - - // increment numItemsIntroduced which tracks the number of items - // queued for persistence. - atomic.AddUint64(&s.stats.TotIntroducedItems, newSegmentSnapshot.Count()) - atomic.AddUint64(&s.stats.TotIntroducedSegmentsBatch, 1) - } - // copy old values - for key, oldVal := range root.internal { - newSnapshot.internal[key] = oldVal - } - // set new values and apply deletes - for key, newVal := range next.internal { - if newVal != nil { - newSnapshot.internal[key] = newVal - } else { - delete(newSnapshot.internal, key) - } - } - - newSnapshot.updateSize() - s.rootLock.Lock() - if next.persisted != nil { - s.rootPersisted = append(s.rootPersisted, next.persisted) - } - if next.persistedCallback != nil { - s.persistedCallbacks = append(s.persistedCallbacks, next.persistedCallback) - } - // swap in new index snapshot - newSnapshot.epoch = s.nextSnapshotEpoch - s.nextSnapshotEpoch++ - rootPrev := s.root - s.root = newSnapshot - atomic.StoreUint64(&s.stats.CurRootEpoch, s.root.epoch) - // release lock - s.rootLock.Unlock() - - if rootPrev != nil { - _ = rootPrev.DecRef() - } - - close(next.applied) - - return nil -} - -func (s *Scorch) introducePersist(persist *persistIntroduction) { - atomic.AddUint64(&s.stats.TotIntroducePersistBeg, 1) - defer atomic.AddUint64(&s.stats.TotIntroducePersistEnd, 1) - - s.rootLock.Lock() - root := s.root - root.AddRef() - nextSnapshotEpoch := s.nextSnapshotEpoch - s.nextSnapshotEpoch++ - s.rootLock.Unlock() - - defer func() { _ = root.DecRef() }() - - newIndexSnapshot := &IndexSnapshot{ - parent: s, - epoch: nextSnapshotEpoch, - segment: make([]*SegmentSnapshot, len(root.segment)), - offsets: make([]uint64, len(root.offsets)), - internal: make(map[string][]byte, len(root.internal)), - refs: 1, - creator: "introducePersist", - } - - var docsToPersistCount, memSegments, fileSegments uint64 - for i, segmentSnapshot := range root.segment { - // see if this 
segment has been replaced - if replacement, ok := persist.persisted[segmentSnapshot.id]; ok { - newSegmentSnapshot := &SegmentSnapshot{ - id: segmentSnapshot.id, - segment: replacement, - deleted: segmentSnapshot.deleted, - cachedDocs: segmentSnapshot.cachedDocs, - creator: "introducePersist", - } - newIndexSnapshot.segment[i] = newSegmentSnapshot - delete(persist.persisted, segmentSnapshot.id) - - // update items persisted incase of a new segment snapshot - atomic.AddUint64(&s.stats.TotPersistedItems, newSegmentSnapshot.Count()) - atomic.AddUint64(&s.stats.TotPersistedSegments, 1) - fileSegments++ - } else { - newIndexSnapshot.segment[i] = root.segment[i] - newIndexSnapshot.segment[i].segment.AddRef() - - if isMemorySegment(root.segment[i]) { - docsToPersistCount += root.segment[i].Count() - memSegments++ - } else { - fileSegments++ - } - } - newIndexSnapshot.offsets[i] = root.offsets[i] - } - - for k, v := range root.internal { - newIndexSnapshot.internal[k] = v - } - - atomic.StoreUint64(&s.stats.TotItemsToPersist, docsToPersistCount) - atomic.StoreUint64(&s.stats.TotMemorySegmentsAtRoot, memSegments) - atomic.StoreUint64(&s.stats.TotFileSegmentsAtRoot, fileSegments) - newIndexSnapshot.updateSize() - s.rootLock.Lock() - rootPrev := s.root - s.root = newIndexSnapshot - atomic.StoreUint64(&s.stats.CurRootEpoch, s.root.epoch) - s.rootLock.Unlock() - - if rootPrev != nil { - _ = rootPrev.DecRef() - } - - close(persist.applied) -} - -// The introducer should definitely handle the segmentMerge.notify -// channel before exiting the introduceMerge. 
-func (s *Scorch) introduceMerge(nextMerge *segmentMerge) { - atomic.AddUint64(&s.stats.TotIntroduceMergeBeg, 1) - defer atomic.AddUint64(&s.stats.TotIntroduceMergeEnd, 1) - - s.rootLock.RLock() - root := s.root - root.AddRef() - s.rootLock.RUnlock() - - defer func() { _ = root.DecRef() }() - - newSnapshot := &IndexSnapshot{ - parent: s, - internal: root.internal, - refs: 1, - creator: "introduceMerge", - } - - // iterate through current segments - newSegmentDeleted := roaring.NewBitmap() - var running, docsToPersistCount, memSegments, fileSegments uint64 - for i := range root.segment { - segmentID := root.segment[i].id - if segSnapAtMerge, ok := nextMerge.old[segmentID]; ok { - // this segment is going away, see if anything else was deleted since we started the merge - if segSnapAtMerge != nil && root.segment[i].deleted != nil { - // assume all these deletes are new - deletedSince := root.segment[i].deleted - // if we already knew about some of them, remove - if segSnapAtMerge.deleted != nil { - deletedSince = roaring.AndNot(root.segment[i].deleted, segSnapAtMerge.deleted) - } - deletedSinceItr := deletedSince.Iterator() - for deletedSinceItr.HasNext() { - oldDocNum := deletedSinceItr.Next() - newDocNum := nextMerge.oldNewDocNums[segmentID][oldDocNum] - newSegmentDeleted.Add(uint32(newDocNum)) - } - } - // clean up the old segment map to figure out the - // obsolete segments wrt root in meantime, whatever - // segments left behind in old map after processing - // the root segments would be the obsolete segment set - delete(nextMerge.old, segmentID) - } else if root.segment[i].LiveSize() > 0 { - // this segment is staying - newSnapshot.segment = append(newSnapshot.segment, &SegmentSnapshot{ - id: root.segment[i].id, - segment: root.segment[i].segment, - deleted: root.segment[i].deleted, - cachedDocs: root.segment[i].cachedDocs, - creator: root.segment[i].creator, - }) - root.segment[i].segment.AddRef() - newSnapshot.offsets = append(newSnapshot.offsets, running) - 
running += root.segment[i].segment.Count() - - if isMemorySegment(root.segment[i]) { - docsToPersistCount += root.segment[i].Count() - memSegments++ - } else { - fileSegments++ - } - } - - } - - // before the newMerge introduction, need to clean the newly - // merged segment wrt the current root segments, hence - // applying the obsolete segment contents to newly merged segment - for segID, ss := range nextMerge.old { - obsoleted := ss.DocNumbersLive() - if obsoleted != nil { - obsoletedIter := obsoleted.Iterator() - for obsoletedIter.HasNext() { - oldDocNum := obsoletedIter.Next() - newDocNum := nextMerge.oldNewDocNums[segID][oldDocNum] - newSegmentDeleted.Add(uint32(newDocNum)) - } - } - } - var skipped bool - // In case where all the docs in the newly merged segment getting - // deleted by the time we reach here, can skip the introduction. - if nextMerge.new != nil && - nextMerge.new.Count() > newSegmentDeleted.GetCardinality() { - // put new segment at end - newSnapshot.segment = append(newSnapshot.segment, &SegmentSnapshot{ - id: nextMerge.id, - segment: nextMerge.new, // take ownership for nextMerge.new's ref-count - deleted: newSegmentDeleted, - cachedDocs: &cachedDocs{cache: nil}, - creator: "introduceMerge", - }) - newSnapshot.offsets = append(newSnapshot.offsets, running) - atomic.AddUint64(&s.stats.TotIntroducedSegmentsMerge, 1) - - switch nextMerge.new.(type) { - case segment.PersistedSegment: - fileSegments++ - default: - docsToPersistCount += nextMerge.new.Count() - newSegmentDeleted.GetCardinality() - memSegments++ - } - } else { - skipped = true - atomic.AddUint64(&s.stats.TotFileMergeIntroductionsObsoleted, 1) - } - - atomic.StoreUint64(&s.stats.TotItemsToPersist, docsToPersistCount) - atomic.StoreUint64(&s.stats.TotMemorySegmentsAtRoot, memSegments) - atomic.StoreUint64(&s.stats.TotFileSegmentsAtRoot, fileSegments) - - newSnapshot.AddRef() // 1 ref for the nextMerge.notify response - - newSnapshot.updateSize() - s.rootLock.Lock() - // swap in new 
index snapshot - newSnapshot.epoch = s.nextSnapshotEpoch - s.nextSnapshotEpoch++ - rootPrev := s.root - s.root = newSnapshot - atomic.StoreUint64(&s.stats.CurRootEpoch, s.root.epoch) - // release lock - s.rootLock.Unlock() - - if rootPrev != nil { - _ = rootPrev.DecRef() - } - - // notify requester that we incorporated this - nextMerge.notifyCh <- &mergeTaskIntroStatus{ - indexSnapshot: newSnapshot, - skipped: skipped} - close(nextMerge.notifyCh) -} - -func isMemorySegment(s *SegmentSnapshot) bool { - switch s.segment.(type) { - case segment.PersistedSegment: - return false - default: - return true - } -} diff --git a/vendor/github.com/blevesearch/bleve/v2/index/scorch/merge.go b/vendor/github.com/blevesearch/bleve/v2/index/scorch/merge.go deleted file mode 100644 index 1e269afc6..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/index/scorch/merge.go +++ /dev/null @@ -1,504 +0,0 @@ -// Copyright (c) 2017 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package scorch - -import ( - "context" - "encoding/json" - "fmt" - "os" - "strings" - "sync/atomic" - "time" - - "github.com/RoaringBitmap/roaring" - "github.com/blevesearch/bleve/v2/index/scorch/mergeplan" - segment "github.com/blevesearch/scorch_segment_api/v2" -) - -func (s *Scorch) mergerLoop() { - var lastEpochMergePlanned uint64 - var ctrlMsg *mergerCtrl - mergePlannerOptions, err := s.parseMergePlannerOptions() - if err != nil { - s.fireAsyncError(fmt.Errorf("mergePlannerOption json parsing err: %v", err)) - s.asyncTasks.Done() - return - } - ctrlMsgDflt := &mergerCtrl{ctx: context.Background(), - options: mergePlannerOptions, - doneCh: nil} - -OUTER: - for { - atomic.AddUint64(&s.stats.TotFileMergeLoopBeg, 1) - - select { - case <-s.closeCh: - break OUTER - - default: - // check to see if there is a new snapshot to persist - s.rootLock.Lock() - ourSnapshot := s.root - ourSnapshot.AddRef() - atomic.StoreUint64(&s.iStats.mergeSnapshotSize, uint64(ourSnapshot.Size())) - atomic.StoreUint64(&s.iStats.mergeEpoch, ourSnapshot.epoch) - s.rootLock.Unlock() - - if ctrlMsg == nil && ourSnapshot.epoch != lastEpochMergePlanned { - ctrlMsg = ctrlMsgDflt - } - if ctrlMsg != nil { - startTime := time.Now() - - // lets get started - err := s.planMergeAtSnapshot(ctrlMsg.ctx, ctrlMsg.options, - ourSnapshot) - if err != nil { - atomic.StoreUint64(&s.iStats.mergeEpoch, 0) - if err == segment.ErrClosed { - // index has been closed - _ = ourSnapshot.DecRef() - - // continue the workloop on a user triggered cancel - if ctrlMsg.doneCh != nil { - close(ctrlMsg.doneCh) - ctrlMsg = nil - continue OUTER - } - - // exit the workloop on index closure - ctrlMsg = nil - break OUTER - } - s.fireAsyncError(fmt.Errorf("merging err: %v", err)) - _ = ourSnapshot.DecRef() - atomic.AddUint64(&s.stats.TotFileMergeLoopErr, 1) - continue OUTER - } - - if ctrlMsg.doneCh != nil { - close(ctrlMsg.doneCh) - } - ctrlMsg = nil - - lastEpochMergePlanned = ourSnapshot.epoch - - 
atomic.StoreUint64(&s.stats.LastMergedEpoch, ourSnapshot.epoch) - - s.fireEvent(EventKindMergerProgress, time.Since(startTime)) - } - _ = ourSnapshot.DecRef() - - // tell the persister we're waiting for changes - // first make a epochWatcher chan - ew := &epochWatcher{ - epoch: lastEpochMergePlanned, - notifyCh: make(notificationChan, 1), - } - - // give it to the persister - select { - case <-s.closeCh: - break OUTER - case s.persisterNotifier <- ew: - case ctrlMsg = <-s.forceMergeRequestCh: - continue OUTER - } - - // now wait for persister (but also detect close) - select { - case <-s.closeCh: - break OUTER - case <-ew.notifyCh: - case ctrlMsg = <-s.forceMergeRequestCh: - } - } - - atomic.AddUint64(&s.stats.TotFileMergeLoopEnd, 1) - } - - s.asyncTasks.Done() -} - -type mergerCtrl struct { - ctx context.Context - options *mergeplan.MergePlanOptions - doneCh chan struct{} -} - -// ForceMerge helps users trigger a merge operation on -// an online scorch index. -func (s *Scorch) ForceMerge(ctx context.Context, - mo *mergeplan.MergePlanOptions) error { - // check whether force merge is already under processing - s.rootLock.Lock() - if s.stats.TotFileMergeForceOpsStarted > - s.stats.TotFileMergeForceOpsCompleted { - s.rootLock.Unlock() - return fmt.Errorf("force merge already in progress") - } - - s.stats.TotFileMergeForceOpsStarted++ - s.rootLock.Unlock() - - if mo != nil { - err := mergeplan.ValidateMergePlannerOptions(mo) - if err != nil { - return err - } - } else { - // assume the default single segment merge policy - mo = &mergeplan.SingleSegmentMergePlanOptions - } - msg := &mergerCtrl{options: mo, - doneCh: make(chan struct{}), - ctx: ctx, - } - - // request the merger perform a force merge - select { - case s.forceMergeRequestCh <- msg: - case <-s.closeCh: - return nil - } - - // wait for the force merge operation completion - select { - case <-msg.doneCh: - atomic.AddUint64(&s.stats.TotFileMergeForceOpsCompleted, 1) - case <-s.closeCh: - } - - return nil -} 
- -func (s *Scorch) parseMergePlannerOptions() (*mergeplan.MergePlanOptions, - error) { - mergePlannerOptions := mergeplan.DefaultMergePlanOptions - if v, ok := s.config["scorchMergePlanOptions"]; ok { - b, err := json.Marshal(v) - if err != nil { - return &mergePlannerOptions, err - } - - err = json.Unmarshal(b, &mergePlannerOptions) - if err != nil { - return &mergePlannerOptions, err - } - - err = mergeplan.ValidateMergePlannerOptions(&mergePlannerOptions) - if err != nil { - return nil, err - } - } - return &mergePlannerOptions, nil -} - -type closeChWrapper struct { - ch1 chan struct{} - ctx context.Context - closeCh chan struct{} -} - -func newCloseChWrapper(ch1 chan struct{}, - ctx context.Context) *closeChWrapper { - return &closeChWrapper{ch1: ch1, - ctx: ctx, - closeCh: make(chan struct{})} -} - -func (w *closeChWrapper) close() { - select { - case <-w.closeCh: - default: - close(w.closeCh) - } -} - -func (w *closeChWrapper) listen() { - select { - case <-w.ch1: - w.close() - case <-w.ctx.Done(): - w.close() - case <-w.closeCh: - } -} - -func (s *Scorch) planMergeAtSnapshot(ctx context.Context, - options *mergeplan.MergePlanOptions, ourSnapshot *IndexSnapshot) error { - // build list of persisted segments in this snapshot - var onlyPersistedSnapshots []mergeplan.Segment - for _, segmentSnapshot := range ourSnapshot.segment { - if _, ok := segmentSnapshot.segment.(segment.PersistedSegment); ok { - onlyPersistedSnapshots = append(onlyPersistedSnapshots, segmentSnapshot) - } - } - - atomic.AddUint64(&s.stats.TotFileMergePlan, 1) - - // give this list to the planner - resultMergePlan, err := mergeplan.Plan(onlyPersistedSnapshots, options) - if err != nil { - atomic.AddUint64(&s.stats.TotFileMergePlanErr, 1) - return fmt.Errorf("merge planning err: %v", err) - } - if resultMergePlan == nil { - // nothing to do - atomic.AddUint64(&s.stats.TotFileMergePlanNone, 1) - return nil - } - atomic.AddUint64(&s.stats.TotFileMergePlanOk, 1) - - 
atomic.AddUint64(&s.stats.TotFileMergePlanTasks, uint64(len(resultMergePlan.Tasks))) - - // process tasks in serial for now - var filenames []string - - cw := newCloseChWrapper(s.closeCh, ctx) - defer cw.close() - - go cw.listen() - - for _, task := range resultMergePlan.Tasks { - if len(task.Segments) == 0 { - atomic.AddUint64(&s.stats.TotFileMergePlanTasksSegmentsEmpty, 1) - continue - } - - atomic.AddUint64(&s.stats.TotFileMergePlanTasksSegments, uint64(len(task.Segments))) - - oldMap := make(map[uint64]*SegmentSnapshot) - newSegmentID := atomic.AddUint64(&s.nextSegmentID, 1) - segmentsToMerge := make([]segment.Segment, 0, len(task.Segments)) - docsToDrop := make([]*roaring.Bitmap, 0, len(task.Segments)) - - for _, planSegment := range task.Segments { - if segSnapshot, ok := planSegment.(*SegmentSnapshot); ok { - oldMap[segSnapshot.id] = segSnapshot - if persistedSeg, ok := segSnapshot.segment.(segment.PersistedSegment); ok { - if segSnapshot.LiveSize() == 0 { - atomic.AddUint64(&s.stats.TotFileMergeSegmentsEmpty, 1) - oldMap[segSnapshot.id] = nil - } else { - segmentsToMerge = append(segmentsToMerge, segSnapshot.segment) - docsToDrop = append(docsToDrop, segSnapshot.deleted) - } - // track the files getting merged for unsetting the - // removal ineligibility. This helps to unflip files - // even with fast merger, slow persister work flows. 
- path := persistedSeg.Path() - filenames = append(filenames, - strings.TrimPrefix(path, s.path+string(os.PathSeparator))) - } - } - } - - var oldNewDocNums map[uint64][]uint64 - var seg segment.Segment - var filename string - if len(segmentsToMerge) > 0 { - filename = zapFileName(newSegmentID) - s.markIneligibleForRemoval(filename) - path := s.path + string(os.PathSeparator) + filename - - fileMergeZapStartTime := time.Now() - - atomic.AddUint64(&s.stats.TotFileMergeZapBeg, 1) - newDocNums, _, err := s.segPlugin.Merge(segmentsToMerge, docsToDrop, path, - cw.closeCh, s) - atomic.AddUint64(&s.stats.TotFileMergeZapEnd, 1) - - fileMergeZapTime := uint64(time.Since(fileMergeZapStartTime)) - atomic.AddUint64(&s.stats.TotFileMergeZapTime, fileMergeZapTime) - if atomic.LoadUint64(&s.stats.MaxFileMergeZapTime) < fileMergeZapTime { - atomic.StoreUint64(&s.stats.MaxFileMergeZapTime, fileMergeZapTime) - } - - if err != nil { - s.unmarkIneligibleForRemoval(filename) - atomic.AddUint64(&s.stats.TotFileMergePlanTasksErr, 1) - if err == segment.ErrClosed { - return err - } - return fmt.Errorf("merging failed: %v", err) - } - - seg, err = s.segPlugin.Open(path) - if err != nil { - s.unmarkIneligibleForRemoval(filename) - atomic.AddUint64(&s.stats.TotFileMergePlanTasksErr, 1) - return err - } - oldNewDocNums = make(map[uint64][]uint64) - for i, segNewDocNums := range newDocNums { - oldNewDocNums[task.Segments[i].Id()] = segNewDocNums - } - - atomic.AddUint64(&s.stats.TotFileMergeSegments, uint64(len(segmentsToMerge))) - } - - sm := &segmentMerge{ - id: newSegmentID, - old: oldMap, - oldNewDocNums: oldNewDocNums, - new: seg, - notifyCh: make(chan *mergeTaskIntroStatus), - } - - s.fireEvent(EventKindMergeTaskIntroductionStart, 0) - - // give it to the introducer - select { - case <-s.closeCh: - _ = seg.Close() - return segment.ErrClosed - case s.merges <- sm: - atomic.AddUint64(&s.stats.TotFileMergeIntroductions, 1) - } - - introStartTime := time.Now() - // it is safe to blockingly 
wait for the merge introduction - // here as the introducer is bound to handle the notify channel. - introStatus := <-sm.notifyCh - introTime := uint64(time.Since(introStartTime)) - atomic.AddUint64(&s.stats.TotFileMergeZapIntroductionTime, introTime) - if atomic.LoadUint64(&s.stats.MaxFileMergeZapIntroductionTime) < introTime { - atomic.StoreUint64(&s.stats.MaxFileMergeZapIntroductionTime, introTime) - } - atomic.AddUint64(&s.stats.TotFileMergeIntroductionsDone, 1) - if introStatus != nil && introStatus.indexSnapshot != nil { - _ = introStatus.indexSnapshot.DecRef() - if introStatus.skipped { - // close the segment on skipping introduction. - s.unmarkIneligibleForRemoval(filename) - _ = seg.Close() - } - } - - atomic.AddUint64(&s.stats.TotFileMergePlanTasksDone, 1) - - s.fireEvent(EventKindMergeTaskIntroduction, 0) - } - - // once all the newly merged segment introductions are done, - // its safe to unflip the removal ineligibility for the replaced - // older segments - for _, f := range filenames { - s.unmarkIneligibleForRemoval(f) - } - - return nil -} - -type mergeTaskIntroStatus struct { - indexSnapshot *IndexSnapshot - skipped bool -} - -type segmentMerge struct { - id uint64 - old map[uint64]*SegmentSnapshot - oldNewDocNums map[uint64][]uint64 - new segment.Segment - notifyCh chan *mergeTaskIntroStatus -} - -// perform a merging of the given SegmentBase instances into a new, -// persisted segment, and synchronously introduce that new segment -// into the root -func (s *Scorch) mergeSegmentBases(snapshot *IndexSnapshot, - sbs []segment.Segment, sbsDrops []*roaring.Bitmap, - sbsIndexes []int) (*IndexSnapshot, uint64, error) { - atomic.AddUint64(&s.stats.TotMemMergeBeg, 1) - - memMergeZapStartTime := time.Now() - - atomic.AddUint64(&s.stats.TotMemMergeZapBeg, 1) - - newSegmentID := atomic.AddUint64(&s.nextSegmentID, 1) - filename := zapFileName(newSegmentID) - path := s.path + string(os.PathSeparator) + filename - - newDocNums, _, err := - 
s.segPlugin.Merge(sbs, sbsDrops, path, s.closeCh, s) - - atomic.AddUint64(&s.stats.TotMemMergeZapEnd, 1) - - memMergeZapTime := uint64(time.Since(memMergeZapStartTime)) - atomic.AddUint64(&s.stats.TotMemMergeZapTime, memMergeZapTime) - if atomic.LoadUint64(&s.stats.MaxMemMergeZapTime) < memMergeZapTime { - atomic.StoreUint64(&s.stats.MaxMemMergeZapTime, memMergeZapTime) - } - - if err != nil { - atomic.AddUint64(&s.stats.TotMemMergeErr, 1) - return nil, 0, err - } - - seg, err := s.segPlugin.Open(path) - if err != nil { - atomic.AddUint64(&s.stats.TotMemMergeErr, 1) - return nil, 0, err - } - - // update persisted stats - atomic.AddUint64(&s.stats.TotPersistedItems, seg.Count()) - atomic.AddUint64(&s.stats.TotPersistedSegments, 1) - - sm := &segmentMerge{ - id: newSegmentID, - old: make(map[uint64]*SegmentSnapshot), - oldNewDocNums: make(map[uint64][]uint64), - new: seg, - notifyCh: make(chan *mergeTaskIntroStatus), - } - - for i, idx := range sbsIndexes { - ss := snapshot.segment[idx] - sm.old[ss.id] = ss - sm.oldNewDocNums[ss.id] = newDocNums[i] - } - - select { // send to introducer - case <-s.closeCh: - _ = seg.DecRef() - return nil, 0, segment.ErrClosed - case s.merges <- sm: - } - - // blockingly wait for the introduction to complete - var newSnapshot *IndexSnapshot - introStatus := <-sm.notifyCh - if introStatus != nil && introStatus.indexSnapshot != nil { - newSnapshot = introStatus.indexSnapshot - atomic.AddUint64(&s.stats.TotMemMergeSegments, uint64(len(sbs))) - atomic.AddUint64(&s.stats.TotMemMergeDone, 1) - if introStatus.skipped { - // close the segment on skipping introduction. 
- _ = newSnapshot.DecRef() - _ = seg.Close() - newSnapshot = nil - } - } - - return newSnapshot, newSegmentID, nil -} - -func (s *Scorch) ReportBytesWritten(bytesWritten uint64) { - atomic.AddUint64(&s.stats.TotFileMergeWrittenBytes, bytesWritten) -} diff --git a/vendor/github.com/blevesearch/bleve/v2/index/scorch/persister.go b/vendor/github.com/blevesearch/bleve/v2/index/scorch/persister.go deleted file mode 100644 index 36a0379e6..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/index/scorch/persister.go +++ /dev/null @@ -1,990 +0,0 @@ -// Copyright (c) 2017 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package scorch - -import ( - "bytes" - "encoding/binary" - "encoding/json" - "fmt" - "io/ioutil" - "log" - "math" - "os" - "path/filepath" - "strconv" - "strings" - "sync/atomic" - "time" - - "github.com/RoaringBitmap/roaring" - index "github.com/blevesearch/bleve_index_api" - segment "github.com/blevesearch/scorch_segment_api/v2" - bolt "go.etcd.io/bbolt" -) - -// DefaultPersisterNapTimeMSec is kept to zero as this helps in direct -// persistence of segments with the default safe batch option. -// If the default safe batch option results in high number of -// files on disk, then users may initialise this configuration parameter -// with higher values so that the persister will nap a bit within it's -// work loop to favour better in-memory merging of segments to result -// in fewer segment files on disk. 
But that may come with an indexing -// performance overhead. -// Unsafe batch users are advised to override this to higher value -// for better performance especially with high data density. -var DefaultPersisterNapTimeMSec int = 0 // ms - -// DefaultPersisterNapUnderNumFiles helps in controlling the pace of -// persister. At times of a slow merger progress with heavy file merging -// operations, its better to pace down the persister for letting the merger -// to catch up within a range defined by this parameter. -// Fewer files on disk (as per the merge plan) would result in keeping the -// file handle usage under limit, faster disk merger and a healthier index. -// Its been observed that such a loosely sync'ed introducer-persister-merger -// trio results in better overall performance. -var DefaultPersisterNapUnderNumFiles int = 1000 - -var DefaultMemoryPressurePauseThreshold uint64 = math.MaxUint64 - -type persisterOptions struct { - // PersisterNapTimeMSec controls the wait/delay injected into - // persistence workloop to improve the chances for - // a healthier and heavier in-memory merging - PersisterNapTimeMSec int - - // PersisterNapTimeMSec > 0, and the number of files is less than - // PersisterNapUnderNumFiles, then the persister will sleep - // PersisterNapTimeMSec amount of time to improve the chances for - // a healthier and heavier in-memory merging - PersisterNapUnderNumFiles int - - // MemoryPressurePauseThreshold let persister to have a better leeway - // for prudently performing the memory merge of segments on a memory - // pressure situation. Here the config value is an upper threshold - // for the number of paused application threads. The default value would - // be a very high number to always favour the merging of memory segments. 
- MemoryPressurePauseThreshold uint64 -} - -type notificationChan chan struct{} - -func (s *Scorch) persisterLoop() { - defer s.asyncTasks.Done() - - var persistWatchers []*epochWatcher - var lastPersistedEpoch, lastMergedEpoch uint64 - var ew *epochWatcher - - var unpersistedCallbacks []index.BatchCallback - - po, err := s.parsePersisterOptions() - if err != nil { - s.fireAsyncError(fmt.Errorf("persisterOptions json parsing err: %v", err)) - s.asyncTasks.Done() - return - } - -OUTER: - for { - atomic.AddUint64(&s.stats.TotPersistLoopBeg, 1) - - select { - case <-s.closeCh: - break OUTER - case ew = <-s.persisterNotifier: - persistWatchers = append(persistWatchers, ew) - default: - } - if ew != nil && ew.epoch > lastMergedEpoch { - lastMergedEpoch = ew.epoch - } - lastMergedEpoch, persistWatchers = s.pausePersisterForMergerCatchUp(lastPersistedEpoch, - lastMergedEpoch, persistWatchers, po) - - var ourSnapshot *IndexSnapshot - var ourPersisted []chan error - var ourPersistedCallbacks []index.BatchCallback - - // check to see if there is a new snapshot to persist - s.rootLock.Lock() - if s.root != nil && s.root.epoch > lastPersistedEpoch { - ourSnapshot = s.root - ourSnapshot.AddRef() - ourPersisted = s.rootPersisted - s.rootPersisted = nil - ourPersistedCallbacks = s.persistedCallbacks - s.persistedCallbacks = nil - atomic.StoreUint64(&s.iStats.persistSnapshotSize, uint64(ourSnapshot.Size())) - atomic.StoreUint64(&s.iStats.persistEpoch, ourSnapshot.epoch) - } - s.rootLock.Unlock() - - if ourSnapshot != nil { - startTime := time.Now() - - err := s.persistSnapshot(ourSnapshot, po) - for _, ch := range ourPersisted { - if err != nil { - ch <- err - } - close(ch) - } - if err != nil { - atomic.StoreUint64(&s.iStats.persistEpoch, 0) - if err == segment.ErrClosed { - // index has been closed - _ = ourSnapshot.DecRef() - break OUTER - } - - // save this current snapshot's persistedCallbacks, to invoke during - // the retry attempt - unpersistedCallbacks = 
append(unpersistedCallbacks, ourPersistedCallbacks...) - - s.fireAsyncError(fmt.Errorf("got err persisting snapshot: %v", err)) - _ = ourSnapshot.DecRef() - atomic.AddUint64(&s.stats.TotPersistLoopErr, 1) - continue OUTER - } - - if unpersistedCallbacks != nil { - // in the event of this being a retry attempt for persisting a snapshot - // that had earlier failed, prepend the persistedCallbacks associated - // with earlier segment(s) to the latest persistedCallbacks - ourPersistedCallbacks = append(unpersistedCallbacks, ourPersistedCallbacks...) - unpersistedCallbacks = nil - } - - for i := range ourPersistedCallbacks { - ourPersistedCallbacks[i](err) - } - - atomic.StoreUint64(&s.stats.LastPersistedEpoch, ourSnapshot.epoch) - - lastPersistedEpoch = ourSnapshot.epoch - for _, ew := range persistWatchers { - close(ew.notifyCh) - } - - persistWatchers = nil - _ = ourSnapshot.DecRef() - - changed := false - s.rootLock.RLock() - if s.root != nil && s.root.epoch != lastPersistedEpoch { - changed = true - } - s.rootLock.RUnlock() - - s.fireEvent(EventKindPersisterProgress, time.Since(startTime)) - - if changed { - atomic.AddUint64(&s.stats.TotPersistLoopProgress, 1) - continue OUTER - } - } - - // tell the introducer we're waiting for changes - w := &epochWatcher{ - epoch: lastPersistedEpoch, - notifyCh: make(notificationChan, 1), - } - - select { - case <-s.closeCh: - break OUTER - case s.introducerNotifier <- w: - } - - s.removeOldData() // might as well cleanup while waiting - - atomic.AddUint64(&s.stats.TotPersistLoopWait, 1) - - select { - case <-s.closeCh: - break OUTER - case <-w.notifyCh: - // woken up, next loop should pick up work - atomic.AddUint64(&s.stats.TotPersistLoopWaitNotified, 1) - case ew = <-s.persisterNotifier: - // if the watchers are already caught up then let them wait, - // else let them continue to do the catch up - persistWatchers = append(persistWatchers, ew) - } - - atomic.AddUint64(&s.stats.TotPersistLoopEnd, 1) - } -} - -func 
notifyMergeWatchers(lastPersistedEpoch uint64, - persistWatchers []*epochWatcher) []*epochWatcher { - var watchersNext []*epochWatcher - for _, w := range persistWatchers { - if w.epoch < lastPersistedEpoch { - close(w.notifyCh) - } else { - watchersNext = append(watchersNext, w) - } - } - return watchersNext -} - -func (s *Scorch) pausePersisterForMergerCatchUp(lastPersistedEpoch uint64, - lastMergedEpoch uint64, persistWatchers []*epochWatcher, - po *persisterOptions) (uint64, []*epochWatcher) { - - // First, let the watchers proceed if they lag behind - persistWatchers = notifyMergeWatchers(lastPersistedEpoch, persistWatchers) - - // Check the merger lag by counting the segment files on disk, - numFilesOnDisk, _, _ := s.diskFileStats(nil) - - // On finding fewer files on disk, persister takes a short pause - // for sufficient in-memory segments to pile up for the next - // memory merge cum persist loop. - if numFilesOnDisk < uint64(po.PersisterNapUnderNumFiles) && - po.PersisterNapTimeMSec > 0 && s.NumEventsBlocking() == 0 { - select { - case <-s.closeCh: - case <-time.After(time.Millisecond * time.Duration(po.PersisterNapTimeMSec)): - atomic.AddUint64(&s.stats.TotPersisterNapPauseCompleted, 1) - - case ew := <-s.persisterNotifier: - // unblock the merger in meantime - persistWatchers = append(persistWatchers, ew) - lastMergedEpoch = ew.epoch - persistWatchers = notifyMergeWatchers(lastPersistedEpoch, persistWatchers) - atomic.AddUint64(&s.stats.TotPersisterMergerNapBreak, 1) - } - return lastMergedEpoch, persistWatchers - } - - // Finding too many files on disk could be due to two reasons. - // 1. Too many older snapshots awaiting the clean up. - // 2. The merger could be lagging behind on merging the disk files. - if numFilesOnDisk > uint64(po.PersisterNapUnderNumFiles) { - s.removeOldData() - numFilesOnDisk, _, _ = s.diskFileStats(nil) - } - - // Persister pause until the merger catches up to reduce the segment - // file count under the threshold. 
- // But if there is memory pressure, then skip this sleep maneuvers. -OUTER: - for po.PersisterNapUnderNumFiles > 0 && - numFilesOnDisk >= uint64(po.PersisterNapUnderNumFiles) && - lastMergedEpoch < lastPersistedEpoch { - atomic.AddUint64(&s.stats.TotPersisterSlowMergerPause, 1) - - select { - case <-s.closeCh: - break OUTER - case ew := <-s.persisterNotifier: - persistWatchers = append(persistWatchers, ew) - lastMergedEpoch = ew.epoch - } - - atomic.AddUint64(&s.stats.TotPersisterSlowMergerResume, 1) - - // let the watchers proceed if they lag behind - persistWatchers = notifyMergeWatchers(lastPersistedEpoch, persistWatchers) - - numFilesOnDisk, _, _ = s.diskFileStats(nil) - } - - return lastMergedEpoch, persistWatchers -} - -func (s *Scorch) parsePersisterOptions() (*persisterOptions, error) { - po := persisterOptions{ - PersisterNapTimeMSec: DefaultPersisterNapTimeMSec, - PersisterNapUnderNumFiles: DefaultPersisterNapUnderNumFiles, - MemoryPressurePauseThreshold: DefaultMemoryPressurePauseThreshold, - } - if v, ok := s.config["scorchPersisterOptions"]; ok { - b, err := json.Marshal(v) - if err != nil { - return &po, err - } - - err = json.Unmarshal(b, &po) - if err != nil { - return &po, err - } - } - return &po, nil -} - -func (s *Scorch) persistSnapshot(snapshot *IndexSnapshot, - po *persisterOptions) error { - // Perform in-memory segment merging only when the memory pressure is - // below the configured threshold, else the persister performs the - // direct persistence of segments. 
- if s.NumEventsBlocking() < po.MemoryPressurePauseThreshold { - persisted, err := s.persistSnapshotMaybeMerge(snapshot) - if err != nil { - return err - } - if persisted { - return nil - } - } - - return s.persistSnapshotDirect(snapshot) -} - -// DefaultMinSegmentsForInMemoryMerge represents the default number of -// in-memory zap segments that persistSnapshotMaybeMerge() needs to -// see in an IndexSnapshot before it decides to merge and persist -// those segments -var DefaultMinSegmentsForInMemoryMerge = 2 - -// persistSnapshotMaybeMerge examines the snapshot and might merge and -// persist the in-memory zap segments if there are enough of them -func (s *Scorch) persistSnapshotMaybeMerge(snapshot *IndexSnapshot) ( - bool, error) { - // collect the in-memory zap segments (SegmentBase instances) - var sbs []segment.Segment - var sbsDrops []*roaring.Bitmap - var sbsIndexes []int - - for i, segmentSnapshot := range snapshot.segment { - if _, ok := segmentSnapshot.segment.(segment.PersistedSegment); !ok { - sbs = append(sbs, segmentSnapshot.segment) - sbsDrops = append(sbsDrops, segmentSnapshot.deleted) - sbsIndexes = append(sbsIndexes, i) - } - } - - if len(sbs) < DefaultMinSegmentsForInMemoryMerge { - return false, nil - } - - newSnapshot, newSegmentID, err := s.mergeSegmentBases( - snapshot, sbs, sbsDrops, sbsIndexes) - if err != nil { - return false, err - } - if newSnapshot == nil { - return false, nil - } - - defer func() { - _ = newSnapshot.DecRef() - }() - - mergedSegmentIDs := map[uint64]struct{}{} - for _, idx := range sbsIndexes { - mergedSegmentIDs[snapshot.segment[idx].id] = struct{}{} - } - - // construct a snapshot that's logically equivalent to the input - // snapshot, but with merged segments replaced by the new segment - equiv := &IndexSnapshot{ - parent: snapshot.parent, - segment: make([]*SegmentSnapshot, 0, len(snapshot.segment)), - internal: snapshot.internal, - epoch: snapshot.epoch, - creator: "persistSnapshotMaybeMerge", - } - - // copy to 
the equiv the segments that weren't replaced - for _, segment := range snapshot.segment { - if _, wasMerged := mergedSegmentIDs[segment.id]; !wasMerged { - equiv.segment = append(equiv.segment, segment) - } - } - - // append to the equiv the new segment - for _, segment := range newSnapshot.segment { - if segment.id == newSegmentID { - equiv.segment = append(equiv.segment, &SegmentSnapshot{ - id: newSegmentID, - segment: segment.segment, - deleted: nil, // nil since merging handled deletions - }) - break - } - } - - err = s.persistSnapshotDirect(equiv) - if err != nil { - return false, err - } - - return true, nil -} - -func prepareBoltSnapshot(snapshot *IndexSnapshot, tx *bolt.Tx, path string, - segPlugin SegmentPlugin) ([]string, map[uint64]string, error) { - snapshotsBucket, err := tx.CreateBucketIfNotExists(boltSnapshotsBucket) - if err != nil { - return nil, nil, err - } - newSnapshotKey := encodeUvarintAscending(nil, snapshot.epoch) - snapshotBucket, err := snapshotsBucket.CreateBucketIfNotExists(newSnapshotKey) - if err != nil { - return nil, nil, err - } - - // persist meta values - metaBucket, err := snapshotBucket.CreateBucketIfNotExists(boltMetaDataKey) - if err != nil { - return nil, nil, err - } - err = metaBucket.Put(boltMetaDataSegmentTypeKey, []byte(segPlugin.Type())) - if err != nil { - return nil, nil, err - } - buf := make([]byte, binary.MaxVarintLen32) - binary.BigEndian.PutUint32(buf, segPlugin.Version()) - err = metaBucket.Put(boltMetaDataSegmentVersionKey, buf) - if err != nil { - return nil, nil, err - } - - // persist internal values - internalBucket, err := snapshotBucket.CreateBucketIfNotExists(boltInternalKey) - if err != nil { - return nil, nil, err - } - // TODO optimize writing these in order? 
- for k, v := range snapshot.internal { - err = internalBucket.Put([]byte(k), v) - if err != nil { - return nil, nil, err - } - } - - var filenames []string - newSegmentPaths := make(map[uint64]string) - - // first ensure that each segment in this snapshot has been persisted - for _, segmentSnapshot := range snapshot.segment { - snapshotSegmentKey := encodeUvarintAscending(nil, segmentSnapshot.id) - snapshotSegmentBucket, err := snapshotBucket.CreateBucketIfNotExists(snapshotSegmentKey) - if err != nil { - return nil, nil, err - } - switch seg := segmentSnapshot.segment.(type) { - case segment.PersistedSegment: - segPath := seg.Path() - filename := strings.TrimPrefix(segPath, path+string(os.PathSeparator)) - err = snapshotSegmentBucket.Put(boltPathKey, []byte(filename)) - if err != nil { - return nil, nil, err - } - filenames = append(filenames, filename) - case segment.UnpersistedSegment: - // need to persist this to disk - filename := zapFileName(segmentSnapshot.id) - path := path + string(os.PathSeparator) + filename - err = seg.Persist(path) - if err != nil { - return nil, nil, fmt.Errorf("error persisting segment: %v", err) - } - newSegmentPaths[segmentSnapshot.id] = path - err = snapshotSegmentBucket.Put(boltPathKey, []byte(filename)) - if err != nil { - return nil, nil, err - } - filenames = append(filenames, filename) - default: - return nil, nil, fmt.Errorf("unknown segment type: %T", seg) - } - // store current deleted bits - var roaringBuf bytes.Buffer - if segmentSnapshot.deleted != nil { - _, err = segmentSnapshot.deleted.WriteTo(&roaringBuf) - if err != nil { - return nil, nil, fmt.Errorf("error persisting roaring bytes: %v", err) - } - err = snapshotSegmentBucket.Put(boltDeletedKey, roaringBuf.Bytes()) - if err != nil { - return nil, nil, err - } - } - } - - return filenames, newSegmentPaths, nil -} - -func (s *Scorch) persistSnapshotDirect(snapshot *IndexSnapshot) (err error) { - // start a write transaction - tx, err := s.rootBolt.Begin(true) - if 
err != nil { - return err - } - // defer rollback on error - defer func() { - if err != nil { - _ = tx.Rollback() - } - }() - - filenames, newSegmentPaths, err := prepareBoltSnapshot(snapshot, tx, s.path, s.segPlugin) - if err != nil { - return err - } - - // we need to swap in a new root only when we've persisted 1 or - // more segments -- whereby the new root would have 1-for-1 - // replacements of in-memory segments with file-based segments - // - // other cases like updates to internal values only, and/or when - // there are only deletions, are already covered and persisted by - // the newly populated boltdb snapshotBucket above - if len(newSegmentPaths) > 0 { - // now try to open all the new snapshots - newSegments := make(map[uint64]segment.Segment) - defer func() { - for _, s := range newSegments { - if s != nil { - // cleanup segments that were opened but not - // swapped into the new root - _ = s.Close() - } - } - }() - for segmentID, path := range newSegmentPaths { - newSegments[segmentID], err = s.segPlugin.Open(path) - if err != nil { - return fmt.Errorf("error opening new segment at %s, %v", path, err) - } - } - - persist := &persistIntroduction{ - persisted: newSegments, - applied: make(notificationChan), - } - - select { - case <-s.closeCh: - return segment.ErrClosed - case s.persists <- persist: - } - - select { - case <-s.closeCh: - return segment.ErrClosed - case <-persist.applied: - } - } - - err = tx.Commit() - if err != nil { - return err - } - - err = s.rootBolt.Sync() - if err != nil { - return err - } - - // allow files to become eligible for removal after commit, such - // as file segments from snapshots that came from the merger - s.rootLock.Lock() - for _, filename := range filenames { - delete(s.ineligibleForRemoval, filename) - } - s.rootLock.Unlock() - - return nil -} - -func zapFileName(epoch uint64) string { - return fmt.Sprintf("%012x.zap", epoch) -} - -// bolt snapshot code - -var boltSnapshotsBucket = []byte{'s'} -var boltPathKey 
= []byte{'p'} -var boltDeletedKey = []byte{'d'} -var boltInternalKey = []byte{'i'} -var boltMetaDataKey = []byte{'m'} -var boltMetaDataSegmentTypeKey = []byte("type") -var boltMetaDataSegmentVersionKey = []byte("version") - -func (s *Scorch) loadFromBolt() error { - return s.rootBolt.View(func(tx *bolt.Tx) error { - snapshots := tx.Bucket(boltSnapshotsBucket) - if snapshots == nil { - return nil - } - foundRoot := false - c := snapshots.Cursor() - for k, _ := c.Last(); k != nil; k, _ = c.Prev() { - _, snapshotEpoch, err := decodeUvarintAscending(k) - if err != nil { - log.Printf("unable to parse segment epoch %x, continuing", k) - continue - } - if foundRoot { - s.AddEligibleForRemoval(snapshotEpoch) - continue - } - snapshot := snapshots.Bucket(k) - if snapshot == nil { - log.Printf("snapshot key, but bucket missing %x, continuing", k) - s.AddEligibleForRemoval(snapshotEpoch) - continue - } - indexSnapshot, err := s.loadSnapshot(snapshot) - if err != nil { - log.Printf("unable to load snapshot, %v, continuing", err) - s.AddEligibleForRemoval(snapshotEpoch) - continue - } - indexSnapshot.epoch = snapshotEpoch - // set the nextSegmentID - s.nextSegmentID, err = s.maxSegmentIDOnDisk() - if err != nil { - return err - } - s.nextSegmentID++ - s.rootLock.Lock() - s.nextSnapshotEpoch = snapshotEpoch + 1 - rootPrev := s.root - s.root = indexSnapshot - s.rootLock.Unlock() - - if rootPrev != nil { - _ = rootPrev.DecRef() - } - - foundRoot = true - } - return nil - }) -} - -// LoadSnapshot loads the segment with the specified epoch -// NOTE: this is currently ONLY intended to be used by the command-line tool -func (s *Scorch) LoadSnapshot(epoch uint64) (rv *IndexSnapshot, err error) { - err = s.rootBolt.View(func(tx *bolt.Tx) error { - snapshots := tx.Bucket(boltSnapshotsBucket) - if snapshots == nil { - return nil - } - snapshotKey := encodeUvarintAscending(nil, epoch) - snapshot := snapshots.Bucket(snapshotKey) - if snapshot == nil { - return fmt.Errorf("snapshot with 
epoch: %v - doesn't exist", epoch) - } - rv, err = s.loadSnapshot(snapshot) - return err - }) - if err != nil { - return nil, err - } - return rv, nil -} - -func (s *Scorch) loadSnapshot(snapshot *bolt.Bucket) (*IndexSnapshot, error) { - - rv := &IndexSnapshot{ - parent: s, - internal: make(map[string][]byte), - refs: 1, - creator: "loadSnapshot", - } - // first we look for the meta-data bucket, this will tell us - // which segment type/version was used for this snapshot - // all operations for this scorch will use this type/version - metaBucket := snapshot.Bucket(boltMetaDataKey) - if metaBucket == nil { - _ = rv.DecRef() - return nil, fmt.Errorf("meta-data bucket missing") - } - segmentType := string(metaBucket.Get(boltMetaDataSegmentTypeKey)) - segmentVersion := binary.BigEndian.Uint32( - metaBucket.Get(boltMetaDataSegmentVersionKey)) - err := s.loadSegmentPlugin(segmentType, segmentVersion) - if err != nil { - _ = rv.DecRef() - return nil, fmt.Errorf( - "unable to load correct segment wrapper: %v", err) - } - var running uint64 - c := snapshot.Cursor() - for k, _ := c.First(); k != nil; k, _ = c.Next() { - if k[0] == boltInternalKey[0] { - internalBucket := snapshot.Bucket(k) - err := internalBucket.ForEach(func(key []byte, val []byte) error { - copiedVal := append([]byte(nil), val...) 
- rv.internal[string(key)] = copiedVal - return nil - }) - if err != nil { - _ = rv.DecRef() - return nil, err - } - } else if k[0] != boltMetaDataKey[0] { - segmentBucket := snapshot.Bucket(k) - if segmentBucket == nil { - _ = rv.DecRef() - return nil, fmt.Errorf("segment key, but bucket missing % x", k) - } - segmentSnapshot, err := s.loadSegment(segmentBucket) - if err != nil { - _ = rv.DecRef() - return nil, fmt.Errorf("failed to load segment: %v", err) - } - _, segmentSnapshot.id, err = decodeUvarintAscending(k) - if err != nil { - _ = rv.DecRef() - return nil, fmt.Errorf("failed to decode segment id: %v", err) - } - rv.segment = append(rv.segment, segmentSnapshot) - rv.offsets = append(rv.offsets, running) - running += segmentSnapshot.segment.Count() - } - } - return rv, nil -} - -func (s *Scorch) loadSegment(segmentBucket *bolt.Bucket) (*SegmentSnapshot, error) { - pathBytes := segmentBucket.Get(boltPathKey) - if pathBytes == nil { - return nil, fmt.Errorf("segment path missing") - } - segmentPath := s.path + string(os.PathSeparator) + string(pathBytes) - segment, err := s.segPlugin.Open(segmentPath) - if err != nil { - return nil, fmt.Errorf("error opening bolt segment: %v", err) - } - - rv := &SegmentSnapshot{ - segment: segment, - cachedDocs: &cachedDocs{cache: nil}, - } - deletedBytes := segmentBucket.Get(boltDeletedKey) - if deletedBytes != nil { - deletedBitmap := roaring.NewBitmap() - r := bytes.NewReader(deletedBytes) - _, err := deletedBitmap.ReadFrom(r) - if err != nil { - _ = segment.Close() - return nil, fmt.Errorf("error reading deleted bytes: %v", err) - } - if !deletedBitmap.IsEmpty() { - rv.deleted = deletedBitmap - } - } - - return rv, nil -} - -func (s *Scorch) removeOldData() { - removed, err := s.removeOldBoltSnapshots() - if err != nil { - s.fireAsyncError(fmt.Errorf("got err removing old bolt snapshots: %v", err)) - } - atomic.AddUint64(&s.stats.TotSnapshotsRemovedFromMetaStore, uint64(removed)) - - err = s.removeOldZapFiles() - if err 
!= nil { - s.fireAsyncError(fmt.Errorf("got err removing old zap files: %v", err)) - } -} - -// NumSnapshotsToKeep represents how many recent, old snapshots to -// keep around per Scorch instance. Useful for apps that require -// rollback'ability. -var NumSnapshotsToKeep = 1 - -// Removes enough snapshots from the rootBolt so that the -// s.eligibleForRemoval stays under the NumSnapshotsToKeep policy. -func (s *Scorch) removeOldBoltSnapshots() (numRemoved int, err error) { - persistedEpochs, err := s.RootBoltSnapshotEpochs() - if err != nil { - return 0, err - } - - if len(persistedEpochs) <= s.numSnapshotsToKeep { - // we need to keep everything - return 0, nil - } - - // make a map of epochs to protect from deletion - protectedEpochs := make(map[uint64]struct{}, s.numSnapshotsToKeep) - for _, epoch := range persistedEpochs[0:s.numSnapshotsToKeep] { - protectedEpochs[epoch] = struct{}{} - } - - var epochsToRemove []uint64 - var newEligible []uint64 - s.rootLock.Lock() - for _, epoch := range s.eligibleForRemoval { - if _, ok := protectedEpochs[epoch]; ok { - // protected - newEligible = append(newEligible, epoch) - } else { - epochsToRemove = append(epochsToRemove, epoch) - } - } - s.eligibleForRemoval = newEligible - s.rootLock.Unlock() - - if len(epochsToRemove) == 0 { - return 0, nil - } - - tx, err := s.rootBolt.Begin(true) - if err != nil { - return 0, err - } - defer func() { - if err == nil { - err = tx.Commit() - } else { - _ = tx.Rollback() - } - if err == nil { - err = s.rootBolt.Sync() - } - }() - - snapshots := tx.Bucket(boltSnapshotsBucket) - if snapshots == nil { - return 0, nil - } - - for _, epochToRemove := range epochsToRemove { - k := encodeUvarintAscending(nil, epochToRemove) - err = snapshots.DeleteBucket(k) - if err == bolt.ErrBucketNotFound { - err = nil - } - if err == nil { - numRemoved++ - } - } - - return numRemoved, err -} - -func (s *Scorch) maxSegmentIDOnDisk() (uint64, error) { - currFileInfos, err := ioutil.ReadDir(s.path) - if err 
!= nil { - return 0, err - } - - var rv uint64 - for _, finfo := range currFileInfos { - fname := finfo.Name() - if filepath.Ext(fname) == ".zap" { - prefix := strings.TrimSuffix(fname, ".zap") - id, err2 := strconv.ParseUint(prefix, 16, 64) - if err2 != nil { - return 0, err2 - } - if id > rv { - rv = id - } - } - } - return rv, err -} - -// Removes any *.zap files which aren't listed in the rootBolt. -func (s *Scorch) removeOldZapFiles() error { - liveFileNames, err := s.loadZapFileNames() - if err != nil { - return err - } - - currFileInfos, err := ioutil.ReadDir(s.path) - if err != nil { - return err - } - - s.rootLock.RLock() - - for _, finfo := range currFileInfos { - fname := finfo.Name() - if filepath.Ext(fname) == ".zap" { - if _, exists := liveFileNames[fname]; !exists && !s.ineligibleForRemoval[fname] { - err := os.Remove(s.path + string(os.PathSeparator) + fname) - if err != nil { - log.Printf("got err removing file: %s, err: %v", fname, err) - } - } - } - } - - s.rootLock.RUnlock() - - return nil -} - -func (s *Scorch) RootBoltSnapshotEpochs() ([]uint64, error) { - var rv []uint64 - err := s.rootBolt.View(func(tx *bolt.Tx) error { - snapshots := tx.Bucket(boltSnapshotsBucket) - if snapshots == nil { - return nil - } - sc := snapshots.Cursor() - for sk, _ := sc.Last(); sk != nil; sk, _ = sc.Prev() { - _, snapshotEpoch, err := decodeUvarintAscending(sk) - if err != nil { - continue - } - rv = append(rv, snapshotEpoch) - } - return nil - }) - return rv, err -} - -// Returns the *.zap file names that are listed in the rootBolt. 
-func (s *Scorch) loadZapFileNames() (map[string]struct{}, error) { - rv := map[string]struct{}{} - err := s.rootBolt.View(func(tx *bolt.Tx) error { - snapshots := tx.Bucket(boltSnapshotsBucket) - if snapshots == nil { - return nil - } - sc := snapshots.Cursor() - for sk, _ := sc.First(); sk != nil; sk, _ = sc.Next() { - snapshot := snapshots.Bucket(sk) - if snapshot == nil { - continue - } - segc := snapshot.Cursor() - for segk, _ := segc.First(); segk != nil; segk, _ = segc.Next() { - if segk[0] == boltInternalKey[0] { - continue - } - segmentBucket := snapshot.Bucket(segk) - if segmentBucket == nil { - continue - } - pathBytes := segmentBucket.Get(boltPathKey) - if pathBytes == nil { - continue - } - pathString := string(pathBytes) - rv[string(pathString)] = struct{}{} - } - } - return nil - }) - - return rv, err -} diff --git a/vendor/github.com/blevesearch/bleve/v2/index/scorch/rollback.go b/vendor/github.com/blevesearch/bleve/v2/index/scorch/rollback.go deleted file mode 100644 index 067220e6f..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/index/scorch/rollback.go +++ /dev/null @@ -1,212 +0,0 @@ -// Copyright (c) 2017 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package scorch - -import ( - "fmt" - "log" - "os" - - bolt "go.etcd.io/bbolt" -) - -type RollbackPoint struct { - epoch uint64 - meta map[string][]byte -} - -func (r *RollbackPoint) GetInternal(key []byte) []byte { - return r.meta[string(key)] -} - -// RollbackPoints returns an array of rollback points available for -// the application to rollback to, with more recent rollback points -// (higher epochs) coming first. -func RollbackPoints(path string) ([]*RollbackPoint, error) { - if len(path) == 0 { - return nil, fmt.Errorf("RollbackPoints: invalid path") - } - - rootBoltPath := path + string(os.PathSeparator) + "root.bolt" - rootBoltOpt := &bolt.Options{ - ReadOnly: true, - } - rootBolt, err := bolt.Open(rootBoltPath, 0600, rootBoltOpt) - if err != nil || rootBolt == nil { - return nil, err - } - - // start a read-only bolt transaction - tx, err := rootBolt.Begin(false) - if err != nil { - return nil, fmt.Errorf("RollbackPoints: failed to start" + - " read-only transaction") - } - - // read-only bolt transactions to be rolled back - defer func() { - _ = tx.Rollback() - _ = rootBolt.Close() - }() - - snapshots := tx.Bucket(boltSnapshotsBucket) - if snapshots == nil { - return nil, nil - } - - rollbackPoints := []*RollbackPoint{} - - c1 := snapshots.Cursor() - for k, _ := c1.Last(); k != nil; k, _ = c1.Prev() { - _, snapshotEpoch, err := decodeUvarintAscending(k) - if err != nil { - log.Printf("RollbackPoints:"+ - " unable to parse segment epoch %x, continuing", k) - continue - } - - snapshot := snapshots.Bucket(k) - if snapshot == nil { - log.Printf("RollbackPoints:"+ - " snapshot key, but bucket missing %x, continuing", k) - continue - } - - meta := map[string][]byte{} - c2 := snapshot.Cursor() - for j, _ := c2.First(); j != nil; j, _ = c2.Next() { - if j[0] == boltInternalKey[0] { - internalBucket := snapshot.Bucket(j) - err = internalBucket.ForEach(func(key []byte, val []byte) error { - copiedVal := append([]byte(nil), val...) 
- meta[string(key)] = copiedVal - return nil - }) - if err != nil { - break - } - } - } - - if err != nil { - log.Printf("RollbackPoints:"+ - " failed in fetching internal data: %v", err) - continue - } - - rollbackPoints = append(rollbackPoints, &RollbackPoint{ - epoch: snapshotEpoch, - meta: meta, - }) - } - - return rollbackPoints, nil -} - -// Rollback atomically and durably brings the store back to the point -// in time as represented by the RollbackPoint. -// Rollback() should only be passed a RollbackPoint that came from the -// same store using the RollbackPoints() API along with the index path. -func Rollback(path string, to *RollbackPoint) error { - if to == nil { - return fmt.Errorf("Rollback: RollbackPoint is nil") - } - if len(path) == 0 { - return fmt.Errorf("Rollback: index path is empty") - } - - rootBoltPath := path + string(os.PathSeparator) + "root.bolt" - rootBoltOpt := &bolt.Options{ - ReadOnly: false, - } - rootBolt, err := bolt.Open(rootBoltPath, 0600, rootBoltOpt) - if err != nil || rootBolt == nil { - return err - } - defer func() { - err1 := rootBolt.Close() - if err1 != nil && err == nil { - err = err1 - } - }() - - // pick all the younger persisted epochs in bolt store - // including the target one. 
- var found bool - var eligibleEpochs []uint64 - err = rootBolt.View(func(tx *bolt.Tx) error { - snapshots := tx.Bucket(boltSnapshotsBucket) - if snapshots == nil { - return nil - } - sc := snapshots.Cursor() - for sk, _ := sc.Last(); sk != nil && !found; sk, _ = sc.Prev() { - _, snapshotEpoch, err := decodeUvarintAscending(sk) - if err != nil { - continue - } - if snapshotEpoch == to.epoch { - found = true - } - eligibleEpochs = append(eligibleEpochs, snapshotEpoch) - } - return nil - }) - - if len(eligibleEpochs) == 0 { - return fmt.Errorf("Rollback: no persisted epochs found in bolt") - } - if !found { - return fmt.Errorf("Rollback: target epoch %d not found in bolt", to.epoch) - } - - // start a write transaction - tx, err := rootBolt.Begin(true) - if err != nil { - return err - } - - defer func() { - if err == nil { - err = tx.Commit() - } else { - _ = tx.Rollback() - } - if err == nil { - err = rootBolt.Sync() - } - }() - - snapshots := tx.Bucket(boltSnapshotsBucket) - if snapshots == nil { - return nil - } - for _, epoch := range eligibleEpochs { - k := encodeUvarintAscending(nil, epoch) - if err != nil { - continue - } - if epoch == to.epoch { - // return here as it already processed until the given epoch - return nil - } - err = snapshots.DeleteBucket(k) - if err == bolt.ErrBucketNotFound { - err = nil - } - } - - return err -} diff --git a/vendor/github.com/blevesearch/bleve/v2/index/scorch/scorch.go b/vendor/github.com/blevesearch/bleve/v2/index/scorch/scorch.go deleted file mode 100644 index fa1aaebbf..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/index/scorch/scorch.go +++ /dev/null @@ -1,669 +0,0 @@ -// Copyright (c) 2018 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package scorch - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "os" - "sync" - "sync/atomic" - "time" - - "github.com/RoaringBitmap/roaring" - "github.com/blevesearch/bleve/v2/registry" - index "github.com/blevesearch/bleve_index_api" - segment "github.com/blevesearch/scorch_segment_api/v2" - bolt "go.etcd.io/bbolt" -) - -const Name = "scorch" - -const Version uint8 = 2 - -var ErrClosed = fmt.Errorf("scorch closed") - -type Scorch struct { - nextSegmentID uint64 - stats Stats - iStats internalStats - - readOnly bool - version uint8 - config map[string]interface{} - analysisQueue *index.AnalysisQueue - path string - - unsafeBatch bool - - rootLock sync.RWMutex - root *IndexSnapshot // holds 1 ref-count on the root - rootPersisted []chan error // closed when root is persisted - persistedCallbacks []index.BatchCallback - nextSnapshotEpoch uint64 - eligibleForRemoval []uint64 // Index snapshot epochs that are safe to GC. - ineligibleForRemoval map[string]bool // Filenames that should not be GC'ed yet. 
- - numSnapshotsToKeep int - closeCh chan struct{} - introductions chan *segmentIntroduction - persists chan *persistIntroduction - merges chan *segmentMerge - introducerNotifier chan *epochWatcher - persisterNotifier chan *epochWatcher - rootBolt *bolt.DB - asyncTasks sync.WaitGroup - - onEvent func(event Event) - onAsyncError func(err error) - - forceMergeRequestCh chan *mergerCtrl - - segPlugin SegmentPlugin -} - -type internalStats struct { - persistEpoch uint64 - persistSnapshotSize uint64 - mergeEpoch uint64 - mergeSnapshotSize uint64 - newSegBufBytesAdded uint64 - newSegBufBytesRemoved uint64 - analysisBytesAdded uint64 - analysisBytesRemoved uint64 -} - -func NewScorch(storeName string, - config map[string]interface{}, - analysisQueue *index.AnalysisQueue) (index.Index, error) { - rv := &Scorch{ - version: Version, - config: config, - analysisQueue: analysisQueue, - nextSnapshotEpoch: 1, - closeCh: make(chan struct{}), - ineligibleForRemoval: map[string]bool{}, - forceMergeRequestCh: make(chan *mergerCtrl, 1), - segPlugin: defaultSegmentPlugin, - } - - forcedSegmentType, forcedSegmentVersion, err := configForceSegmentTypeVersion(config) - if err != nil { - return nil, err - } - if forcedSegmentType != "" && forcedSegmentVersion != 0 { - err := rv.loadSegmentPlugin(forcedSegmentType, - uint32(forcedSegmentVersion)) - if err != nil { - return nil, err - } - } - - rv.root = &IndexSnapshot{parent: rv, refs: 1, creator: "NewScorch"} - ro, ok := config["read_only"].(bool) - if ok { - rv.readOnly = ro - } - ub, ok := config["unsafe_batch"].(bool) - if ok { - rv.unsafeBatch = ub - } - ecbName, ok := config["eventCallbackName"].(string) - if ok { - rv.onEvent = RegistryEventCallbacks[ecbName] - } - aecbName, ok := config["asyncErrorCallbackName"].(string) - if ok { - rv.onAsyncError = RegistryAsyncErrorCallbacks[aecbName] - } - return rv, nil -} - -// configForceSegmentTypeVersion checks if the caller has requested a -// specific segment type/version -func 
configForceSegmentTypeVersion(config map[string]interface{}) (string, uint32, error) { - forcedSegmentVersion, err := parseToInteger(config["forceSegmentVersion"]) - if err != nil { - return "", 0, nil - } - - forcedSegmentType, ok := config["forceSegmentType"].(string) - if !ok { - return "", 0, fmt.Errorf( - "forceSegmentVersion set to %d, must also specify forceSegmentType", forcedSegmentVersion) - } - - return forcedSegmentType, uint32(forcedSegmentVersion), nil -} - -func (s *Scorch) NumEventsBlocking() uint64 { - eventsCompleted := atomic.LoadUint64(&s.stats.TotEventTriggerCompleted) - eventsStarted := atomic.LoadUint64(&s.stats.TotEventTriggerStarted) - return eventsStarted - eventsCompleted -} - -func (s *Scorch) fireEvent(kind EventKind, dur time.Duration) { - if s.onEvent != nil { - atomic.AddUint64(&s.stats.TotEventTriggerStarted, 1) - s.onEvent(Event{Kind: kind, Scorch: s, Duration: dur}) - atomic.AddUint64(&s.stats.TotEventTriggerCompleted, 1) - } -} - -func (s *Scorch) fireAsyncError(err error) { - if s.onAsyncError != nil { - s.onAsyncError(err) - } - atomic.AddUint64(&s.stats.TotOnErrors, 1) -} - -func (s *Scorch) Open() error { - err := s.openBolt() - if err != nil { - return err - } - - s.asyncTasks.Add(1) - go s.introducerLoop() - - if !s.readOnly && s.path != "" { - s.asyncTasks.Add(1) - go s.persisterLoop() - s.asyncTasks.Add(1) - go s.mergerLoop() - } - - return nil -} - -func (s *Scorch) openBolt() error { - var ok bool - s.path, ok = s.config["path"].(string) - if !ok { - return fmt.Errorf("must specify path") - } - if s.path == "" { - s.unsafeBatch = true - } - - var rootBoltOpt = *bolt.DefaultOptions - if s.readOnly { - rootBoltOpt.ReadOnly = true - } else { - if s.path != "" { - err := os.MkdirAll(s.path, 0700) - if err != nil { - return err - } - } - } - - if boltTimeoutStr, ok := s.config["bolt_timeout"].(string); ok { - var err error - boltTimeout, err := time.ParseDuration(boltTimeoutStr) - if err != nil { - return fmt.Errorf("invalid 
duration specified for bolt_timeout: %v", err) - } - rootBoltOpt.Timeout = boltTimeout - } - - rootBoltPath := s.path + string(os.PathSeparator) + "root.bolt" - var err error - if s.path != "" { - s.rootBolt, err = bolt.Open(rootBoltPath, 0600, &rootBoltOpt) - if err != nil { - return err - } - - // now see if there is any existing state to load - err = s.loadFromBolt() - if err != nil { - _ = s.Close() - return err - } - } - - atomic.StoreUint64(&s.stats.TotFileSegmentsAtRoot, uint64(len(s.root.segment))) - - s.introductions = make(chan *segmentIntroduction) - s.persists = make(chan *persistIntroduction) - s.merges = make(chan *segmentMerge) - s.introducerNotifier = make(chan *epochWatcher, 1) - s.persisterNotifier = make(chan *epochWatcher, 1) - s.closeCh = make(chan struct{}) - s.forceMergeRequestCh = make(chan *mergerCtrl, 1) - - if !s.readOnly && s.path != "" { - err := s.removeOldZapFiles() // Before persister or merger create any new files. - if err != nil { - _ = s.Close() - return err - } - } - - s.numSnapshotsToKeep = NumSnapshotsToKeep - if v, ok := s.config["numSnapshotsToKeep"]; ok { - var t int - if t, err = parseToInteger(v); err != nil { - return fmt.Errorf("numSnapshotsToKeep parse err: %v", err) - } - if t > 0 { - s.numSnapshotsToKeep = t - } - } - - return nil -} - -func (s *Scorch) Close() (err error) { - startTime := time.Now() - defer func() { - s.fireEvent(EventKindClose, time.Since(startTime)) - }() - - s.fireEvent(EventKindCloseStart, 0) - - // signal to async tasks we want to close - close(s.closeCh) - // wait for them to close - s.asyncTasks.Wait() - // now close the root bolt - if s.rootBolt != nil { - err = s.rootBolt.Close() - s.rootLock.Lock() - if s.root != nil { - err2 := s.root.DecRef() - if err == nil { - err = err2 - } - } - s.root = nil - s.rootLock.Unlock() - } - - return -} - -func (s *Scorch) Update(doc index.Document) error { - b := index.NewBatch() - b.Update(doc) - return s.Batch(b) -} - -func (s *Scorch) Delete(id string) 
error { - b := index.NewBatch() - b.Delete(id) - return s.Batch(b) -} - -// Batch applices a batch of changes to the index atomically -func (s *Scorch) Batch(batch *index.Batch) (err error) { - start := time.Now() - - defer func() { - s.fireEvent(EventKindBatchIntroduction, time.Since(start)) - }() - - resultChan := make(chan index.Document, len(batch.IndexOps)) - - var numUpdates uint64 - var numDeletes uint64 - var numPlainTextBytes uint64 - var ids []string - for docID, doc := range batch.IndexOps { - if doc != nil { - // insert _id field - doc.AddIDField() - numUpdates++ - numPlainTextBytes += doc.NumPlainTextBytes() - } else { - numDeletes++ - } - ids = append(ids, docID) - } - - // FIXME could sort ids list concurrent with analysis? - - if numUpdates > 0 { - go func() { - for k := range batch.IndexOps { - doc := batch.IndexOps[k] - if doc != nil { - // put the work on the queue - s.analysisQueue.Queue(func() { - analyze(doc) - resultChan <- doc - }) - } - } - }() - } - - // wait for analysis result - analysisResults := make([]index.Document, int(numUpdates)) - var itemsDeQueued uint64 - var totalAnalysisSize int - for itemsDeQueued < numUpdates { - result := <-resultChan - resultSize := result.Size() - atomic.AddUint64(&s.iStats.analysisBytesAdded, uint64(resultSize)) - totalAnalysisSize += resultSize - analysisResults[itemsDeQueued] = result - itemsDeQueued++ - } - close(resultChan) - defer atomic.AddUint64(&s.iStats.analysisBytesRemoved, uint64(totalAnalysisSize)) - - atomic.AddUint64(&s.stats.TotAnalysisTime, uint64(time.Since(start))) - - indexStart := time.Now() - - // notify handlers that we're about to introduce a segment - s.fireEvent(EventKindBatchIntroductionStart, 0) - - var newSegment segment.Segment - var bufBytes uint64 - if len(analysisResults) > 0 { - newSegment, bufBytes, err = s.segPlugin.New(analysisResults) - if err != nil { - return err - } - atomic.AddUint64(&s.iStats.newSegBufBytesAdded, bufBytes) - } else { - 
atomic.AddUint64(&s.stats.TotBatchesEmpty, 1) - } - - err = s.prepareSegment(newSegment, ids, batch.InternalOps, batch.PersistedCallback()) - if err != nil { - if newSegment != nil { - _ = newSegment.Close() - } - atomic.AddUint64(&s.stats.TotOnErrors, 1) - } else { - atomic.AddUint64(&s.stats.TotUpdates, numUpdates) - atomic.AddUint64(&s.stats.TotDeletes, numDeletes) - atomic.AddUint64(&s.stats.TotBatches, 1) - atomic.AddUint64(&s.stats.TotIndexedPlainTextBytes, numPlainTextBytes) - } - - atomic.AddUint64(&s.iStats.newSegBufBytesRemoved, bufBytes) - atomic.AddUint64(&s.stats.TotIndexTime, uint64(time.Since(indexStart))) - - return err -} - -func (s *Scorch) prepareSegment(newSegment segment.Segment, ids []string, - internalOps map[string][]byte, persistedCallback index.BatchCallback) error { - - // new introduction - introduction := &segmentIntroduction{ - id: atomic.AddUint64(&s.nextSegmentID, 1), - data: newSegment, - ids: ids, - obsoletes: make(map[uint64]*roaring.Bitmap), - internal: internalOps, - applied: make(chan error), - persistedCallback: persistedCallback, - } - - if !s.unsafeBatch { - introduction.persisted = make(chan error, 1) - } - - // optimistically prepare obsoletes outside of rootLock - s.rootLock.RLock() - root := s.root - root.AddRef() - s.rootLock.RUnlock() - - defer func() { _ = root.DecRef() }() - - for _, seg := range root.segment { - delta, err := seg.segment.DocNumbers(ids) - if err != nil { - return err - } - introduction.obsoletes[seg.id] = delta - } - - introStartTime := time.Now() - - s.introductions <- introduction - - // block until this segment is applied - err := <-introduction.applied - if err != nil { - return err - } - - if introduction.persisted != nil { - err = <-introduction.persisted - } - - introTime := uint64(time.Since(introStartTime)) - atomic.AddUint64(&s.stats.TotBatchIntroTime, introTime) - if atomic.LoadUint64(&s.stats.MaxBatchIntroTime) < introTime { - atomic.StoreUint64(&s.stats.MaxBatchIntroTime, introTime) - } 
- - return err -} - -func (s *Scorch) SetInternal(key, val []byte) error { - b := index.NewBatch() - b.SetInternal(key, val) - return s.Batch(b) -} - -func (s *Scorch) DeleteInternal(key []byte) error { - b := index.NewBatch() - b.DeleteInternal(key) - return s.Batch(b) -} - -// Reader returns a low-level accessor on the index data. Close it to -// release associated resources. -func (s *Scorch) Reader() (index.IndexReader, error) { - return s.currentSnapshot(), nil -} - -func (s *Scorch) currentSnapshot() *IndexSnapshot { - s.rootLock.RLock() - rv := s.root - if rv != nil { - rv.AddRef() - } - s.rootLock.RUnlock() - return rv -} - -func (s *Scorch) Stats() json.Marshaler { - return &s.stats -} - -func (s *Scorch) diskFileStats(rootSegmentPaths map[string]struct{}) (uint64, - uint64, uint64) { - var numFilesOnDisk, numBytesUsedDisk, numBytesOnDiskByRoot uint64 - if s.path != "" { - finfos, err := ioutil.ReadDir(s.path) - if err == nil { - for _, finfo := range finfos { - if !finfo.IsDir() { - numBytesUsedDisk += uint64(finfo.Size()) - numFilesOnDisk++ - if rootSegmentPaths != nil { - fname := s.path + string(os.PathSeparator) + finfo.Name() - if _, fileAtRoot := rootSegmentPaths[fname]; fileAtRoot { - numBytesOnDiskByRoot += uint64(finfo.Size()) - } - } - } - } - } - } - // if no root files path given, then consider all disk files. 
- if rootSegmentPaths == nil { - return numFilesOnDisk, numBytesUsedDisk, numBytesUsedDisk - } - - return numFilesOnDisk, numBytesUsedDisk, numBytesOnDiskByRoot -} - -func (s *Scorch) StatsMap() map[string]interface{} { - m := s.stats.ToMap() - - indexSnapshot := s.currentSnapshot() - defer func() { - _ = indexSnapshot.Close() - }() - - rootSegPaths := indexSnapshot.diskSegmentsPaths() - - s.rootLock.RLock() - m["CurFilesIneligibleForRemoval"] = uint64(len(s.ineligibleForRemoval)) - s.rootLock.RUnlock() - - numFilesOnDisk, numBytesUsedDisk, numBytesOnDiskByRoot := s.diskFileStats(rootSegPaths) - - m["CurOnDiskBytes"] = numBytesUsedDisk - m["CurOnDiskFiles"] = numFilesOnDisk - - // TODO: consider one day removing these backwards compatible - // names for apps using the old names - m["updates"] = m["TotUpdates"] - m["deletes"] = m["TotDeletes"] - m["batches"] = m["TotBatches"] - m["errors"] = m["TotOnErrors"] - m["analysis_time"] = m["TotAnalysisTime"] - m["index_time"] = m["TotIndexTime"] - m["term_searchers_started"] = m["TotTermSearchersStarted"] - m["term_searchers_finished"] = m["TotTermSearchersFinished"] - m["num_plain_text_bytes_indexed"] = m["TotIndexedPlainTextBytes"] - m["num_items_introduced"] = m["TotIntroducedItems"] - m["num_items_persisted"] = m["TotPersistedItems"] - m["num_recs_to_persist"] = m["TotItemsToPersist"] - // total disk bytes found in index directory inclusive of older snapshots - m["num_bytes_used_disk"] = numBytesUsedDisk - // total disk bytes by the latest root index, exclusive of older snapshots - m["num_bytes_used_disk_by_root"] = numBytesOnDiskByRoot - // num_bytes_used_disk_by_root_reclaimable is an approximation about the - // reclaimable disk space in an index. 
(eg: from a full compaction) - m["num_bytes_used_disk_by_root_reclaimable"] = uint64(float64(numBytesOnDiskByRoot) * - indexSnapshot.reClaimableDocsRatio()) - m["num_files_on_disk"] = numFilesOnDisk - m["num_root_memorysegments"] = m["TotMemorySegmentsAtRoot"] - m["num_root_filesegments"] = m["TotFileSegmentsAtRoot"] - m["num_persister_nap_pause_completed"] = m["TotPersisterNapPauseCompleted"] - m["num_persister_nap_merger_break"] = m["TotPersisterMergerNapBreak"] - m["total_compaction_written_bytes"] = m["TotFileMergeWrittenBytes"] - - return m -} - -func (s *Scorch) Analyze(d index.Document) { - analyze(d) -} - -func analyze(d index.Document) { - d.VisitFields(func(field index.Field) { - if field.Options().IsIndexed() { - field.Analyze() - - if d.HasComposite() && field.Name() != "_id" { - // see if any of the composite fields need this - d.VisitComposite(func(cf index.CompositeField) { - cf.Compose(field.Name(), field.AnalyzedLength(), field.AnalyzedTokenFrequencies()) - }) - } - } - }) -} - -func (s *Scorch) AddEligibleForRemoval(epoch uint64) { - s.rootLock.Lock() - if s.root == nil || s.root.epoch != epoch { - s.eligibleForRemoval = append(s.eligibleForRemoval, epoch) - } - s.rootLock.Unlock() -} - -func (s *Scorch) MemoryUsed() (memUsed uint64) { - indexSnapshot := s.currentSnapshot() - if indexSnapshot == nil { - return - } - - defer func() { - _ = indexSnapshot.Close() - }() - - // Account for current root snapshot overhead - memUsed += uint64(indexSnapshot.Size()) - - // Account for snapshot that the persister may be working on - persistEpoch := atomic.LoadUint64(&s.iStats.persistEpoch) - persistSnapshotSize := atomic.LoadUint64(&s.iStats.persistSnapshotSize) - if persistEpoch != 0 && indexSnapshot.epoch > persistEpoch { - // the snapshot that the persister is working on isn't the same as - // the current snapshot - memUsed += persistSnapshotSize - } - - // Account for snapshot that the merger may be working on - mergeEpoch := 
atomic.LoadUint64(&s.iStats.mergeEpoch) - mergeSnapshotSize := atomic.LoadUint64(&s.iStats.mergeSnapshotSize) - if mergeEpoch != 0 && indexSnapshot.epoch > mergeEpoch { - // the snapshot that the merger is working on isn't the same as - // the current snapshot - memUsed += mergeSnapshotSize - } - - memUsed += (atomic.LoadUint64(&s.iStats.newSegBufBytesAdded) - - atomic.LoadUint64(&s.iStats.newSegBufBytesRemoved)) - - memUsed += (atomic.LoadUint64(&s.iStats.analysisBytesAdded) - - atomic.LoadUint64(&s.iStats.analysisBytesRemoved)) - - return memUsed -} - -func (s *Scorch) markIneligibleForRemoval(filename string) { - s.rootLock.Lock() - s.ineligibleForRemoval[filename] = true - s.rootLock.Unlock() -} - -func (s *Scorch) unmarkIneligibleForRemoval(filename string) { - s.rootLock.Lock() - delete(s.ineligibleForRemoval, filename) - s.rootLock.Unlock() -} - -func init() { - registry.RegisterIndexType(Name, NewScorch) -} - -func parseToInteger(i interface{}) (int, error) { - switch v := i.(type) { - case float64: - return int(v), nil - case int: - return v, nil - - default: - return 0, fmt.Errorf("expects int or float64 value") - } -} diff --git a/vendor/github.com/blevesearch/bleve/v2/index/scorch/segment_plugin.go b/vendor/github.com/blevesearch/bleve/v2/index/scorch/segment_plugin.go deleted file mode 100644 index ea40911fa..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/index/scorch/segment_plugin.go +++ /dev/null @@ -1,133 +0,0 @@ -// Copyright (c) 2019 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package scorch - -import ( - "fmt" - "github.com/RoaringBitmap/roaring" - index "github.com/blevesearch/bleve_index_api" - - segment "github.com/blevesearch/scorch_segment_api/v2" - - zapv11 "github.com/blevesearch/zapx/v11" - zapv12 "github.com/blevesearch/zapx/v12" - zapv13 "github.com/blevesearch/zapx/v13" - zapv14 "github.com/blevesearch/zapx/v14" - zapv15 "github.com/blevesearch/zapx/v15" -) - -// SegmentPlugin represents the essential functions required by a package to plug in -// it's segment implementation -type SegmentPlugin interface { - - // Type is the name for this segment plugin - Type() string - - // Version is a numeric value identifying a specific version of this type. - // When incompatible changes are made to a particular type of plugin, the - // version must be incremented. - Version() uint32 - - // New takes a set of Documents and turns them into a new Segment - New(results []index.Document) (segment.Segment, uint64, error) - - // Open attempts to open the file at the specified path and - // return the corresponding Segment - Open(path string) (segment.Segment, error) - - // Merge takes a set of Segments, and creates a new segment on disk at - // the specified path. - // Drops is a set of bitmaps (one for each segment) indicating which - // documents can be dropped from the segments during the merge. - // If the closeCh channel is closed, Merge will cease doing work at - // the next opportunity, and return an error (closed). - // StatsReporter can optionally be provided, in which case progress - // made during the merge is reported while operation continues. - // Returns: - // A slice of new document numbers (one for each input segment), - // this allows the caller to know a particular document's new - // document number in the newly merged segment. - // The number of bytes written to the new segment file. 
- // An error, if any occurred. - Merge(segments []segment.Segment, drops []*roaring.Bitmap, path string, - closeCh chan struct{}, s segment.StatsReporter) ( - [][]uint64, uint64, error) -} - -var supportedSegmentPlugins map[string]map[uint32]SegmentPlugin -var defaultSegmentPlugin SegmentPlugin - -func init() { - ResetSegmentPlugins() - RegisterSegmentPlugin(&zapv15.ZapPlugin{}, true) - RegisterSegmentPlugin(&zapv14.ZapPlugin{}, false) - RegisterSegmentPlugin(&zapv13.ZapPlugin{}, false) - RegisterSegmentPlugin(&zapv12.ZapPlugin{}, false) - RegisterSegmentPlugin(&zapv11.ZapPlugin{}, false) -} - -func ResetSegmentPlugins() { - supportedSegmentPlugins = map[string]map[uint32]SegmentPlugin{} -} - -func RegisterSegmentPlugin(plugin SegmentPlugin, makeDefault bool) { - if _, ok := supportedSegmentPlugins[plugin.Type()]; !ok { - supportedSegmentPlugins[plugin.Type()] = map[uint32]SegmentPlugin{} - } - supportedSegmentPlugins[plugin.Type()][plugin.Version()] = plugin - if makeDefault { - defaultSegmentPlugin = plugin - } -} - -func SupportedSegmentTypes() (rv []string) { - for k := range supportedSegmentPlugins { - rv = append(rv, k) - } - return -} - -func SupportedSegmentTypeVersions(typ string) (rv []uint32) { - for k := range supportedSegmentPlugins[typ] { - rv = append(rv, k) - } - return rv -} - -func chooseSegmentPlugin(forcedSegmentType string, - forcedSegmentVersion uint32) (SegmentPlugin, error) { - if versions, ok := supportedSegmentPlugins[forcedSegmentType]; ok { - if segPlugin, ok := versions[uint32(forcedSegmentVersion)]; ok { - return segPlugin, nil - } - return nil, fmt.Errorf( - "unsupported version %d for segment type: %s, supported: %v", - forcedSegmentVersion, forcedSegmentType, - SupportedSegmentTypeVersions(forcedSegmentType)) - } - return nil, fmt.Errorf("unsupported segment type: %s, supported: %v", - forcedSegmentType, SupportedSegmentTypes()) -} - -func (s *Scorch) loadSegmentPlugin(forcedSegmentType string, - forcedSegmentVersion uint32) error 
{ - segPlugin, err := chooseSegmentPlugin(forcedSegmentType, - forcedSegmentVersion) - if err != nil { - return err - } - s.segPlugin = segPlugin - return nil -} diff --git a/vendor/github.com/blevesearch/bleve/v2/index/scorch/snapshot_index.go b/vendor/github.com/blevesearch/bleve/v2/index/scorch/snapshot_index.go deleted file mode 100644 index ac2f34412..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/index/scorch/snapshot_index.go +++ /dev/null @@ -1,764 +0,0 @@ -// Copyright (c) 2017 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package scorch - -import ( - "container/heap" - "encoding/binary" - "fmt" - "reflect" - "sort" - "sync" - "sync/atomic" - - "github.com/RoaringBitmap/roaring" - "github.com/blevesearch/bleve/v2/document" - index "github.com/blevesearch/bleve_index_api" - segment "github.com/blevesearch/scorch_segment_api/v2" - "github.com/blevesearch/vellum" - lev "github.com/blevesearch/vellum/levenshtein" -) - -// re usable, threadsafe levenshtein builders -var lb1, lb2 *lev.LevenshteinAutomatonBuilder - -type asynchSegmentResult struct { - dict segment.TermDictionary - dictItr segment.DictionaryIterator - - index int - docs *roaring.Bitmap - - postings segment.PostingsList - - err error -} - -var reflectStaticSizeIndexSnapshot int - -func init() { - var is interface{} = IndexSnapshot{} - reflectStaticSizeIndexSnapshot = int(reflect.TypeOf(is).Size()) - var err error - lb1, err = lev.NewLevenshteinAutomatonBuilder(1, true) - if err != nil { - panic(fmt.Errorf("Levenshtein automaton ed1 builder err: %v", err)) - } - lb2, err = lev.NewLevenshteinAutomatonBuilder(2, true) - if err != nil { - panic(fmt.Errorf("Levenshtein automaton ed2 builder err: %v", err)) - } -} - -type IndexSnapshot struct { - parent *Scorch - segment []*SegmentSnapshot - offsets []uint64 - internal map[string][]byte - epoch uint64 - size uint64 - creator string - - m sync.Mutex // Protects the fields that follow. - refs int64 - - m2 sync.Mutex // Protects the fields that follow. 
- fieldTFRs map[string][]*IndexSnapshotTermFieldReader // keyed by field, recycled TFR's -} - -func (i *IndexSnapshot) Segments() []*SegmentSnapshot { - return i.segment -} - -func (i *IndexSnapshot) Internal() map[string][]byte { - return i.internal -} - -func (i *IndexSnapshot) AddRef() { - i.m.Lock() - i.refs++ - i.m.Unlock() -} - -func (i *IndexSnapshot) DecRef() (err error) { - i.m.Lock() - i.refs-- - if i.refs == 0 { - for _, s := range i.segment { - if s != nil { - err2 := s.segment.DecRef() - if err == nil { - err = err2 - } - } - } - if i.parent != nil { - go i.parent.AddEligibleForRemoval(i.epoch) - } - } - i.m.Unlock() - return err -} - -func (i *IndexSnapshot) Close() error { - return i.DecRef() -} - -func (i *IndexSnapshot) Size() int { - return int(i.size) -} - -func (i *IndexSnapshot) updateSize() { - i.size += uint64(reflectStaticSizeIndexSnapshot) - for _, s := range i.segment { - i.size += uint64(s.Size()) - } -} - -func (i *IndexSnapshot) newIndexSnapshotFieldDict(field string, - makeItr func(i segment.TermDictionary) segment.DictionaryIterator, - randomLookup bool) (*IndexSnapshotFieldDict, error) { - - results := make(chan *asynchSegmentResult) - for index, segment := range i.segment { - go func(index int, segment *SegmentSnapshot) { - dict, err := segment.segment.Dictionary(field) - if err != nil { - results <- &asynchSegmentResult{err: err} - } else { - if randomLookup { - results <- &asynchSegmentResult{dict: dict} - } else { - results <- &asynchSegmentResult{dictItr: makeItr(dict)} - } - } - }(index, segment) - } - - var err error - rv := &IndexSnapshotFieldDict{ - snapshot: i, - cursors: make([]*segmentDictCursor, 0, len(i.segment)), - } - for count := 0; count < len(i.segment); count++ { - asr := <-results - if asr.err != nil && err == nil { - err = asr.err - } else { - if !randomLookup { - next, err2 := asr.dictItr.Next() - if err2 != nil && err == nil { - err = err2 - } - if next != nil { - rv.cursors = append(rv.cursors, 
&segmentDictCursor{ - itr: asr.dictItr, - curr: *next, - }) - } - } else { - rv.cursors = append(rv.cursors, &segmentDictCursor{ - dict: asr.dict, - }) - } - } - } - // after ensuring we've read all items on channel - if err != nil { - return nil, err - } - - if !randomLookup { - // prepare heap - heap.Init(rv) - } - - return rv, nil -} - -func (i *IndexSnapshot) FieldDict(field string) (index.FieldDict, error) { - return i.newIndexSnapshotFieldDict(field, func(i segment.TermDictionary) segment.DictionaryIterator { - return i.AutomatonIterator(nil, nil, nil) - }, false) -} - -// calculateExclusiveEndFromInclusiveEnd produces the next key -// when sorting using memcmp style comparisons, suitable to -// use as the end key in a traditional (inclusive, exclusive] -// start/end range -func calculateExclusiveEndFromInclusiveEnd(inclusiveEnd []byte) []byte { - rv := inclusiveEnd - if len(inclusiveEnd) > 0 { - rv = make([]byte, len(inclusiveEnd)) - copy(rv, inclusiveEnd) - if rv[len(rv)-1] < 0xff { - // last byte can be incremented by one - rv[len(rv)-1]++ - } else { - // last byte is already 0xff, so append 0 - // next key is simply one byte longer - rv = append(rv, 0x0) - } - } - return rv -} - -func (i *IndexSnapshot) FieldDictRange(field string, startTerm []byte, - endTerm []byte) (index.FieldDict, error) { - return i.newIndexSnapshotFieldDict(field, func(i segment.TermDictionary) segment.DictionaryIterator { - endTermExclusive := calculateExclusiveEndFromInclusiveEnd(endTerm) - return i.AutomatonIterator(nil, startTerm, endTermExclusive) - }, false) -} - -// calculateExclusiveEndFromPrefix produces the first key that -// does not have the same prefix as the input bytes, suitable -// to use as the end key in a traditional (inclusive, exclusive] -// start/end range -func calculateExclusiveEndFromPrefix(in []byte) []byte { - rv := make([]byte, len(in)) - copy(rv, in) - for i := len(rv) - 1; i >= 0; i-- { - rv[i] = rv[i] + 1 - if rv[i] != 0 { - return rv // didn't 
overflow, so stop - } - } - // all bytes were 0xff, so return nil - // as there is no end key for this prefix - return nil -} - -func (i *IndexSnapshot) FieldDictPrefix(field string, - termPrefix []byte) (index.FieldDict, error) { - termPrefixEnd := calculateExclusiveEndFromPrefix(termPrefix) - return i.newIndexSnapshotFieldDict(field, func(i segment.TermDictionary) segment.DictionaryIterator { - return i.AutomatonIterator(nil, termPrefix, termPrefixEnd) - }, false) -} - -func (i *IndexSnapshot) FieldDictRegexp(field string, - termRegex string) (index.FieldDict, error) { - // TODO: potential optimization where the literal prefix represents the, - // entire regexp, allowing us to use PrefixIterator(prefixTerm)? - - a, prefixBeg, prefixEnd, err := parseRegexp(termRegex) - if err != nil { - return nil, err - } - - return i.newIndexSnapshotFieldDict(field, func(i segment.TermDictionary) segment.DictionaryIterator { - return i.AutomatonIterator(a, prefixBeg, prefixEnd) - }, false) -} - -func (i *IndexSnapshot) getLevAutomaton(term string, - fuzziness uint8) (vellum.Automaton, error) { - if fuzziness == 1 { - return lb1.BuildDfa(term, fuzziness) - } else if fuzziness == 2 { - return lb2.BuildDfa(term, fuzziness) - } - return nil, fmt.Errorf("fuzziness exceeds the max limit") -} - -func (i *IndexSnapshot) FieldDictFuzzy(field string, - term string, fuzziness int, prefix string) (index.FieldDict, error) { - a, err := i.getLevAutomaton(term, uint8(fuzziness)) - if err != nil { - return nil, err - } - - var prefixBeg, prefixEnd []byte - if prefix != "" { - prefixBeg = []byte(prefix) - prefixEnd = calculateExclusiveEndFromPrefix(prefixBeg) - } - - return i.newIndexSnapshotFieldDict(field, func(i segment.TermDictionary) segment.DictionaryIterator { - return i.AutomatonIterator(a, prefixBeg, prefixEnd) - }, false) -} - -func (i *IndexSnapshot) FieldDictContains(field string) (index.FieldDictContains, error) { - return i.newIndexSnapshotFieldDict(field, nil, true) -} - -func (i 
*IndexSnapshot) DocIDReaderAll() (index.DocIDReader, error) { - results := make(chan *asynchSegmentResult) - for index, segment := range i.segment { - go func(index int, segment *SegmentSnapshot) { - results <- &asynchSegmentResult{ - index: index, - docs: segment.DocNumbersLive(), - } - }(index, segment) - } - - return i.newDocIDReader(results) -} - -func (i *IndexSnapshot) DocIDReaderOnly(ids []string) (index.DocIDReader, error) { - results := make(chan *asynchSegmentResult) - for index, segment := range i.segment { - go func(index int, segment *SegmentSnapshot) { - docs, err := segment.DocNumbers(ids) - if err != nil { - results <- &asynchSegmentResult{err: err} - } else { - results <- &asynchSegmentResult{ - index: index, - docs: docs, - } - } - }(index, segment) - } - - return i.newDocIDReader(results) -} - -func (i *IndexSnapshot) newDocIDReader(results chan *asynchSegmentResult) (index.DocIDReader, error) { - rv := &IndexSnapshotDocIDReader{ - snapshot: i, - iterators: make([]roaring.IntIterable, len(i.segment)), - } - var err error - for count := 0; count < len(i.segment); count++ { - asr := <-results - if asr.err != nil { - if err == nil { - // returns the first error encountered - err = asr.err - } - } else if err == nil { - rv.iterators[asr.index] = asr.docs.Iterator() - } - } - - if err != nil { - return nil, err - } - - return rv, nil -} - -func (i *IndexSnapshot) Fields() ([]string, error) { - // FIXME not making this concurrent for now as it's not used in hot path - // of any searches at the moment (just a debug aid) - fieldsMap := map[string]struct{}{} - for _, segment := range i.segment { - fields := segment.Fields() - for _, field := range fields { - fieldsMap[field] = struct{}{} - } - } - rv := make([]string, 0, len(fieldsMap)) - for k := range fieldsMap { - rv = append(rv, k) - } - return rv, nil -} - -func (i *IndexSnapshot) GetInternal(key []byte) ([]byte, error) { - return i.internal[string(key)], nil -} - -func (i *IndexSnapshot) DocCount() 
(uint64, error) { - var rv uint64 - for _, segment := range i.segment { - rv += segment.Count() - } - return rv, nil -} - -func (i *IndexSnapshot) Document(id string) (rv index.Document, err error) { - // FIXME could be done more efficiently directly, but reusing for simplicity - tfr, err := i.TermFieldReader([]byte(id), "_id", false, false, false) - if err != nil { - return nil, err - } - defer func() { - if cerr := tfr.Close(); err == nil && cerr != nil { - err = cerr - } - }() - - next, err := tfr.Next(nil) - if err != nil { - return nil, err - } - - if next == nil { - // no such doc exists - return nil, nil - } - - docNum, err := docInternalToNumber(next.ID) - if err != nil { - return nil, err - } - segmentIndex, localDocNum := i.segmentIndexAndLocalDocNumFromGlobal(docNum) - - rvd := document.NewDocument(id) - err = i.segment[segmentIndex].VisitDocument(localDocNum, func(name string, typ byte, val []byte, pos []uint64) bool { - if name == "_id" { - return true - } - - // copy value, array positions to preserve them beyond the scope of this callback - value := append([]byte(nil), val...) - arrayPos := append([]uint64(nil), pos...) 
- - switch typ { - case 't': - rvd.AddField(document.NewTextField(name, arrayPos, value)) - case 'n': - rvd.AddField(document.NewNumericFieldFromBytes(name, arrayPos, value)) - case 'd': - rvd.AddField(document.NewDateTimeFieldFromBytes(name, arrayPos, value)) - case 'b': - rvd.AddField(document.NewBooleanFieldFromBytes(name, arrayPos, value)) - case 'g': - rvd.AddField(document.NewGeoPointFieldFromBytes(name, arrayPos, value)) - } - - return true - }) - if err != nil { - return nil, err - } - - return rvd, nil -} - -func (i *IndexSnapshot) segmentIndexAndLocalDocNumFromGlobal(docNum uint64) (int, uint64) { - segmentIndex := sort.Search(len(i.offsets), - func(x int) bool { - return i.offsets[x] > docNum - }) - 1 - - localDocNum := docNum - i.offsets[segmentIndex] - return int(segmentIndex), localDocNum -} - -func (i *IndexSnapshot) ExternalID(id index.IndexInternalID) (string, error) { - docNum, err := docInternalToNumber(id) - if err != nil { - return "", err - } - segmentIndex, localDocNum := i.segmentIndexAndLocalDocNumFromGlobal(docNum) - - v, err := i.segment[segmentIndex].DocID(localDocNum) - if err != nil { - return "", err - } - if v == nil { - return "", fmt.Errorf("document number %d not found", docNum) - } - - return string(v), nil -} - -func (i *IndexSnapshot) InternalID(id string) (rv index.IndexInternalID, err error) { - // FIXME could be done more efficiently directly, but reusing for simplicity - tfr, err := i.TermFieldReader([]byte(id), "_id", false, false, false) - if err != nil { - return nil, err - } - defer func() { - if cerr := tfr.Close(); err == nil && cerr != nil { - err = cerr - } - }() - - next, err := tfr.Next(nil) - if err != nil || next == nil { - return nil, err - } - - return next.ID, nil -} - -func (i *IndexSnapshot) TermFieldReader(term []byte, field string, includeFreq, - includeNorm, includeTermVectors bool) (index.TermFieldReader, error) { - rv := i.allocTermFieldReaderDicts(field) - - rv.term = term - rv.field = field - 
rv.snapshot = i - if rv.postings == nil { - rv.postings = make([]segment.PostingsList, len(i.segment)) - } - if rv.iterators == nil { - rv.iterators = make([]segment.PostingsIterator, len(i.segment)) - } - rv.segmentOffset = 0 - rv.includeFreq = includeFreq - rv.includeNorm = includeNorm - rv.includeTermVectors = includeTermVectors - rv.currPosting = nil - rv.currID = rv.currID[:0] - - if rv.dicts == nil { - rv.dicts = make([]segment.TermDictionary, len(i.segment)) - for i, segment := range i.segment { - dict, err := segment.segment.Dictionary(field) - if err != nil { - return nil, err - } - rv.dicts[i] = dict - } - } - - for i, segment := range i.segment { - pl, err := rv.dicts[i].PostingsList(term, segment.deleted, rv.postings[i]) - if err != nil { - return nil, err - } - rv.postings[i] = pl - rv.iterators[i] = pl.Iterator(includeFreq, includeNorm, includeTermVectors, rv.iterators[i]) - } - atomic.AddUint64(&i.parent.stats.TotTermSearchersStarted, uint64(1)) - return rv, nil -} - -func (i *IndexSnapshot) allocTermFieldReaderDicts(field string) (tfr *IndexSnapshotTermFieldReader) { - i.m2.Lock() - if i.fieldTFRs != nil { - tfrs := i.fieldTFRs[field] - last := len(tfrs) - 1 - if last >= 0 { - tfr = tfrs[last] - tfrs[last] = nil - i.fieldTFRs[field] = tfrs[:last] - i.m2.Unlock() - return - } - } - i.m2.Unlock() - return &IndexSnapshotTermFieldReader{ - recycle: true, - } -} - -func (i *IndexSnapshot) recycleTermFieldReader(tfr *IndexSnapshotTermFieldReader) { - if !tfr.recycle { - // Do not recycle an optimized unadorned term field reader (used for - // ConjunctionUnadorned or DisjunctionUnadorned), during when a fresh - // roaring.Bitmap is built by AND-ing or OR-ing individual bitmaps, - // and we'll need to release them for GC. 
(See MB-40916) - return - } - - i.parent.rootLock.RLock() - obsolete := i.parent.root != i - i.parent.rootLock.RUnlock() - if obsolete { - // if we're not the current root (mutations happened), don't bother recycling - return - } - - i.m2.Lock() - if i.fieldTFRs == nil { - i.fieldTFRs = map[string][]*IndexSnapshotTermFieldReader{} - } - i.fieldTFRs[tfr.field] = append(i.fieldTFRs[tfr.field], tfr) - i.m2.Unlock() -} - -func docNumberToBytes(buf []byte, in uint64) []byte { - if len(buf) != 8 { - if cap(buf) >= 8 { - buf = buf[0:8] - } else { - buf = make([]byte, 8) - } - } - binary.BigEndian.PutUint64(buf, in) - return buf -} - -func docInternalToNumber(in index.IndexInternalID) (uint64, error) { - if len(in) != 8 { - return 0, fmt.Errorf("wrong len for IndexInternalID: %q", in) - } - return binary.BigEndian.Uint64(in), nil -} - -func (i *IndexSnapshot) documentVisitFieldTermsOnSegment( - segmentIndex int, localDocNum uint64, fields []string, cFields []string, - visitor index.DocValueVisitor, dvs segment.DocVisitState) ( - cFieldsOut []string, dvsOut segment.DocVisitState, err error) { - ss := i.segment[segmentIndex] - - var vFields []string // fields that are visitable via the segment - - ssv, ssvOk := ss.segment.(segment.DocValueVisitable) - if ssvOk && ssv != nil { - vFields, err = ssv.VisitableDocValueFields() - if err != nil { - return nil, nil, err - } - } - - var errCh chan error - - // cFields represents the fields that we'll need from the - // cachedDocs, and might be optionally be provided by the caller, - // if the caller happens to know we're on the same segmentIndex - // from a previous invocation - if cFields == nil { - cFields = subtractStrings(fields, vFields) - - if !ss.cachedDocs.hasFields(cFields) { - errCh = make(chan error, 1) - - go func() { - err := ss.cachedDocs.prepareFields(cFields, ss) - if err != nil { - errCh <- err - } - close(errCh) - }() - } - } - - if ssvOk && ssv != nil && len(vFields) > 0 { - dvs, err = 
ssv.VisitDocValues(localDocNum, fields, visitor, dvs) - if err != nil { - return nil, nil, err - } - } - - if errCh != nil { - err = <-errCh - if err != nil { - return nil, nil, err - } - } - - if len(cFields) > 0 { - ss.cachedDocs.visitDoc(localDocNum, cFields, visitor) - } - - return cFields, dvs, nil -} - -func (i *IndexSnapshot) DocValueReader(fields []string) ( - index.DocValueReader, error) { - return &DocValueReader{i: i, fields: fields, currSegmentIndex: -1}, nil -} - -type DocValueReader struct { - i *IndexSnapshot - fields []string - dvs segment.DocVisitState - - currSegmentIndex int - currCachedFields []string -} - -func (dvr *DocValueReader) VisitDocValues(id index.IndexInternalID, - visitor index.DocValueVisitor) (err error) { - docNum, err := docInternalToNumber(id) - if err != nil { - return err - } - - segmentIndex, localDocNum := dvr.i.segmentIndexAndLocalDocNumFromGlobal(docNum) - if segmentIndex >= len(dvr.i.segment) { - return nil - } - - if dvr.currSegmentIndex != segmentIndex { - dvr.currSegmentIndex = segmentIndex - dvr.currCachedFields = nil - } - - dvr.currCachedFields, dvr.dvs, err = dvr.i.documentVisitFieldTermsOnSegment( - dvr.currSegmentIndex, localDocNum, dvr.fields, dvr.currCachedFields, visitor, dvr.dvs) - - return err -} - -func (i *IndexSnapshot) DumpAll() chan interface{} { - rv := make(chan interface{}) - go func() { - close(rv) - }() - return rv -} - -func (i *IndexSnapshot) DumpDoc(id string) chan interface{} { - rv := make(chan interface{}) - go func() { - close(rv) - }() - return rv -} - -func (i *IndexSnapshot) DumpFields() chan interface{} { - rv := make(chan interface{}) - go func() { - close(rv) - }() - return rv -} - -func (i *IndexSnapshot) diskSegmentsPaths() map[string]struct{} { - rv := make(map[string]struct{}, len(i.segment)) - for _, segmentSnapshot := range i.segment { - if seg, ok := segmentSnapshot.segment.(segment.PersistedSegment); ok { - rv[seg.Path()] = struct{}{} - } - } - return rv -} - -// 
reClaimableDocsRatio gives a ratio about the obsoleted or -// reclaimable documents present in a given index snapshot. -func (i *IndexSnapshot) reClaimableDocsRatio() float64 { - var totalCount, liveCount uint64 - for _, segmentSnapshot := range i.segment { - if _, ok := segmentSnapshot.segment.(segment.PersistedSegment); ok { - totalCount += uint64(segmentSnapshot.FullSize()) - liveCount += uint64(segmentSnapshot.Count()) - } - } - - if totalCount > 0 { - return float64(totalCount-liveCount) / float64(totalCount) - } - return 0 -} - -// subtractStrings returns set a minus elements of set b. -func subtractStrings(a, b []string) []string { - if len(b) == 0 { - return a - } - - rv := make([]string, 0, len(a)) -OUTER: - for _, as := range a { - for _, bs := range b { - if as == bs { - continue OUTER - } - } - rv = append(rv, as) - } - return rv -} diff --git a/vendor/github.com/blevesearch/bleve/v2/index/scorch/snapshot_index_doc.go b/vendor/github.com/blevesearch/bleve/v2/index/scorch/snapshot_index_doc.go deleted file mode 100644 index fe174e7e3..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/index/scorch/snapshot_index_doc.go +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright (c) 2017 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package scorch - -import ( - "bytes" - "reflect" - - "github.com/RoaringBitmap/roaring" - "github.com/blevesearch/bleve/v2/size" - index "github.com/blevesearch/bleve_index_api" -) - -var reflectStaticSizeIndexSnapshotDocIDReader int - -func init() { - var isdr IndexSnapshotDocIDReader - reflectStaticSizeIndexSnapshotDocIDReader = int(reflect.TypeOf(isdr).Size()) -} - -type IndexSnapshotDocIDReader struct { - snapshot *IndexSnapshot - iterators []roaring.IntIterable - segmentOffset int -} - -func (i *IndexSnapshotDocIDReader) Size() int { - return reflectStaticSizeIndexSnapshotDocIDReader + size.SizeOfPtr -} - -func (i *IndexSnapshotDocIDReader) Next() (index.IndexInternalID, error) { - for i.segmentOffset < len(i.iterators) { - if !i.iterators[i.segmentOffset].HasNext() { - i.segmentOffset++ - continue - } - next := i.iterators[i.segmentOffset].Next() - // make segment number into global number by adding offset - globalOffset := i.snapshot.offsets[i.segmentOffset] - return docNumberToBytes(nil, uint64(next)+globalOffset), nil - } - return nil, nil -} - -func (i *IndexSnapshotDocIDReader) Advance(ID index.IndexInternalID) (index.IndexInternalID, error) { - // FIXME do something better - next, err := i.Next() - if err != nil { - return nil, err - } - if next == nil { - return nil, nil - } - for bytes.Compare(next, ID) < 0 { - next, err = i.Next() - if err != nil { - return nil, err - } - if next == nil { - break - } - } - return next, nil -} - -func (i *IndexSnapshotDocIDReader) Close() error { - return nil -} diff --git a/vendor/github.com/blevesearch/bleve/v2/index/scorch/snapshot_index_tfr.go b/vendor/github.com/blevesearch/bleve/v2/index/scorch/snapshot_index_tfr.go deleted file mode 100644 index e983e3dd2..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/index/scorch/snapshot_index_tfr.go +++ /dev/null @@ -1,188 +0,0 @@ -// Copyright (c) 2017 Couchbase, Inc. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package scorch - -import ( - "bytes" - "fmt" - "reflect" - "sync/atomic" - - "github.com/blevesearch/bleve/v2/size" - index "github.com/blevesearch/bleve_index_api" - segment "github.com/blevesearch/scorch_segment_api/v2" -) - -var reflectStaticSizeIndexSnapshotTermFieldReader int - -func init() { - var istfr IndexSnapshotTermFieldReader - reflectStaticSizeIndexSnapshotTermFieldReader = int(reflect.TypeOf(istfr).Size()) -} - -type IndexSnapshotTermFieldReader struct { - term []byte - field string - snapshot *IndexSnapshot - dicts []segment.TermDictionary - postings []segment.PostingsList - iterators []segment.PostingsIterator - segmentOffset int - includeFreq bool - includeNorm bool - includeTermVectors bool - currPosting segment.Posting - currID index.IndexInternalID - recycle bool -} - -func (i *IndexSnapshotTermFieldReader) Size() int { - sizeInBytes := reflectStaticSizeIndexSnapshotTermFieldReader + size.SizeOfPtr + - len(i.term) + - len(i.field) + - len(i.currID) - - for _, entry := range i.postings { - sizeInBytes += entry.Size() - } - - for _, entry := range i.iterators { - sizeInBytes += entry.Size() - } - - if i.currPosting != nil { - sizeInBytes += i.currPosting.Size() - } - - return sizeInBytes -} - -func (i *IndexSnapshotTermFieldReader) Next(preAlloced *index.TermFieldDoc) (*index.TermFieldDoc, error) { - rv := preAlloced - if rv == nil { - rv = &index.TermFieldDoc{} - } - // find the next hit 
- for i.segmentOffset < len(i.iterators) { - next, err := i.iterators[i.segmentOffset].Next() - if err != nil { - return nil, err - } - if next != nil { - // make segment number into global number by adding offset - globalOffset := i.snapshot.offsets[i.segmentOffset] - nnum := next.Number() - rv.ID = docNumberToBytes(rv.ID, nnum+globalOffset) - i.postingToTermFieldDoc(next, rv) - - i.currID = rv.ID - i.currPosting = next - return rv, nil - } - i.segmentOffset++ - } - return nil, nil -} - -func (i *IndexSnapshotTermFieldReader) postingToTermFieldDoc(next segment.Posting, rv *index.TermFieldDoc) { - if i.includeFreq { - rv.Freq = next.Frequency() - } - if i.includeNorm { - rv.Norm = next.Norm() - } - if i.includeTermVectors { - locs := next.Locations() - if cap(rv.Vectors) < len(locs) { - rv.Vectors = make([]*index.TermFieldVector, len(locs)) - backing := make([]index.TermFieldVector, len(locs)) - for i := range backing { - rv.Vectors[i] = &backing[i] - } - } - rv.Vectors = rv.Vectors[:len(locs)] - for i, loc := range locs { - *rv.Vectors[i] = index.TermFieldVector{ - Start: loc.Start(), - End: loc.End(), - Pos: loc.Pos(), - ArrayPositions: loc.ArrayPositions(), - Field: loc.Field(), - } - } - } -} - -func (i *IndexSnapshotTermFieldReader) Advance(ID index.IndexInternalID, preAlloced *index.TermFieldDoc) (*index.TermFieldDoc, error) { - // FIXME do something better - // for now, if we need to seek backwards, then restart from the beginning - if i.currPosting != nil && bytes.Compare(i.currID, ID) >= 0 { - i2, err := i.snapshot.TermFieldReader(i.term, i.field, - i.includeFreq, i.includeNorm, i.includeTermVectors) - if err != nil { - return nil, err - } - // close the current term field reader before replacing it with a new one - _ = i.Close() - *i = *(i2.(*IndexSnapshotTermFieldReader)) - } - num, err := docInternalToNumber(ID) - if err != nil { - return nil, fmt.Errorf("error converting to doc number % x - %v", ID, err) - } - segIndex, ldocNum := 
i.snapshot.segmentIndexAndLocalDocNumFromGlobal(num) - if segIndex >= len(i.snapshot.segment) { - return nil, fmt.Errorf("computed segment index %d out of bounds %d", - segIndex, len(i.snapshot.segment)) - } - // skip directly to the target segment - i.segmentOffset = segIndex - next, err := i.iterators[i.segmentOffset].Advance(ldocNum) - if err != nil { - return nil, err - } - if next == nil { - // we jumped directly to the segment that should have contained it - // but it wasn't there, so reuse Next() which should correctly - // get the next hit after it (we moved i.segmentOffset) - return i.Next(preAlloced) - } - - if preAlloced == nil { - preAlloced = &index.TermFieldDoc{} - } - preAlloced.ID = docNumberToBytes(preAlloced.ID, next.Number()+ - i.snapshot.offsets[segIndex]) - i.postingToTermFieldDoc(next, preAlloced) - i.currID = preAlloced.ID - i.currPosting = next - return preAlloced, nil -} - -func (i *IndexSnapshotTermFieldReader) Count() uint64 { - var rv uint64 - for _, posting := range i.postings { - rv += posting.Count() - } - return rv -} - -func (i *IndexSnapshotTermFieldReader) Close() error { - if i.snapshot != nil { - atomic.AddUint64(&i.snapshot.parent.stats.TotTermSearchersFinished, uint64(1)) - i.snapshot.recycleTermFieldReader(i) - } - return nil -} diff --git a/vendor/github.com/blevesearch/bleve/v2/index/scorch/snapshot_segment.go b/vendor/github.com/blevesearch/bleve/v2/index/scorch/snapshot_segment.go deleted file mode 100644 index e017eb2a9..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/index/scorch/snapshot_segment.go +++ /dev/null @@ -1,279 +0,0 @@ -// Copyright (c) 2017 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package scorch - -import ( - "bytes" - "sync" - "sync/atomic" - - "github.com/RoaringBitmap/roaring" - "github.com/blevesearch/bleve/v2/size" - index "github.com/blevesearch/bleve_index_api" - segment "github.com/blevesearch/scorch_segment_api/v2" -) - -var TermSeparator byte = 0xff - -var TermSeparatorSplitSlice = []byte{TermSeparator} - -type SegmentSnapshot struct { - id uint64 - segment segment.Segment - deleted *roaring.Bitmap - creator string - - cachedDocs *cachedDocs -} - -func (s *SegmentSnapshot) Segment() segment.Segment { - return s.segment -} - -func (s *SegmentSnapshot) Deleted() *roaring.Bitmap { - return s.deleted -} - -func (s *SegmentSnapshot) Id() uint64 { - return s.id -} - -func (s *SegmentSnapshot) FullSize() int64 { - return int64(s.segment.Count()) -} - -func (s SegmentSnapshot) LiveSize() int64 { - return int64(s.Count()) -} - -func (s *SegmentSnapshot) Close() error { - return s.segment.Close() -} - -func (s *SegmentSnapshot) VisitDocument(num uint64, visitor segment.StoredFieldValueVisitor) error { - return s.segment.VisitStoredFields(num, visitor) -} - -func (s *SegmentSnapshot) DocID(num uint64) ([]byte, error) { - return s.segment.DocID(num) -} - -func (s *SegmentSnapshot) Count() uint64 { - rv := s.segment.Count() - if s.deleted != nil { - rv -= s.deleted.GetCardinality() - } - return rv -} - -func (s *SegmentSnapshot) DocNumbers(docIDs []string) (*roaring.Bitmap, error) { - rv, err := s.segment.DocNumbers(docIDs) - if err != nil { - return nil, err - } - if s.deleted != nil { - rv.AndNot(s.deleted) - } - return 
rv, nil -} - -// DocNumbersLive returns a bitmap containing doc numbers for all live docs -func (s *SegmentSnapshot) DocNumbersLive() *roaring.Bitmap { - rv := roaring.NewBitmap() - rv.AddRange(0, s.segment.Count()) - if s.deleted != nil { - rv.AndNot(s.deleted) - } - return rv -} - -func (s *SegmentSnapshot) Fields() []string { - return s.segment.Fields() -} - -func (s *SegmentSnapshot) Size() (rv int) { - rv = s.segment.Size() - if s.deleted != nil { - rv += int(s.deleted.GetSizeInBytes()) - } - rv += s.cachedDocs.Size() - return -} - -type cachedFieldDocs struct { - m sync.Mutex - readyCh chan struct{} // closed when the cachedFieldDocs.docs is ready to be used. - err error // Non-nil if there was an error when preparing this cachedFieldDocs. - docs map[uint64][]byte // Keyed by localDocNum, value is a list of terms delimited by 0xFF. - size uint64 -} - -func (cfd *cachedFieldDocs) Size() int { - var rv int - cfd.m.Lock() - for _, entry := range cfd.docs { - rv += 8 /* size of uint64 */ + len(entry) - } - cfd.m.Unlock() - return rv -} - -func (cfd *cachedFieldDocs) prepareField(field string, ss *SegmentSnapshot) { - cfd.m.Lock() - defer func() { - close(cfd.readyCh) - cfd.m.Unlock() - }() - - cfd.size += uint64(size.SizeOfUint64) /* size field */ - dict, err := ss.segment.Dictionary(field) - if err != nil { - cfd.err = err - return - } - - var postings segment.PostingsList - var postingsItr segment.PostingsIterator - - dictItr := dict.AutomatonIterator(nil, nil, nil) - next, err := dictItr.Next() - for err == nil && next != nil { - var err1 error - postings, err1 = dict.PostingsList([]byte(next.Term), nil, postings) - if err1 != nil { - cfd.err = err1 - return - } - - cfd.size += uint64(size.SizeOfUint64) /* map key */ - postingsItr = postings.Iterator(false, false, false, postingsItr) - nextPosting, err2 := postingsItr.Next() - for err2 == nil && nextPosting != nil { - docNum := nextPosting.Number() - cfd.docs[docNum] = append(cfd.docs[docNum], 
[]byte(next.Term)...) - cfd.docs[docNum] = append(cfd.docs[docNum], TermSeparator) - cfd.size += uint64(len(next.Term) + 1) // map value - nextPosting, err2 = postingsItr.Next() - } - - if err2 != nil { - cfd.err = err2 - return - } - - next, err = dictItr.Next() - } - - if err != nil { - cfd.err = err - return - } -} - -type cachedDocs struct { - size uint64 - m sync.Mutex // As the cache is asynchronously prepared, need a lock - cache map[string]*cachedFieldDocs // Keyed by field -} - -func (c *cachedDocs) prepareFields(wantedFields []string, ss *SegmentSnapshot) error { - c.m.Lock() - - if c.cache == nil { - c.cache = make(map[string]*cachedFieldDocs, len(ss.Fields())) - } - - for _, field := range wantedFields { - _, exists := c.cache[field] - if !exists { - c.cache[field] = &cachedFieldDocs{ - readyCh: make(chan struct{}), - docs: make(map[uint64][]byte), - } - - go c.cache[field].prepareField(field, ss) - } - } - - for _, field := range wantedFields { - cachedFieldDocs := c.cache[field] - c.m.Unlock() - <-cachedFieldDocs.readyCh - - if cachedFieldDocs.err != nil { - return cachedFieldDocs.err - } - c.m.Lock() - } - - c.updateSizeLOCKED() - - c.m.Unlock() - return nil -} - -// hasFields returns true if the cache has all the given fields -func (c *cachedDocs) hasFields(fields []string) bool { - c.m.Lock() - for _, field := range fields { - if _, exists := c.cache[field]; !exists { - c.m.Unlock() - return false // found a field not in cache - } - } - c.m.Unlock() - return true -} - -func (c *cachedDocs) Size() int { - return int(atomic.LoadUint64(&c.size)) -} - -func (c *cachedDocs) updateSizeLOCKED() { - sizeInBytes := 0 - for k, v := range c.cache { // cachedFieldDocs - sizeInBytes += len(k) - if v != nil { - sizeInBytes += v.Size() - } - } - atomic.StoreUint64(&c.size, uint64(sizeInBytes)) -} - -func (c *cachedDocs) visitDoc(localDocNum uint64, - fields []string, visitor index.DocValueVisitor) { - c.m.Lock() - - for _, field := range fields { - if 
cachedFieldDocs, exists := c.cache[field]; exists { - c.m.Unlock() - <-cachedFieldDocs.readyCh - c.m.Lock() - - if tlist, exists := cachedFieldDocs.docs[localDocNum]; exists { - for { - i := bytes.Index(tlist, TermSeparatorSplitSlice) - if i < 0 { - break - } - visitor(field, tlist[0:i]) - tlist = tlist[i+1:] - } - } - } - } - - c.m.Unlock() -} diff --git a/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/analysis.go b/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/analysis.go deleted file mode 100644 index 1ebd1918e..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/analysis.go +++ /dev/null @@ -1,129 +0,0 @@ -// Copyright (c) 2015 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package upsidedown - -import ( - index "github.com/blevesearch/bleve_index_api" -) - -type IndexRow interface { - KeySize() int - KeyTo([]byte) (int, error) - Key() []byte - - ValueSize() int - ValueTo([]byte) (int, error) - Value() []byte -} - -type AnalysisResult struct { - DocID string - Rows []IndexRow -} - -func (udc *UpsideDownCouch) Analyze(d index.Document) *AnalysisResult { - return udc.analyze(d) -} - -func (udc *UpsideDownCouch) analyze(d index.Document) *AnalysisResult { - rv := &AnalysisResult{ - DocID: d.ID(), - Rows: make([]IndexRow, 0, 100), - } - - docIDBytes := []byte(d.ID()) - - // track our back index entries - backIndexStoredEntries := make([]*BackIndexStoreEntry, 0) - - // information we collate as we merge fields with same name - fieldTermFreqs := make(map[uint16]index.TokenFrequencies) - fieldLengths := make(map[uint16]int) - fieldIncludeTermVectors := make(map[uint16]bool) - fieldNames := make(map[uint16]string) - - analyzeField := func(field index.Field, storable bool) { - fieldIndex, newFieldRow := udc.fieldIndexOrNewRow(field.Name()) - if newFieldRow != nil { - rv.Rows = append(rv.Rows, newFieldRow) - } - fieldNames[fieldIndex] = field.Name() - - if field.Options().IsIndexed() { - field.Analyze() - fieldLength := field.AnalyzedLength() - tokenFreqs := field.AnalyzedTokenFrequencies() - existingFreqs := fieldTermFreqs[fieldIndex] - if existingFreqs == nil { - fieldTermFreqs[fieldIndex] = tokenFreqs - } else { - existingFreqs.MergeAll(field.Name(), tokenFreqs) - fieldTermFreqs[fieldIndex] = existingFreqs - } - fieldLengths[fieldIndex] += fieldLength - fieldIncludeTermVectors[fieldIndex] = field.Options().IncludeTermVectors() - } - - if storable && field.Options().IsStored() { - rv.Rows, backIndexStoredEntries = udc.storeField(docIDBytes, field, fieldIndex, rv.Rows, backIndexStoredEntries) - } - } - - // walk all the fields, record stored fields now - // place information about indexed fields into map - // this collates information 
across fields with - // same names (arrays) - d.VisitFields(func(field index.Field) { - analyzeField(field, true) - }) - - if d.HasComposite() { - for fieldIndex, tokenFreqs := range fieldTermFreqs { - // see if any of the composite fields need this - d.VisitComposite(func(field index.CompositeField) { - field.Compose(fieldNames[fieldIndex], fieldLengths[fieldIndex], tokenFreqs) - }) - } - - d.VisitComposite(func(field index.CompositeField) { - analyzeField(field, false) - }) - } - - rowsCapNeeded := len(rv.Rows) + 1 - for _, tokenFreqs := range fieldTermFreqs { - rowsCapNeeded += len(tokenFreqs) - } - - rv.Rows = append(make([]IndexRow, 0, rowsCapNeeded), rv.Rows...) - - backIndexTermsEntries := make([]*BackIndexTermsEntry, 0, len(fieldTermFreqs)) - - // walk through the collated information and process - // once for each indexed field (unique name) - for fieldIndex, tokenFreqs := range fieldTermFreqs { - fieldLength := fieldLengths[fieldIndex] - includeTermVectors := fieldIncludeTermVectors[fieldIndex] - - // encode this field - rv.Rows, backIndexTermsEntries = udc.indexField(docIDBytes, includeTermVectors, fieldIndex, fieldLength, tokenFreqs, rv.Rows, backIndexTermsEntries) - } - - // build the back index row - backIndexRow := NewBackIndexRow(docIDBytes, backIndexTermsEntries, backIndexStoredEntries) - rv.Rows = append(rv.Rows, backIndexRow) - - return rv -} diff --git a/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/benchmark_all.sh b/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/benchmark_all.sh deleted file mode 100644 index 079fef186..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/benchmark_all.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/sh - -BENCHMARKS=`grep "func Benchmark" *_test.go | sed 's/.*func //' | sed s/\(.*{//` - -for BENCHMARK in $BENCHMARKS -do - go test -v -run=xxx -bench=^$BENCHMARK$ -benchtime=10s -tags 'forestdb leveldb' | grep -v ok | grep -v PASS -done diff --git 
a/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/dump.go b/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/dump.go deleted file mode 100644 index 64ebb1b26..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/dump.go +++ /dev/null @@ -1,174 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package upsidedown - -import ( - "bytes" - "sort" - - "github.com/blevesearch/upsidedown_store_api" -) - -// the functions in this file are only intended to be used by -// the bleve_dump utility and the debug http handlers -// if your application relies on them, you're doing something wrong -// they may change or be removed at any time - -func dumpPrefix(kvreader store.KVReader, rv chan interface{}, prefix []byte) { - start := prefix - if start == nil { - start = []byte{0} - } - it := kvreader.PrefixIterator(start) - defer func() { - cerr := it.Close() - if cerr != nil { - rv <- cerr - } - }() - key, val, valid := it.Current() - for valid { - ck := make([]byte, len(key)) - copy(ck, key) - cv := make([]byte, len(val)) - copy(cv, val) - row, err := ParseFromKeyValue(ck, cv) - if err != nil { - rv <- err - return - } - rv <- row - - it.Next() - key, val, valid = it.Current() - } -} - -func dumpRange(kvreader store.KVReader, rv chan interface{}, start, end []byte) { - it := kvreader.RangeIterator(start, end) - defer func() { - cerr := it.Close() - if cerr != nil { - rv <- cerr - } - }() - 
key, val, valid := it.Current() - for valid { - ck := make([]byte, len(key)) - copy(ck, key) - cv := make([]byte, len(val)) - copy(cv, val) - row, err := ParseFromKeyValue(ck, cv) - if err != nil { - rv <- err - return - } - rv <- row - - it.Next() - key, val, valid = it.Current() - } -} - -func (i *IndexReader) DumpAll() chan interface{} { - rv := make(chan interface{}) - go func() { - defer close(rv) - dumpRange(i.kvreader, rv, nil, nil) - }() - return rv -} - -func (i *IndexReader) DumpFields() chan interface{} { - rv := make(chan interface{}) - go func() { - defer close(rv) - dumpPrefix(i.kvreader, rv, []byte{'f'}) - }() - return rv -} - -type keyset [][]byte - -func (k keyset) Len() int { return len(k) } -func (k keyset) Swap(i, j int) { k[i], k[j] = k[j], k[i] } -func (k keyset) Less(i, j int) bool { return bytes.Compare(k[i], k[j]) < 0 } - -// DumpDoc returns all rows in the index related to this doc id -func (i *IndexReader) DumpDoc(id string) chan interface{} { - idBytes := []byte(id) - - rv := make(chan interface{}) - - go func() { - defer close(rv) - - back, err := backIndexRowForDoc(i.kvreader, []byte(id)) - if err != nil { - rv <- err - return - } - - // no such doc - if back == nil { - return - } - // build sorted list of term keys - keys := make(keyset, 0) - for _, entry := range back.termsEntries { - for i := range entry.Terms { - tfr := NewTermFrequencyRow([]byte(entry.Terms[i]), uint16(*entry.Field), idBytes, 0, 0) - key := tfr.Key() - keys = append(keys, key) - } - } - sort.Sort(keys) - - // first add all the stored rows - storedRowPrefix := NewStoredRow(idBytes, 0, []uint64{}, 'x', []byte{}).ScanPrefixForDoc() - dumpPrefix(i.kvreader, rv, storedRowPrefix) - - // now walk term keys in order and add them as well - if len(keys) > 0 { - it := i.kvreader.RangeIterator(keys[0], nil) - defer func() { - cerr := it.Close() - if cerr != nil { - rv <- cerr - } - }() - - for _, key := range keys { - it.Seek(key) - rkey, rval, valid := it.Current() - if 
!valid { - break - } - rck := make([]byte, len(rkey)) - copy(rck, key) - rcv := make([]byte, len(rval)) - copy(rcv, rval) - row, err := ParseFromKeyValue(rck, rcv) - if err != nil { - rv <- err - return - } - rv <- row - } - } - }() - - return rv -} diff --git a/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/field_cache.go b/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/field_cache.go deleted file mode 100644 index 1f68b71dd..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/field_cache.go +++ /dev/null @@ -1,88 +0,0 @@ -// Copyright (c) 2015 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package upsidedown - -import ( - "sync" -) - -type FieldCache struct { - fieldIndexes map[string]uint16 - indexFields []string - lastFieldIndex int - mutex sync.RWMutex -} - -func NewFieldCache() *FieldCache { - return &FieldCache{ - fieldIndexes: make(map[string]uint16), - lastFieldIndex: -1, - } -} - -func (f *FieldCache) AddExisting(field string, index uint16) { - f.mutex.Lock() - f.addLOCKED(field, index) - f.mutex.Unlock() -} - -func (f *FieldCache) addLOCKED(field string, index uint16) uint16 { - f.fieldIndexes[field] = index - if len(f.indexFields) < int(index)+1 { - prevIndexFields := f.indexFields - f.indexFields = make([]string, int(index)+16) - copy(f.indexFields, prevIndexFields) - } - f.indexFields[int(index)] = field - if int(index) > f.lastFieldIndex { - f.lastFieldIndex = int(index) - } - return index -} - -// FieldNamed returns the index of the field, and whether or not it existed -// before this call. if createIfMissing is true, and new field index is assigned -// but the second return value will still be false -func (f *FieldCache) FieldNamed(field string, createIfMissing bool) (uint16, bool) { - f.mutex.RLock() - if index, ok := f.fieldIndexes[field]; ok { - f.mutex.RUnlock() - return index, true - } else if !createIfMissing { - f.mutex.RUnlock() - return 0, false - } - // trade read lock for write lock - f.mutex.RUnlock() - f.mutex.Lock() - // need to check again with write lock - if index, ok := f.fieldIndexes[field]; ok { - f.mutex.Unlock() - return index, true - } - // assign next field id - index := f.addLOCKED(field, uint16(f.lastFieldIndex+1)) - f.mutex.Unlock() - return index, false -} - -func (f *FieldCache) FieldIndexed(index uint16) (field string) { - f.mutex.RLock() - if int(index) < len(f.indexFields) { - field = f.indexFields[int(index)] - } - f.mutex.RUnlock() - return field -} diff --git a/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/field_dict.go 
b/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/field_dict.go deleted file mode 100644 index c4be57740..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/field_dict.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package upsidedown - -import ( - "fmt" - - index "github.com/blevesearch/bleve_index_api" - store "github.com/blevesearch/upsidedown_store_api" -) - -type UpsideDownCouchFieldDict struct { - indexReader *IndexReader - iterator store.KVIterator - dictRow *DictionaryRow - dictEntry *index.DictEntry - field uint16 -} - -func newUpsideDownCouchFieldDict(indexReader *IndexReader, field uint16, startTerm, endTerm []byte) (*UpsideDownCouchFieldDict, error) { - - startKey := NewDictionaryRow(startTerm, field, 0).Key() - if endTerm == nil { - endTerm = []byte{ByteSeparator} - } else { - endTerm = incrementBytes(endTerm) - } - endKey := NewDictionaryRow(endTerm, field, 0).Key() - - it := indexReader.kvreader.RangeIterator(startKey, endKey) - - return &UpsideDownCouchFieldDict{ - indexReader: indexReader, - iterator: it, - dictRow: &DictionaryRow{}, // Pre-alloced, reused row. - dictEntry: &index.DictEntry{}, // Pre-alloced, reused entry. 
- field: field, - }, nil - -} - -func (r *UpsideDownCouchFieldDict) Next() (*index.DictEntry, error) { - key, val, valid := r.iterator.Current() - if !valid { - return nil, nil - } - - err := r.dictRow.parseDictionaryK(key) - if err != nil { - return nil, fmt.Errorf("unexpected error parsing dictionary row key: %v", err) - } - err = r.dictRow.parseDictionaryV(val) - if err != nil { - return nil, fmt.Errorf("unexpected error parsing dictionary row val: %v", err) - } - r.dictEntry.Term = string(r.dictRow.term) - r.dictEntry.Count = r.dictRow.count - // advance the iterator to the next term - r.iterator.Next() - return r.dictEntry, nil - -} - -func (r *UpsideDownCouchFieldDict) Close() error { - return r.iterator.Close() -} diff --git a/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/index_reader.go b/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/index_reader.go deleted file mode 100644 index ff0986d57..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/index_reader.go +++ /dev/null @@ -1,225 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package upsidedown - -import ( - "reflect" - - "github.com/blevesearch/bleve/v2/document" - index "github.com/blevesearch/bleve_index_api" - "github.com/blevesearch/upsidedown_store_api" -) - -var reflectStaticSizeIndexReader int - -func init() { - var ir IndexReader - reflectStaticSizeIndexReader = int(reflect.TypeOf(ir).Size()) -} - -type IndexReader struct { - index *UpsideDownCouch - kvreader store.KVReader - docCount uint64 -} - -func (i *IndexReader) TermFieldReader(term []byte, fieldName string, includeFreq, includeNorm, includeTermVectors bool) (index.TermFieldReader, error) { - fieldIndex, fieldExists := i.index.fieldCache.FieldNamed(fieldName, false) - if fieldExists { - return newUpsideDownCouchTermFieldReader(i, term, uint16(fieldIndex), includeFreq, includeNorm, includeTermVectors) - } - return newUpsideDownCouchTermFieldReader(i, []byte{ByteSeparator}, ^uint16(0), includeFreq, includeNorm, includeTermVectors) -} - -func (i *IndexReader) FieldDict(fieldName string) (index.FieldDict, error) { - return i.FieldDictRange(fieldName, nil, nil) -} - -func (i *IndexReader) FieldDictRange(fieldName string, startTerm []byte, endTerm []byte) (index.FieldDict, error) { - fieldIndex, fieldExists := i.index.fieldCache.FieldNamed(fieldName, false) - if fieldExists { - return newUpsideDownCouchFieldDict(i, uint16(fieldIndex), startTerm, endTerm) - } - return newUpsideDownCouchFieldDict(i, ^uint16(0), []byte{ByteSeparator}, []byte{}) -} - -func (i *IndexReader) FieldDictPrefix(fieldName string, termPrefix []byte) (index.FieldDict, error) { - return i.FieldDictRange(fieldName, termPrefix, termPrefix) -} - -func (i *IndexReader) DocIDReaderAll() (index.DocIDReader, error) { - return newUpsideDownCouchDocIDReader(i) -} - -func (i *IndexReader) DocIDReaderOnly(ids []string) (index.DocIDReader, error) { - return newUpsideDownCouchDocIDReaderOnly(i, ids) -} - -func (i *IndexReader) Document(id string) (doc index.Document, err error) { - // first hit the back index to 
confirm doc exists - var backIndexRow *BackIndexRow - backIndexRow, err = backIndexRowForDoc(i.kvreader, []byte(id)) - if err != nil { - return - } - if backIndexRow == nil { - return - } - rvd := document.NewDocument(id) - storedRow := NewStoredRow([]byte(id), 0, []uint64{}, 'x', nil) - storedRowScanPrefix := storedRow.ScanPrefixForDoc() - it := i.kvreader.PrefixIterator(storedRowScanPrefix) - defer func() { - if cerr := it.Close(); err == nil && cerr != nil { - err = cerr - } - }() - key, val, valid := it.Current() - for valid { - safeVal := make([]byte, len(val)) - copy(safeVal, val) - var row *StoredRow - row, err = NewStoredRowKV(key, safeVal) - if err != nil { - return nil, err - } - if row != nil { - fieldName := i.index.fieldCache.FieldIndexed(row.field) - field := decodeFieldType(row.typ, fieldName, row.arrayPositions, row.value) - if field != nil { - rvd.AddField(field) - } - } - - it.Next() - key, val, valid = it.Current() - } - return rvd, nil -} - -func (i *IndexReader) documentVisitFieldTerms(id index.IndexInternalID, fields []string, visitor index.DocValueVisitor) error { - fieldsMap := make(map[uint16]string, len(fields)) - for _, f := range fields { - id, ok := i.index.fieldCache.FieldNamed(f, false) - if ok { - fieldsMap[id] = f - } - } - - tempRow := BackIndexRow{ - doc: id, - } - - keyBuf := GetRowBuffer() - if tempRow.KeySize() > len(keyBuf) { - keyBuf = make([]byte, 2*tempRow.KeySize()) - } - defer PutRowBuffer(keyBuf) - keySize, err := tempRow.KeyTo(keyBuf) - if err != nil { - return err - } - - value, err := i.kvreader.Get(keyBuf[:keySize]) - if err != nil { - return err - } - if value == nil { - return nil - } - - return visitBackIndexRow(value, func(field uint32, term []byte) { - if field, ok := fieldsMap[uint16(field)]; ok { - visitor(field, term) - } - }) -} - -func (i *IndexReader) Fields() (fields []string, err error) { - fields = make([]string, 0) - it := i.kvreader.PrefixIterator([]byte{'f'}) - defer func() { - if cerr := it.Close(); 
err == nil && cerr != nil { - err = cerr - } - }() - key, val, valid := it.Current() - for valid { - var row UpsideDownCouchRow - row, err = ParseFromKeyValue(key, val) - if err != nil { - fields = nil - return - } - if row != nil { - fieldRow, ok := row.(*FieldRow) - if ok { - fields = append(fields, fieldRow.name) - } - } - - it.Next() - key, val, valid = it.Current() - } - return -} - -func (i *IndexReader) GetInternal(key []byte) ([]byte, error) { - internalRow := NewInternalRow(key, nil) - return i.kvreader.Get(internalRow.Key()) -} - -func (i *IndexReader) DocCount() (uint64, error) { - return i.docCount, nil -} - -func (i *IndexReader) Close() error { - return i.kvreader.Close() -} - -func (i *IndexReader) ExternalID(id index.IndexInternalID) (string, error) { - return string(id), nil -} - -func (i *IndexReader) InternalID(id string) (index.IndexInternalID, error) { - return index.IndexInternalID(id), nil -} - -func incrementBytes(in []byte) []byte { - rv := make([]byte, len(in)) - copy(rv, in) - for i := len(rv) - 1; i >= 0; i-- { - rv[i] = rv[i] + 1 - if rv[i] != 0 { - // didn't overflow, so stop - break - } - } - return rv -} - -func (i *IndexReader) DocValueReader(fields []string) (index.DocValueReader, error) { - return &DocValueReader{i: i, fields: fields}, nil -} - -type DocValueReader struct { - i *IndexReader - fields []string -} - -func (dvr *DocValueReader) VisitDocValues(id index.IndexInternalID, - visitor index.DocValueVisitor) error { - return dvr.i.documentVisitFieldTerms(id, dvr.fields, visitor) -} diff --git a/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/reader.go b/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/reader.go deleted file mode 100644 index 68b15318e..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/reader.go +++ /dev/null @@ -1,376 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package upsidedown - -import ( - "bytes" - "reflect" - "sort" - "sync/atomic" - - "github.com/blevesearch/bleve/v2/size" - index "github.com/blevesearch/bleve_index_api" - "github.com/blevesearch/upsidedown_store_api" -) - -var reflectStaticSizeUpsideDownCouchTermFieldReader int -var reflectStaticSizeUpsideDownCouchDocIDReader int - -func init() { - var tfr UpsideDownCouchTermFieldReader - reflectStaticSizeUpsideDownCouchTermFieldReader = - int(reflect.TypeOf(tfr).Size()) - var cdr UpsideDownCouchDocIDReader - reflectStaticSizeUpsideDownCouchDocIDReader = - int(reflect.TypeOf(cdr).Size()) -} - -type UpsideDownCouchTermFieldReader struct { - count uint64 - indexReader *IndexReader - iterator store.KVIterator - term []byte - tfrNext *TermFrequencyRow - tfrPrealloc TermFrequencyRow - keyBuf []byte - field uint16 - includeTermVectors bool -} - -func (r *UpsideDownCouchTermFieldReader) Size() int { - sizeInBytes := reflectStaticSizeUpsideDownCouchTermFieldReader + size.SizeOfPtr + - len(r.term) + - r.tfrPrealloc.Size() + - len(r.keyBuf) - - if r.tfrNext != nil { - sizeInBytes += r.tfrNext.Size() - } - - return sizeInBytes -} - -func newUpsideDownCouchTermFieldReader(indexReader *IndexReader, term []byte, field uint16, includeFreq, includeNorm, includeTermVectors bool) (*UpsideDownCouchTermFieldReader, error) { - bufNeeded := termFrequencyRowKeySize(term, nil) - if bufNeeded < dictionaryRowKeySize(term) { - 
bufNeeded = dictionaryRowKeySize(term) - } - buf := make([]byte, bufNeeded) - - bufUsed := dictionaryRowKeyTo(buf, field, term) - val, err := indexReader.kvreader.Get(buf[:bufUsed]) - if err != nil { - return nil, err - } - if val == nil { - atomic.AddUint64(&indexReader.index.stats.termSearchersStarted, uint64(1)) - rv := &UpsideDownCouchTermFieldReader{ - count: 0, - term: term, - field: field, - includeTermVectors: includeTermVectors, - } - rv.tfrNext = &rv.tfrPrealloc - return rv, nil - } - - count, err := dictionaryRowParseV(val) - if err != nil { - return nil, err - } - - bufUsed = termFrequencyRowKeyTo(buf, field, term, nil) - it := indexReader.kvreader.PrefixIterator(buf[:bufUsed]) - - atomic.AddUint64(&indexReader.index.stats.termSearchersStarted, uint64(1)) - return &UpsideDownCouchTermFieldReader{ - indexReader: indexReader, - iterator: it, - count: count, - term: term, - field: field, - includeTermVectors: includeTermVectors, - }, nil -} - -func (r *UpsideDownCouchTermFieldReader) Count() uint64 { - return r.count -} - -func (r *UpsideDownCouchTermFieldReader) Next(preAlloced *index.TermFieldDoc) (*index.TermFieldDoc, error) { - if r.iterator != nil { - // We treat tfrNext also like an initialization flag, which - // tells us whether we need to invoke the underlying - // iterator.Next(). The first time, don't call iterator.Next(). - if r.tfrNext != nil { - r.iterator.Next() - } else { - r.tfrNext = &r.tfrPrealloc - } - key, val, valid := r.iterator.Current() - if valid { - tfr := r.tfrNext - err := tfr.parseKDoc(key, r.term) - if err != nil { - return nil, err - } - err = tfr.parseV(val, r.includeTermVectors) - if err != nil { - return nil, err - } - rv := preAlloced - if rv == nil { - rv = &index.TermFieldDoc{} - } - rv.ID = append(rv.ID, tfr.doc...) 
- rv.Freq = tfr.freq - rv.Norm = float64(tfr.norm) - if tfr.vectors != nil { - rv.Vectors = r.indexReader.index.termFieldVectorsFromTermVectors(tfr.vectors) - } - return rv, nil - } - } - return nil, nil -} - -func (r *UpsideDownCouchTermFieldReader) Advance(docID index.IndexInternalID, preAlloced *index.TermFieldDoc) (rv *index.TermFieldDoc, err error) { - if r.iterator != nil { - if r.tfrNext == nil { - r.tfrNext = &TermFrequencyRow{} - } - tfr := InitTermFrequencyRow(r.tfrNext, r.term, r.field, docID, 0, 0) - r.keyBuf, err = tfr.KeyAppendTo(r.keyBuf[:0]) - if err != nil { - return nil, err - } - r.iterator.Seek(r.keyBuf) - key, val, valid := r.iterator.Current() - if valid { - err := tfr.parseKDoc(key, r.term) - if err != nil { - return nil, err - } - err = tfr.parseV(val, r.includeTermVectors) - if err != nil { - return nil, err - } - rv = preAlloced - if rv == nil { - rv = &index.TermFieldDoc{} - } - rv.ID = append(rv.ID, tfr.doc...) - rv.Freq = tfr.freq - rv.Norm = float64(tfr.norm) - if tfr.vectors != nil { - rv.Vectors = r.indexReader.index.termFieldVectorsFromTermVectors(tfr.vectors) - } - return rv, nil - } - } - return nil, nil -} - -func (r *UpsideDownCouchTermFieldReader) Close() error { - if r.indexReader != nil { - atomic.AddUint64(&r.indexReader.index.stats.termSearchersFinished, uint64(1)) - } - if r.iterator != nil { - return r.iterator.Close() - } - return nil -} - -type UpsideDownCouchDocIDReader struct { - indexReader *IndexReader - iterator store.KVIterator - only []string - onlyPos int - onlyMode bool -} - -func (r *UpsideDownCouchDocIDReader) Size() int { - sizeInBytes := reflectStaticSizeUpsideDownCouchDocIDReader + - reflectStaticSizeIndexReader + size.SizeOfPtr - - for _, entry := range r.only { - sizeInBytes += size.SizeOfString + len(entry) - } - - return sizeInBytes -} - -func newUpsideDownCouchDocIDReader(indexReader *IndexReader) (*UpsideDownCouchDocIDReader, error) { - startBytes := []byte{0x0} - endBytes := []byte{0xff} - - bisr := 
NewBackIndexRow(startBytes, nil, nil) - bier := NewBackIndexRow(endBytes, nil, nil) - it := indexReader.kvreader.RangeIterator(bisr.Key(), bier.Key()) - - return &UpsideDownCouchDocIDReader{ - indexReader: indexReader, - iterator: it, - }, nil -} - -func newUpsideDownCouchDocIDReaderOnly(indexReader *IndexReader, ids []string) (*UpsideDownCouchDocIDReader, error) { - // we don't actually own the list of ids, so if before we sort we must copy - idsCopy := make([]string, len(ids)) - copy(idsCopy, ids) - // ensure ids are sorted - sort.Strings(idsCopy) - startBytes := []byte{0x0} - if len(idsCopy) > 0 { - startBytes = []byte(idsCopy[0]) - } - endBytes := []byte{0xff} - if len(idsCopy) > 0 { - endBytes = incrementBytes([]byte(idsCopy[len(idsCopy)-1])) - } - bisr := NewBackIndexRow(startBytes, nil, nil) - bier := NewBackIndexRow(endBytes, nil, nil) - it := indexReader.kvreader.RangeIterator(bisr.Key(), bier.Key()) - - return &UpsideDownCouchDocIDReader{ - indexReader: indexReader, - iterator: it, - only: idsCopy, - onlyMode: true, - }, nil -} - -func (r *UpsideDownCouchDocIDReader) Next() (index.IndexInternalID, error) { - key, val, valid := r.iterator.Current() - - if r.onlyMode { - var rv index.IndexInternalID - for valid && r.onlyPos < len(r.only) { - br, err := NewBackIndexRowKV(key, val) - if err != nil { - return nil, err - } - if !bytes.Equal(br.doc, []byte(r.only[r.onlyPos])) { - ok := r.nextOnly() - if !ok { - return nil, nil - } - r.iterator.Seek(NewBackIndexRow([]byte(r.only[r.onlyPos]), nil, nil).Key()) - key, val, valid = r.iterator.Current() - continue - } else { - rv = append([]byte(nil), br.doc...) - break - } - } - if valid && r.onlyPos < len(r.only) { - ok := r.nextOnly() - if ok { - r.iterator.Seek(NewBackIndexRow([]byte(r.only[r.onlyPos]), nil, nil).Key()) - } - return rv, nil - } - - } else { - if valid { - br, err := NewBackIndexRowKV(key, val) - if err != nil { - return nil, err - } - rv := append([]byte(nil), br.doc...) 
- r.iterator.Next() - return rv, nil - } - } - return nil, nil -} - -func (r *UpsideDownCouchDocIDReader) Advance(docID index.IndexInternalID) (index.IndexInternalID, error) { - - if r.onlyMode { - r.onlyPos = sort.SearchStrings(r.only, string(docID)) - if r.onlyPos >= len(r.only) { - // advanced to key after our last only key - return nil, nil - } - r.iterator.Seek(NewBackIndexRow([]byte(r.only[r.onlyPos]), nil, nil).Key()) - key, val, valid := r.iterator.Current() - - var rv index.IndexInternalID - for valid && r.onlyPos < len(r.only) { - br, err := NewBackIndexRowKV(key, val) - if err != nil { - return nil, err - } - if !bytes.Equal(br.doc, []byte(r.only[r.onlyPos])) { - // the only key we seek'd to didn't exist - // now look for the closest key that did exist in only - r.onlyPos = sort.SearchStrings(r.only, string(br.doc)) - if r.onlyPos >= len(r.only) { - // advanced to key after our last only key - return nil, nil - } - // now seek to this new only key - r.iterator.Seek(NewBackIndexRow([]byte(r.only[r.onlyPos]), nil, nil).Key()) - key, val, valid = r.iterator.Current() - continue - } else { - rv = append([]byte(nil), br.doc...) - break - } - } - if valid && r.onlyPos < len(r.only) { - ok := r.nextOnly() - if ok { - r.iterator.Seek(NewBackIndexRow([]byte(r.only[r.onlyPos]), nil, nil).Key()) - } - return rv, nil - } - } else { - bir := NewBackIndexRow(docID, nil, nil) - r.iterator.Seek(bir.Key()) - key, val, valid := r.iterator.Current() - if valid { - br, err := NewBackIndexRowKV(key, val) - if err != nil { - return nil, err - } - rv := append([]byte(nil), br.doc...) 
- r.iterator.Next() - return rv, nil - } - } - return nil, nil -} - -func (r *UpsideDownCouchDocIDReader) Close() error { - return r.iterator.Close() -} - -// move the r.only pos forward one, skipping duplicates -// return true if there is more data, or false if we got to the end of the list -func (r *UpsideDownCouchDocIDReader) nextOnly() bool { - - // advance 1 position, until we see a different key - // it's already sorted, so this skips duplicates - start := r.onlyPos - r.onlyPos++ - for r.onlyPos < len(r.only) && r.only[r.onlyPos] == r.only[start] { - start = r.onlyPos - r.onlyPos++ - } - // inidicate if we got to the end of the list - return r.onlyPos < len(r.only) -} diff --git a/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/row.go b/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/row.go deleted file mode 100644 index 901a4bcb0..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/row.go +++ /dev/null @@ -1,1141 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package upsidedown - -import ( - "bytes" - "encoding/binary" - "fmt" - "io" - "math" - "reflect" - - "github.com/blevesearch/bleve/v2/size" - "github.com/golang/protobuf/proto" -) - -var reflectStaticSizeTermFrequencyRow int -var reflectStaticSizeTermVector int - -func init() { - var tfr TermFrequencyRow - reflectStaticSizeTermFrequencyRow = int(reflect.TypeOf(tfr).Size()) - var tv TermVector - reflectStaticSizeTermVector = int(reflect.TypeOf(tv).Size()) -} - -const ByteSeparator byte = 0xff - -type UpsideDownCouchRowStream chan UpsideDownCouchRow - -type UpsideDownCouchRow interface { - KeySize() int - KeyTo([]byte) (int, error) - Key() []byte - Value() []byte - ValueSize() int - ValueTo([]byte) (int, error) -} - -func ParseFromKeyValue(key, value []byte) (UpsideDownCouchRow, error) { - if len(key) > 0 { - switch key[0] { - case 'v': - return NewVersionRowKV(key, value) - case 'f': - return NewFieldRowKV(key, value) - case 'd': - return NewDictionaryRowKV(key, value) - case 't': - return NewTermFrequencyRowKV(key, value) - case 'b': - return NewBackIndexRowKV(key, value) - case 's': - return NewStoredRowKV(key, value) - case 'i': - return NewInternalRowKV(key, value) - } - return nil, fmt.Errorf("Unknown field type '%s'", string(key[0])) - } - return nil, fmt.Errorf("Invalid empty key") -} - -// VERSION - -type VersionRow struct { - version uint8 -} - -func (v *VersionRow) Key() []byte { - return []byte{'v'} -} - -func (v *VersionRow) KeySize() int { - return 1 -} - -func (v *VersionRow) KeyTo(buf []byte) (int, error) { - buf[0] = 'v' - return 1, nil -} - -func (v *VersionRow) Value() []byte { - return []byte{byte(v.version)} -} - -func (v *VersionRow) ValueSize() int { - return 1 -} - -func (v *VersionRow) ValueTo(buf []byte) (int, error) { - buf[0] = v.version - return 1, nil -} - -func (v *VersionRow) String() string { - return fmt.Sprintf("Version: %d", v.version) -} - -func NewVersionRow(version uint8) *VersionRow { - return &VersionRow{ - version: 
version, - } -} - -func NewVersionRowKV(key, value []byte) (*VersionRow, error) { - rv := VersionRow{} - buf := bytes.NewBuffer(value) - err := binary.Read(buf, binary.LittleEndian, &rv.version) - if err != nil { - return nil, err - } - return &rv, nil -} - -// INTERNAL STORAGE - -type InternalRow struct { - key []byte - val []byte -} - -func (i *InternalRow) Key() []byte { - buf := make([]byte, i.KeySize()) - size, _ := i.KeyTo(buf) - return buf[:size] -} - -func (i *InternalRow) KeySize() int { - return len(i.key) + 1 -} - -func (i *InternalRow) KeyTo(buf []byte) (int, error) { - buf[0] = 'i' - actual := copy(buf[1:], i.key) - return 1 + actual, nil -} - -func (i *InternalRow) Value() []byte { - return i.val -} - -func (i *InternalRow) ValueSize() int { - return len(i.val) -} - -func (i *InternalRow) ValueTo(buf []byte) (int, error) { - actual := copy(buf, i.val) - return actual, nil -} - -func (i *InternalRow) String() string { - return fmt.Sprintf("InternalStore - Key: %s (% x) Val: %s (% x)", i.key, i.key, i.val, i.val) -} - -func NewInternalRow(key, val []byte) *InternalRow { - return &InternalRow{ - key: key, - val: val, - } -} - -func NewInternalRowKV(key, value []byte) (*InternalRow, error) { - rv := InternalRow{} - rv.key = key[1:] - rv.val = value - return &rv, nil -} - -// FIELD definition - -type FieldRow struct { - index uint16 - name string -} - -func (f *FieldRow) Key() []byte { - buf := make([]byte, f.KeySize()) - size, _ := f.KeyTo(buf) - return buf[:size] -} - -func (f *FieldRow) KeySize() int { - return 3 -} - -func (f *FieldRow) KeyTo(buf []byte) (int, error) { - buf[0] = 'f' - binary.LittleEndian.PutUint16(buf[1:3], f.index) - return 3, nil -} - -func (f *FieldRow) Value() []byte { - return append([]byte(f.name), ByteSeparator) -} - -func (f *FieldRow) ValueSize() int { - return len(f.name) + 1 -} - -func (f *FieldRow) ValueTo(buf []byte) (int, error) { - size := copy(buf, f.name) - buf[size] = ByteSeparator - return size + 1, nil -} - -func 
(f *FieldRow) String() string { - return fmt.Sprintf("Field: %d Name: %s", f.index, f.name) -} - -func NewFieldRow(index uint16, name string) *FieldRow { - return &FieldRow{ - index: index, - name: name, - } -} - -func NewFieldRowKV(key, value []byte) (*FieldRow, error) { - rv := FieldRow{} - - buf := bytes.NewBuffer(key) - _, err := buf.ReadByte() // type - if err != nil { - return nil, err - } - err = binary.Read(buf, binary.LittleEndian, &rv.index) - if err != nil { - return nil, err - } - - buf = bytes.NewBuffer(value) - rv.name, err = buf.ReadString(ByteSeparator) - if err != nil { - return nil, err - } - rv.name = rv.name[:len(rv.name)-1] // trim off separator byte - - return &rv, nil -} - -// DICTIONARY - -const DictionaryRowMaxValueSize = binary.MaxVarintLen64 - -type DictionaryRow struct { - term []byte - count uint64 - field uint16 -} - -func (dr *DictionaryRow) Key() []byte { - buf := make([]byte, dr.KeySize()) - size, _ := dr.KeyTo(buf) - return buf[:size] -} - -func (dr *DictionaryRow) KeySize() int { - return dictionaryRowKeySize(dr.term) -} - -func dictionaryRowKeySize(term []byte) int { - return len(term) + 3 -} - -func (dr *DictionaryRow) KeyTo(buf []byte) (int, error) { - return dictionaryRowKeyTo(buf, dr.field, dr.term), nil -} - -func dictionaryRowKeyTo(buf []byte, field uint16, term []byte) int { - buf[0] = 'd' - binary.LittleEndian.PutUint16(buf[1:3], field) - size := copy(buf[3:], term) - return size + 3 -} - -func (dr *DictionaryRow) Value() []byte { - buf := make([]byte, dr.ValueSize()) - size, _ := dr.ValueTo(buf) - return buf[:size] -} - -func (dr *DictionaryRow) ValueSize() int { - return DictionaryRowMaxValueSize -} - -func (dr *DictionaryRow) ValueTo(buf []byte) (int, error) { - used := binary.PutUvarint(buf, dr.count) - return used, nil -} - -func (dr *DictionaryRow) String() string { - return fmt.Sprintf("Dictionary Term: `%s` Field: %d Count: %d ", string(dr.term), dr.field, dr.count) -} - -func NewDictionaryRow(term []byte, field 
uint16, count uint64) *DictionaryRow { - return &DictionaryRow{ - term: term, - field: field, - count: count, - } -} - -func NewDictionaryRowKV(key, value []byte) (*DictionaryRow, error) { - rv, err := NewDictionaryRowK(key) - if err != nil { - return nil, err - } - - err = rv.parseDictionaryV(value) - if err != nil { - return nil, err - } - return rv, nil - -} - -func NewDictionaryRowK(key []byte) (*DictionaryRow, error) { - rv := &DictionaryRow{} - err := rv.parseDictionaryK(key) - if err != nil { - return nil, err - } - return rv, nil -} - -func (dr *DictionaryRow) parseDictionaryK(key []byte) error { - dr.field = binary.LittleEndian.Uint16(key[1:3]) - if dr.term != nil { - dr.term = dr.term[:0] - } - dr.term = append(dr.term, key[3:]...) - return nil -} - -func (dr *DictionaryRow) parseDictionaryV(value []byte) error { - count, err := dictionaryRowParseV(value) - if err != nil { - return err - } - dr.count = count - return nil -} - -func dictionaryRowParseV(value []byte) (uint64, error) { - count, nread := binary.Uvarint(value) - if nread <= 0 { - return 0, fmt.Errorf("DictionaryRow parse Uvarint error, nread: %d", nread) - } - return count, nil -} - -// TERM FIELD FREQUENCY - -type TermVector struct { - field uint16 - arrayPositions []uint64 - pos uint64 - start uint64 - end uint64 -} - -func (tv *TermVector) Size() int { - return reflectStaticSizeTermVector + size.SizeOfPtr + - len(tv.arrayPositions)*size.SizeOfUint64 -} - -func (tv *TermVector) String() string { - return fmt.Sprintf("Field: %d Pos: %d Start: %d End %d ArrayPositions: %#v", tv.field, tv.pos, tv.start, tv.end, tv.arrayPositions) -} - -type TermFrequencyRow struct { - term []byte - doc []byte - freq uint64 - vectors []*TermVector - norm float32 - field uint16 -} - -func (tfr *TermFrequencyRow) Size() int { - sizeInBytes := reflectStaticSizeTermFrequencyRow + - len(tfr.term) + - len(tfr.doc) - - for _, entry := range tfr.vectors { - sizeInBytes += entry.Size() - } - - return sizeInBytes -} - 
-func (tfr *TermFrequencyRow) Term() []byte { - return tfr.term -} - -func (tfr *TermFrequencyRow) Freq() uint64 { - return tfr.freq -} - -func (tfr *TermFrequencyRow) ScanPrefixForField() []byte { - buf := make([]byte, 3) - buf[0] = 't' - binary.LittleEndian.PutUint16(buf[1:3], tfr.field) - return buf -} - -func (tfr *TermFrequencyRow) ScanPrefixForFieldTermPrefix() []byte { - buf := make([]byte, 3+len(tfr.term)) - buf[0] = 't' - binary.LittleEndian.PutUint16(buf[1:3], tfr.field) - copy(buf[3:], tfr.term) - return buf -} - -func (tfr *TermFrequencyRow) ScanPrefixForFieldTerm() []byte { - buf := make([]byte, 3+len(tfr.term)+1) - buf[0] = 't' - binary.LittleEndian.PutUint16(buf[1:3], tfr.field) - termLen := copy(buf[3:], tfr.term) - buf[3+termLen] = ByteSeparator - return buf -} - -func (tfr *TermFrequencyRow) Key() []byte { - buf := make([]byte, tfr.KeySize()) - size, _ := tfr.KeyTo(buf) - return buf[:size] -} - -func (tfr *TermFrequencyRow) KeySize() int { - return termFrequencyRowKeySize(tfr.term, tfr.doc) -} - -func termFrequencyRowKeySize(term, doc []byte) int { - return 3 + len(term) + 1 + len(doc) -} - -func (tfr *TermFrequencyRow) KeyTo(buf []byte) (int, error) { - return termFrequencyRowKeyTo(buf, tfr.field, tfr.term, tfr.doc), nil -} - -func termFrequencyRowKeyTo(buf []byte, field uint16, term, doc []byte) int { - buf[0] = 't' - binary.LittleEndian.PutUint16(buf[1:3], field) - termLen := copy(buf[3:], term) - buf[3+termLen] = ByteSeparator - docLen := copy(buf[3+termLen+1:], doc) - return 3 + termLen + 1 + docLen -} - -func (tfr *TermFrequencyRow) KeyAppendTo(buf []byte) ([]byte, error) { - keySize := tfr.KeySize() - if cap(buf) < keySize { - buf = make([]byte, keySize) - } - actualSize, err := tfr.KeyTo(buf[0:keySize]) - return buf[0:actualSize], err -} - -func (tfr *TermFrequencyRow) DictionaryRowKey() []byte { - dr := NewDictionaryRow(tfr.term, tfr.field, 0) - return dr.Key() -} - -func (tfr *TermFrequencyRow) DictionaryRowKeySize() int { - dr := 
NewDictionaryRow(tfr.term, tfr.field, 0) - return dr.KeySize() -} - -func (tfr *TermFrequencyRow) DictionaryRowKeyTo(buf []byte) (int, error) { - dr := NewDictionaryRow(tfr.term, tfr.field, 0) - return dr.KeyTo(buf) -} - -func (tfr *TermFrequencyRow) Value() []byte { - buf := make([]byte, tfr.ValueSize()) - size, _ := tfr.ValueTo(buf) - return buf[:size] -} - -func (tfr *TermFrequencyRow) ValueSize() int { - bufLen := binary.MaxVarintLen64 + binary.MaxVarintLen64 - for _, vector := range tfr.vectors { - bufLen += (binary.MaxVarintLen64 * 4) + (1+len(vector.arrayPositions))*binary.MaxVarintLen64 - } - return bufLen -} - -func (tfr *TermFrequencyRow) ValueTo(buf []byte) (int, error) { - used := binary.PutUvarint(buf[:binary.MaxVarintLen64], tfr.freq) - - normuint32 := math.Float32bits(tfr.norm) - newbuf := buf[used : used+binary.MaxVarintLen64] - used += binary.PutUvarint(newbuf, uint64(normuint32)) - - for _, vector := range tfr.vectors { - used += binary.PutUvarint(buf[used:used+binary.MaxVarintLen64], uint64(vector.field)) - used += binary.PutUvarint(buf[used:used+binary.MaxVarintLen64], vector.pos) - used += binary.PutUvarint(buf[used:used+binary.MaxVarintLen64], vector.start) - used += binary.PutUvarint(buf[used:used+binary.MaxVarintLen64], vector.end) - used += binary.PutUvarint(buf[used:used+binary.MaxVarintLen64], uint64(len(vector.arrayPositions))) - for _, arrayPosition := range vector.arrayPositions { - used += binary.PutUvarint(buf[used:used+binary.MaxVarintLen64], arrayPosition) - } - } - return used, nil -} - -func (tfr *TermFrequencyRow) String() string { - return fmt.Sprintf("Term: `%s` Field: %d DocId: `%s` Frequency: %d Norm: %f Vectors: %v", string(tfr.term), tfr.field, string(tfr.doc), tfr.freq, tfr.norm, tfr.vectors) -} - -func InitTermFrequencyRow(tfr *TermFrequencyRow, term []byte, field uint16, docID []byte, freq uint64, norm float32) *TermFrequencyRow { - tfr.term = term - tfr.field = field - tfr.doc = docID - tfr.freq = freq - tfr.norm = 
norm - return tfr -} - -func NewTermFrequencyRow(term []byte, field uint16, docID []byte, freq uint64, norm float32) *TermFrequencyRow { - return &TermFrequencyRow{ - term: term, - field: field, - doc: docID, - freq: freq, - norm: norm, - } -} - -func NewTermFrequencyRowWithTermVectors(term []byte, field uint16, docID []byte, freq uint64, norm float32, vectors []*TermVector) *TermFrequencyRow { - return &TermFrequencyRow{ - term: term, - field: field, - doc: docID, - freq: freq, - norm: norm, - vectors: vectors, - } -} - -func NewTermFrequencyRowK(key []byte) (*TermFrequencyRow, error) { - rv := &TermFrequencyRow{} - err := rv.parseK(key) - if err != nil { - return nil, err - } - return rv, nil -} - -func (tfr *TermFrequencyRow) parseK(key []byte) error { - keyLen := len(key) - if keyLen < 3 { - return fmt.Errorf("invalid term frequency key, no valid field") - } - tfr.field = binary.LittleEndian.Uint16(key[1:3]) - - termEndPos := bytes.IndexByte(key[3:], ByteSeparator) - if termEndPos < 0 { - return fmt.Errorf("invalid term frequency key, no byte separator terminating term") - } - tfr.term = key[3 : 3+termEndPos] - - docLen := keyLen - (3 + termEndPos + 1) - if docLen < 1 { - return fmt.Errorf("invalid term frequency key, empty docid") - } - tfr.doc = key[3+termEndPos+1:] - - return nil -} - -func (tfr *TermFrequencyRow) parseKDoc(key []byte, term []byte) error { - tfr.doc = key[3+len(term)+1:] - if len(tfr.doc) == 0 { - return fmt.Errorf("invalid term frequency key, empty docid") - } - - return nil -} - -func (tfr *TermFrequencyRow) parseV(value []byte, includeTermVectors bool) error { - var bytesRead int - tfr.freq, bytesRead = binary.Uvarint(value) - if bytesRead <= 0 { - return fmt.Errorf("invalid term frequency value, invalid frequency") - } - currOffset := bytesRead - - var norm uint64 - norm, bytesRead = binary.Uvarint(value[currOffset:]) - if bytesRead <= 0 { - return fmt.Errorf("invalid term frequency value, no norm") - } - currOffset += bytesRead - - 
tfr.norm = math.Float32frombits(uint32(norm)) - - tfr.vectors = nil - if !includeTermVectors { - return nil - } - - var field uint64 - field, bytesRead = binary.Uvarint(value[currOffset:]) - for bytesRead > 0 { - currOffset += bytesRead - tv := TermVector{} - tv.field = uint16(field) - // at this point we expect at least one term vector - if tfr.vectors == nil { - tfr.vectors = make([]*TermVector, 0) - } - - tv.pos, bytesRead = binary.Uvarint(value[currOffset:]) - if bytesRead <= 0 { - return fmt.Errorf("invalid term frequency value, vector contains no position") - } - currOffset += bytesRead - - tv.start, bytesRead = binary.Uvarint(value[currOffset:]) - if bytesRead <= 0 { - return fmt.Errorf("invalid term frequency value, vector contains no start") - } - currOffset += bytesRead - - tv.end, bytesRead = binary.Uvarint(value[currOffset:]) - if bytesRead <= 0 { - return fmt.Errorf("invalid term frequency value, vector contains no end") - } - currOffset += bytesRead - - var arrayPositionsLen uint64 = 0 - arrayPositionsLen, bytesRead = binary.Uvarint(value[currOffset:]) - if bytesRead <= 0 { - return fmt.Errorf("invalid term frequency value, vector contains no arrayPositionLen") - } - currOffset += bytesRead - - if arrayPositionsLen > 0 { - tv.arrayPositions = make([]uint64, arrayPositionsLen) - for i := 0; uint64(i) < arrayPositionsLen; i++ { - tv.arrayPositions[i], bytesRead = binary.Uvarint(value[currOffset:]) - if bytesRead <= 0 { - return fmt.Errorf("invalid term frequency value, vector contains no arrayPosition of index %d", i) - } - currOffset += bytesRead - } - } - - tfr.vectors = append(tfr.vectors, &tv) - // try to read next record (may not exist) - field, bytesRead = binary.Uvarint(value[currOffset:]) - } - if len(value[currOffset:]) > 0 && bytesRead <= 0 { - return fmt.Errorf("invalid term frequency value, vector field invalid") - } - - return nil -} - -func NewTermFrequencyRowKV(key, value []byte) (*TermFrequencyRow, error) { - rv, err := 
NewTermFrequencyRowK(key) - if err != nil { - return nil, err - } - - err = rv.parseV(value, true) - if err != nil { - return nil, err - } - return rv, nil - -} - -type BackIndexRow struct { - doc []byte - termsEntries []*BackIndexTermsEntry - storedEntries []*BackIndexStoreEntry -} - -func (br *BackIndexRow) AllTermKeys() [][]byte { - if br == nil { - return nil - } - rv := make([][]byte, 0, len(br.termsEntries)) // FIXME this underestimates severely - for _, termsEntry := range br.termsEntries { - for i := range termsEntry.Terms { - termRow := NewTermFrequencyRow([]byte(termsEntry.Terms[i]), uint16(termsEntry.GetField()), br.doc, 0, 0) - rv = append(rv, termRow.Key()) - } - } - return rv -} - -func (br *BackIndexRow) AllStoredKeys() [][]byte { - if br == nil { - return nil - } - rv := make([][]byte, len(br.storedEntries)) - for i, storedEntry := range br.storedEntries { - storedRow := NewStoredRow(br.doc, uint16(storedEntry.GetField()), storedEntry.GetArrayPositions(), 'x', []byte{}) - rv[i] = storedRow.Key() - } - return rv -} - -func (br *BackIndexRow) Key() []byte { - buf := make([]byte, br.KeySize()) - size, _ := br.KeyTo(buf) - return buf[:size] -} - -func (br *BackIndexRow) KeySize() int { - return len(br.doc) + 1 -} - -func (br *BackIndexRow) KeyTo(buf []byte) (int, error) { - buf[0] = 'b' - used := copy(buf[1:], br.doc) - return used + 1, nil -} - -func (br *BackIndexRow) Value() []byte { - buf := make([]byte, br.ValueSize()) - size, _ := br.ValueTo(buf) - return buf[:size] -} - -func (br *BackIndexRow) ValueSize() int { - birv := &BackIndexRowValue{ - TermsEntries: br.termsEntries, - StoredEntries: br.storedEntries, - } - return birv.Size() -} - -func (br *BackIndexRow) ValueTo(buf []byte) (int, error) { - birv := &BackIndexRowValue{ - TermsEntries: br.termsEntries, - StoredEntries: br.storedEntries, - } - return birv.MarshalTo(buf) -} - -func (br *BackIndexRow) String() string { - return fmt.Sprintf("Backindex DocId: `%s` Terms Entries: %v, Stored 
Entries: %v", string(br.doc), br.termsEntries, br.storedEntries) -} - -func NewBackIndexRow(docID []byte, entries []*BackIndexTermsEntry, storedFields []*BackIndexStoreEntry) *BackIndexRow { - return &BackIndexRow{ - doc: docID, - termsEntries: entries, - storedEntries: storedFields, - } -} - -func NewBackIndexRowKV(key, value []byte) (*BackIndexRow, error) { - rv := BackIndexRow{} - - buf := bytes.NewBuffer(key) - _, err := buf.ReadByte() // type - if err != nil { - return nil, err - } - - rv.doc, err = buf.ReadBytes(ByteSeparator) - if err == io.EOF && len(rv.doc) < 1 { - err = fmt.Errorf("invalid doc length 0 - % x", key) - } - if err != nil && err != io.EOF { - return nil, err - } else if err == nil { - rv.doc = rv.doc[:len(rv.doc)-1] // trim off separator byte - } - - var birv BackIndexRowValue - err = proto.Unmarshal(value, &birv) - if err != nil { - return nil, err - } - rv.termsEntries = birv.TermsEntries - rv.storedEntries = birv.StoredEntries - - return &rv, nil -} - -// STORED - -type StoredRow struct { - doc []byte - field uint16 - arrayPositions []uint64 - typ byte - value []byte -} - -func (s *StoredRow) Key() []byte { - buf := make([]byte, s.KeySize()) - size, _ := s.KeyTo(buf) - return buf[0:size] -} - -func (s *StoredRow) KeySize() int { - return 1 + len(s.doc) + 1 + 2 + (binary.MaxVarintLen64 * len(s.arrayPositions)) -} - -func (s *StoredRow) KeyTo(buf []byte) (int, error) { - docLen := len(s.doc) - buf[0] = 's' - copy(buf[1:], s.doc) - buf[1+docLen] = ByteSeparator - binary.LittleEndian.PutUint16(buf[1+docLen+1:], s.field) - bytesUsed := 1 + docLen + 1 + 2 - for _, arrayPosition := range s.arrayPositions { - varbytes := binary.PutUvarint(buf[bytesUsed:], arrayPosition) - bytesUsed += varbytes - } - return bytesUsed, nil -} - -func (s *StoredRow) Value() []byte { - buf := make([]byte, s.ValueSize()) - size, _ := s.ValueTo(buf) - return buf[:size] -} - -func (s *StoredRow) ValueSize() int { - return len(s.value) + 1 -} - -func (s *StoredRow) 
ValueTo(buf []byte) (int, error) { - buf[0] = s.typ - used := copy(buf[1:], s.value) - return used + 1, nil -} - -func (s *StoredRow) String() string { - return fmt.Sprintf("Document: %s Field %d, Array Positions: %v, Type: %s Value: %s", s.doc, s.field, s.arrayPositions, string(s.typ), s.value) -} - -func (s *StoredRow) ScanPrefixForDoc() []byte { - docLen := len(s.doc) - buf := make([]byte, 1+docLen+1) - buf[0] = 's' - copy(buf[1:], s.doc) - buf[1+docLen] = ByteSeparator - return buf -} - -func NewStoredRow(docID []byte, field uint16, arrayPositions []uint64, typ byte, value []byte) *StoredRow { - return &StoredRow{ - doc: docID, - field: field, - arrayPositions: arrayPositions, - typ: typ, - value: value, - } -} - -func NewStoredRowK(key []byte) (*StoredRow, error) { - rv := StoredRow{} - - buf := bytes.NewBuffer(key) - _, err := buf.ReadByte() // type - if err != nil { - return nil, err - } - - rv.doc, err = buf.ReadBytes(ByteSeparator) - if len(rv.doc) < 2 { // 1 for min doc id length, 1 for separator - err = fmt.Errorf("invalid doc length 0") - return nil, err - } - - rv.doc = rv.doc[:len(rv.doc)-1] // trim off separator byte - - err = binary.Read(buf, binary.LittleEndian, &rv.field) - if err != nil { - return nil, err - } - - rv.arrayPositions = make([]uint64, 0) - nextArrayPos, err := binary.ReadUvarint(buf) - for err == nil { - rv.arrayPositions = append(rv.arrayPositions, nextArrayPos) - nextArrayPos, err = binary.ReadUvarint(buf) - } - return &rv, nil -} - -func NewStoredRowKV(key, value []byte) (*StoredRow, error) { - rv, err := NewStoredRowK(key) - if err != nil { - return nil, err - } - rv.typ = value[0] - rv.value = value[1:] - return rv, nil -} - -type backIndexFieldTermVisitor func(field uint32, term []byte) - -// visitBackIndexRow is designed to process a protobuf encoded -// value, without creating unnecessary garbage. Instead values are passed -// to a callback, inspected first, and only copied if necessary. 
-// Due to the fact that this borrows from generated code, it must be marnually -// updated if the protobuf definition changes. -// -// This code originates from: -// func (m *BackIndexRowValue) Unmarshal(data []byte) error -// the sections which create garbage or parse unintersting sections -// have been commented out. This was done by design to allow for easier -// merging in the future if that original function is regenerated -func visitBackIndexRow(data []byte, callback backIndexFieldTermVisitor) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TermsEntries", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - postIndex := iNdEx + msglen - if msglen < 0 { - return ErrInvalidLengthUpsidedown - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - // dont parse term entries - // m.TermsEntries = append(m.TermsEntries, &BackIndexTermsEntry{}) - // if err := m.TermsEntries[len(m.TermsEntries)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - // return err - // } - // instead, inspect them - if err := visitBackIndexRowFieldTerms(data[iNdEx:postIndex], callback); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StoredEntries", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - 
break - } - } - postIndex := iNdEx + msglen - if msglen < 0 { - return ErrInvalidLengthUpsidedown - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - // don't parse stored entries - // m.StoredEntries = append(m.StoredEntries, &BackIndexStoreEntry{}) - // if err := m.StoredEntries[len(m.StoredEntries)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - // return err - // } - iNdEx = postIndex - default: - var sizeOfWire int - for { - sizeOfWire++ - wire >>= 7 - if wire == 0 { - break - } - } - iNdEx -= sizeOfWire - skippy, err := skipUpsidedown(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthUpsidedown - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - // don't track unrecognized data - //m.XXX_unrecognized = append(m.XXX_unrecognized, data[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - return nil -} - -// visitBackIndexRowFieldTerms is designed to process a protobuf encoded -// sub-value within the BackIndexRowValue, without creating unnecessary garbage. -// Instead values are passed to a callback, inspected first, and only copied if -// necessary. Due to the fact that this borrows from generated code, it must -// be marnually updated if the protobuf definition changes. -// -// This code originates from: -// func (m *BackIndexTermsEntry) Unmarshal(data []byte) error { -// the sections which create garbage or parse uninteresting sections -// have been commented out. 
This was done by design to allow for easier -// merging in the future if that original function is regenerated -func visitBackIndexRowFieldTerms(data []byte, callback backIndexFieldTermVisitor) error { - var theField uint32 - - var hasFields [1]uint64 - l := len(data) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Field", wireType) - } - var v uint32 - for shift := uint(0); ; shift += 7 { - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (uint32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - // m.Field = &v - theField = v - hasFields[0] |= uint64(0x00000001) - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Terms", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - postIndex := iNdEx + int(stringLen) - if postIndex > l { - return io.ErrUnexpectedEOF - } - //m.Terms = append(m.Terms, string(data[iNdEx:postIndex])) - callback(theField, data[iNdEx:postIndex]) - iNdEx = postIndex - default: - var sizeOfWire int - for { - sizeOfWire++ - wire >>= 7 - if wire == 0 { - break - } - } - iNdEx -= sizeOfWire - skippy, err := skipUpsidedown(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthUpsidedown - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - //m.XXX_unrecognized = append(m.XXX_unrecognized, data[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - // if hasFields[0]&uint64(0x00000001) == 0 { - // return new(github_com_golang_protobuf_proto.RequiredNotSetError) - // } - - return nil -} diff --git a/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/row_merge.go b/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/row_merge.go deleted file mode 100644 index 39172ade6..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/row_merge.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package upsidedown - -import ( - "encoding/binary" -) - -var mergeOperator upsideDownMerge - -var dictionaryTermIncr []byte -var dictionaryTermDecr []byte - -func init() { - dictionaryTermIncr = make([]byte, 8) - binary.LittleEndian.PutUint64(dictionaryTermIncr, uint64(1)) - dictionaryTermDecr = make([]byte, 8) - var negOne = int64(-1) - binary.LittleEndian.PutUint64(dictionaryTermDecr, uint64(negOne)) -} - -type upsideDownMerge struct{} - -func (m *upsideDownMerge) FullMerge(key, existingValue []byte, operands [][]byte) ([]byte, bool) { - // set up record based on key - dr, err := NewDictionaryRowK(key) - if err != nil { - return nil, false - } - if len(existingValue) > 0 { - // if existing value, parse it - err = dr.parseDictionaryV(existingValue) - if err != nil { - return nil, false - } - } - - // now process operands - for _, operand := range operands { - next := int64(binary.LittleEndian.Uint64(operand)) - if next < 0 && uint64(-next) > dr.count { - // subtracting next from existing would overflow - dr.count = 0 - } else if next < 0 { - dr.count -= uint64(-next) - } else { - dr.count += uint64(next) - } - } - - return dr.Value(), true -} - -func (m *upsideDownMerge) PartialMerge(key, leftOperand, rightOperand []byte) ([]byte, bool) { - left := int64(binary.LittleEndian.Uint64(leftOperand)) - right := int64(binary.LittleEndian.Uint64(rightOperand)) - rv := make([]byte, 8) - binary.LittleEndian.PutUint64(rv, uint64(left+right)) - return rv, true -} - -func (m *upsideDownMerge) Name() string { - return "upsideDownMerge" -} diff --git a/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/stats.go b/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/stats.go deleted file mode 100644 index c1b4ddc01..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/stats.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package upsidedown - -import ( - "encoding/json" - "sync/atomic" - - "github.com/blevesearch/upsidedown_store_api" -) - -type indexStat struct { - updates, deletes, batches, errors uint64 - analysisTime, indexTime uint64 - termSearchersStarted uint64 - termSearchersFinished uint64 - numPlainTextBytesIndexed uint64 - i *UpsideDownCouch -} - -func (i *indexStat) statsMap() map[string]interface{} { - m := map[string]interface{}{} - m["updates"] = atomic.LoadUint64(&i.updates) - m["deletes"] = atomic.LoadUint64(&i.deletes) - m["batches"] = atomic.LoadUint64(&i.batches) - m["errors"] = atomic.LoadUint64(&i.errors) - m["analysis_time"] = atomic.LoadUint64(&i.analysisTime) - m["index_time"] = atomic.LoadUint64(&i.indexTime) - m["term_searchers_started"] = atomic.LoadUint64(&i.termSearchersStarted) - m["term_searchers_finished"] = atomic.LoadUint64(&i.termSearchersFinished) - m["num_plain_text_bytes_indexed"] = atomic.LoadUint64(&i.numPlainTextBytesIndexed) - - if o, ok := i.i.store.(store.KVStoreStats); ok { - m["kv"] = o.StatsMap() - } - - return m -} - -func (i *indexStat) MarshalJSON() ([]byte, error) { - m := i.statsMap() - return json.Marshal(m) -} diff --git a/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/store/boltdb/iterator.go b/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/store/boltdb/iterator.go deleted file mode 100644 index cf4da87c3..000000000 --- 
a/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/store/boltdb/iterator.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package boltdb - -import ( - "bytes" - - bolt "go.etcd.io/bbolt" -) - -type Iterator struct { - store *Store - tx *bolt.Tx - cursor *bolt.Cursor - prefix []byte - start []byte - end []byte - valid bool - key []byte - val []byte -} - -func (i *Iterator) updateValid() { - i.valid = (i.key != nil) - if i.valid { - if i.prefix != nil { - i.valid = bytes.HasPrefix(i.key, i.prefix) - } else if i.end != nil { - i.valid = bytes.Compare(i.key, i.end) < 0 - } - } -} - -func (i *Iterator) Seek(k []byte) { - if i.start != nil && bytes.Compare(k, i.start) < 0 { - k = i.start - } - if i.prefix != nil && !bytes.HasPrefix(k, i.prefix) { - if bytes.Compare(k, i.prefix) < 0 { - k = i.prefix - } else { - i.valid = false - return - } - } - i.key, i.val = i.cursor.Seek(k) - i.updateValid() -} - -func (i *Iterator) Next() { - i.key, i.val = i.cursor.Next() - i.updateValid() -} - -func (i *Iterator) Current() ([]byte, []byte, bool) { - return i.key, i.val, i.valid -} - -func (i *Iterator) Key() []byte { - return i.key -} - -func (i *Iterator) Value() []byte { - return i.val -} - -func (i *Iterator) Valid() bool { - return i.valid -} - -func (i *Iterator) Close() error { - return nil -} diff --git a/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/store/boltdb/reader.go 
b/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/store/boltdb/reader.go deleted file mode 100644 index 79513f600..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/store/boltdb/reader.go +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package boltdb - -import ( - store "github.com/blevesearch/upsidedown_store_api" - bolt "go.etcd.io/bbolt" -) - -type Reader struct { - store *Store - tx *bolt.Tx - bucket *bolt.Bucket -} - -func (r *Reader) Get(key []byte) ([]byte, error) { - var rv []byte - v := r.bucket.Get(key) - if v != nil { - rv = make([]byte, len(v)) - copy(rv, v) - } - return rv, nil -} - -func (r *Reader) MultiGet(keys [][]byte) ([][]byte, error) { - return store.MultiGet(r, keys) -} - -func (r *Reader) PrefixIterator(prefix []byte) store.KVIterator { - cursor := r.bucket.Cursor() - - rv := &Iterator{ - store: r.store, - tx: r.tx, - cursor: cursor, - prefix: prefix, - } - - rv.Seek(prefix) - return rv -} - -func (r *Reader) RangeIterator(start, end []byte) store.KVIterator { - cursor := r.bucket.Cursor() - - rv := &Iterator{ - store: r.store, - tx: r.tx, - cursor: cursor, - start: start, - end: end, - } - - rv.Seek(start) - return rv -} - -func (r *Reader) Close() error { - return r.tx.Rollback() -} diff --git a/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/store/boltdb/store.go 
b/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/store/boltdb/store.go deleted file mode 100644 index bc99275e1..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/store/boltdb/store.go +++ /dev/null @@ -1,181 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package boltdb implements a store.KVStore on top of BoltDB. It supports the -// following options: -// -// "bucket" (string): the name of BoltDB bucket to use, defaults to "bleve". -// -// "nosync" (bool): if true, set boltdb.DB.NoSync to true. It speeds up index -// operations in exchange of losing integrity guarantees if indexation aborts -// without closing the index. Use it when rebuilding indexes from zero. 
-package boltdb - -import ( - "bytes" - "encoding/json" - "fmt" - "os" - - "github.com/blevesearch/bleve/v2/registry" - store "github.com/blevesearch/upsidedown_store_api" - bolt "go.etcd.io/bbolt" -) - -const ( - Name = "boltdb" - defaultCompactBatchSize = 100 -) - -type Store struct { - path string - bucket string - db *bolt.DB - noSync bool - fillPercent float64 - mo store.MergeOperator -} - -func New(mo store.MergeOperator, config map[string]interface{}) (store.KVStore, error) { - path, ok := config["path"].(string) - if !ok { - return nil, fmt.Errorf("must specify path") - } - if path == "" { - return nil, os.ErrInvalid - } - - bucket, ok := config["bucket"].(string) - if !ok { - bucket = "bleve" - } - - noSync, _ := config["nosync"].(bool) - - fillPercent, ok := config["fillPercent"].(float64) - if !ok { - fillPercent = bolt.DefaultFillPercent - } - - bo := &bolt.Options{} - ro, ok := config["read_only"].(bool) - if ok { - bo.ReadOnly = ro - } - - if initialMmapSize, ok := config["initialMmapSize"].(int); ok { - bo.InitialMmapSize = initialMmapSize - } else if initialMmapSize, ok := config["initialMmapSize"].(float64); ok { - bo.InitialMmapSize = int(initialMmapSize) - } - - db, err := bolt.Open(path, 0600, bo) - if err != nil { - return nil, err - } - db.NoSync = noSync - - if !bo.ReadOnly { - err = db.Update(func(tx *bolt.Tx) error { - _, err := tx.CreateBucketIfNotExists([]byte(bucket)) - - return err - }) - if err != nil { - return nil, err - } - } - - rv := Store{ - path: path, - bucket: bucket, - db: db, - mo: mo, - noSync: noSync, - fillPercent: fillPercent, - } - return &rv, nil -} - -func (bs *Store) Close() error { - return bs.db.Close() -} - -func (bs *Store) Reader() (store.KVReader, error) { - tx, err := bs.db.Begin(false) - if err != nil { - return nil, err - } - return &Reader{ - store: bs, - tx: tx, - bucket: tx.Bucket([]byte(bs.bucket)), - }, nil -} - -func (bs *Store) Writer() (store.KVWriter, error) { - return &Writer{ - store: bs, - }, nil 
-} - -func (bs *Store) Stats() json.Marshaler { - return &stats{ - s: bs, - } -} - -// CompactWithBatchSize removes DictionaryTerm entries with a count of zero (in batchSize batches) -// Removing entries is a workaround for github issue #374. -func (bs *Store) CompactWithBatchSize(batchSize int) error { - for { - cnt := 0 - err := bs.db.Batch(func(tx *bolt.Tx) error { - c := tx.Bucket([]byte(bs.bucket)).Cursor() - prefix := []byte("d") - - for k, v := c.Seek(prefix); bytes.HasPrefix(k, prefix); k, v = c.Next() { - if bytes.Equal(v, []byte{0}) { - cnt++ - if err := c.Delete(); err != nil { - return err - } - if cnt == batchSize { - break - } - } - - } - return nil - }) - if err != nil { - return err - } - - if cnt == 0 { - break - } - } - return nil -} - -// Compact calls CompactWithBatchSize with a default batch size of 100. This is a workaround -// for github issue #374. -func (bs *Store) Compact() error { - return bs.CompactWithBatchSize(defaultCompactBatchSize) -} - -func init() { - registry.RegisterKVStore(Name, New) -} diff --git a/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/store/boltdb/writer.go b/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/store/boltdb/writer.go deleted file mode 100644 index c25583cab..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/store/boltdb/writer.go +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package boltdb - -import ( - "fmt" - - store "github.com/blevesearch/upsidedown_store_api" -) - -type Writer struct { - store *Store -} - -func (w *Writer) NewBatch() store.KVBatch { - return store.NewEmulatedBatch(w.store.mo) -} - -func (w *Writer) NewBatchEx(options store.KVBatchOptions) ([]byte, store.KVBatch, error) { - return make([]byte, options.TotalBytes), w.NewBatch(), nil -} - -func (w *Writer) ExecuteBatch(batch store.KVBatch) (err error) { - - emulatedBatch, ok := batch.(*store.EmulatedBatch) - if !ok { - return fmt.Errorf("wrong type of batch") - } - - tx, err := w.store.db.Begin(true) - if err != nil { - return - } - // defer function to ensure that once started, - // we either Commit tx or Rollback - defer func() { - // if nothing went wrong, commit - if err == nil { - // careful to catch error here too - err = tx.Commit() - } else { - // caller should see error that caused abort, - // not success or failure of Rollback itself - _ = tx.Rollback() - } - }() - - bucket := tx.Bucket([]byte(w.store.bucket)) - bucket.FillPercent = w.store.fillPercent - - for k, mergeOps := range emulatedBatch.Merger.Merges { - kb := []byte(k) - existingVal := bucket.Get(kb) - mergedVal, fullMergeOk := w.store.mo.FullMerge(kb, existingVal, mergeOps) - if !fullMergeOk { - err = fmt.Errorf("merge operator returned failure") - return - } - err = bucket.Put(kb, mergedVal) - if err != nil { - return - } - } - - for _, op := range emulatedBatch.Ops { - if op.V != nil { - err = bucket.Put(op.K, op.V) - if err != nil { - return - } - } else { - err = bucket.Delete(op.K) - if err != nil { - return - } - } - } - return -} - -func (w *Writer) Close() error { - return nil -} diff --git a/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/store/gtreap/iterator.go b/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/store/gtreap/iterator.go deleted file mode 100644 index 092ccf240..000000000 --- 
a/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/store/gtreap/iterator.go +++ /dev/null @@ -1,152 +0,0 @@ -// Copyright (c) 2015 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package gtreap provides an in-memory implementation of the -// KVStore interfaces using the gtreap balanced-binary treap, -// copy-on-write data structure. -package gtreap - -import ( - "bytes" - "sync" - - "github.com/steveyen/gtreap" -) - -type Iterator struct { - t *gtreap.Treap - - m sync.Mutex - cancelCh chan struct{} - nextCh chan *Item - curr *Item - currOk bool - - prefix []byte - start []byte - end []byte -} - -func (w *Iterator) Seek(k []byte) { - if w.start != nil && bytes.Compare(k, w.start) < 0 { - k = w.start - } - if w.prefix != nil && !bytes.HasPrefix(k, w.prefix) { - if bytes.Compare(k, w.prefix) < 0 { - k = w.prefix - } else { - var end []byte - for i := len(w.prefix) - 1; i >= 0; i-- { - c := w.prefix[i] - if c < 0xff { - end = make([]byte, i+1) - copy(end, w.prefix) - end[i] = c + 1 - break - } - } - k = end - } - } - w.restart(&Item{k: k}) -} - -func (w *Iterator) restart(start *Item) *Iterator { - cancelCh := make(chan struct{}) - nextCh := make(chan *Item, 1) - - w.m.Lock() - if w.cancelCh != nil { - close(w.cancelCh) - } - w.cancelCh = cancelCh - w.nextCh = nextCh - w.curr = nil - w.currOk = false - w.m.Unlock() - - go func() { - if start != nil { - w.t.VisitAscend(start, func(itm gtreap.Item) bool { - select { - case <-cancelCh: 
- return false - case nextCh <- itm.(*Item): - return true - } - }) - } - close(nextCh) - }() - - w.Next() - - return w -} - -func (w *Iterator) Next() { - w.m.Lock() - nextCh := w.nextCh - w.m.Unlock() - w.curr, w.currOk = <-nextCh -} - -func (w *Iterator) Current() ([]byte, []byte, bool) { - w.m.Lock() - defer w.m.Unlock() - if !w.currOk || w.curr == nil { - return nil, nil, false - } - if w.prefix != nil && !bytes.HasPrefix(w.curr.k, w.prefix) { - return nil, nil, false - } else if w.end != nil && bytes.Compare(w.curr.k, w.end) >= 0 { - return nil, nil, false - } - return w.curr.k, w.curr.v, w.currOk -} - -func (w *Iterator) Key() []byte { - k, _, ok := w.Current() - if !ok { - return nil - } - return k -} - -func (w *Iterator) Value() []byte { - _, v, ok := w.Current() - if !ok { - return nil - } - return v -} - -func (w *Iterator) Valid() bool { - _, _, ok := w.Current() - return ok -} - -func (w *Iterator) Close() error { - w.m.Lock() - if w.cancelCh != nil { - close(w.cancelCh) - } - w.cancelCh = nil - w.nextCh = nil - w.curr = nil - w.currOk = false - w.m.Unlock() - - return nil -} diff --git a/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/store/gtreap/reader.go b/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/store/gtreap/reader.go deleted file mode 100644 index 34df81302..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/store/gtreap/reader.go +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright (c) 2015 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -// Package gtreap provides an in-memory implementation of the -// KVStore interfaces using the gtreap balanced-binary treap, -// copy-on-write data structure. -package gtreap - -import ( - "github.com/blevesearch/upsidedown_store_api" - - "github.com/steveyen/gtreap" -) - -type Reader struct { - t *gtreap.Treap -} - -func (w *Reader) Get(k []byte) (v []byte, err error) { - var rv []byte - itm := w.t.Get(&Item{k: k}) - if itm != nil { - rv = make([]byte, len(itm.(*Item).v)) - copy(rv, itm.(*Item).v) - return rv, nil - } - return nil, nil -} - -func (r *Reader) MultiGet(keys [][]byte) ([][]byte, error) { - return store.MultiGet(r, keys) -} - -func (w *Reader) PrefixIterator(k []byte) store.KVIterator { - rv := Iterator{ - t: w.t, - prefix: k, - } - rv.restart(&Item{k: k}) - return &rv -} - -func (w *Reader) RangeIterator(start, end []byte) store.KVIterator { - rv := Iterator{ - t: w.t, - start: start, - end: end, - } - rv.restart(&Item{k: start}) - return &rv -} - -func (w *Reader) Close() error { - return nil -} diff --git a/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/store/gtreap/store.go b/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/store/gtreap/store.go deleted file mode 100644 index 8d1f5887d..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/store/gtreap/store.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright (c) 2015 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -// Package gtreap provides an in-memory implementation of the -// KVStore interfaces using the gtreap balanced-binary treap, -// copy-on-write data structure. - -package gtreap - -import ( - "bytes" - "fmt" - "os" - "sync" - - "github.com/blevesearch/bleve/v2/registry" - "github.com/blevesearch/upsidedown_store_api" - "github.com/steveyen/gtreap" -) - -const Name = "gtreap" - -type Store struct { - m sync.Mutex - t *gtreap.Treap - mo store.MergeOperator -} - -type Item struct { - k []byte - v []byte -} - -func itemCompare(a, b interface{}) int { - return bytes.Compare(a.(*Item).k, b.(*Item).k) -} - -func New(mo store.MergeOperator, config map[string]interface{}) (store.KVStore, error) { - path, ok := config["path"].(string) - if !ok { - return nil, fmt.Errorf("must specify path") - } - if path != "" { - return nil, os.ErrInvalid - } - - rv := Store{ - t: gtreap.NewTreap(itemCompare), - mo: mo, - } - return &rv, nil -} - -func (s *Store) Close() error { - return nil -} - -func (s *Store) Reader() (store.KVReader, error) { - s.m.Lock() - t := s.t - s.m.Unlock() - return &Reader{t: t}, nil -} - -func (s *Store) Writer() (store.KVWriter, error) { - return &Writer{s: s}, nil -} - -func init() { - registry.RegisterKVStore(Name, New) -} diff --git a/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/store/gtreap/writer.go b/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/store/gtreap/writer.go deleted file mode 100644 index 80aa15b19..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/store/gtreap/writer.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright (c) 2015 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package gtreap provides an in-memory implementation of the -// KVStore interfaces using the gtreap balanced-binary treap, -// copy-on-write data structure. -package gtreap - -import ( - "fmt" - "math/rand" - - "github.com/blevesearch/upsidedown_store_api" -) - -type Writer struct { - s *Store -} - -func (w *Writer) NewBatch() store.KVBatch { - return store.NewEmulatedBatch(w.s.mo) -} - -func (w *Writer) NewBatchEx(options store.KVBatchOptions) ([]byte, store.KVBatch, error) { - return make([]byte, options.TotalBytes), w.NewBatch(), nil -} - -func (w *Writer) ExecuteBatch(batch store.KVBatch) error { - - emulatedBatch, ok := batch.(*store.EmulatedBatch) - if !ok { - return fmt.Errorf("wrong type of batch") - } - - w.s.m.Lock() - for k, mergeOps := range emulatedBatch.Merger.Merges { - kb := []byte(k) - var existingVal []byte - existingItem := w.s.t.Get(&Item{k: kb}) - if existingItem != nil { - existingVal = w.s.t.Get(&Item{k: kb}).(*Item).v - } - mergedVal, fullMergeOk := w.s.mo.FullMerge(kb, existingVal, mergeOps) - if !fullMergeOk { - return fmt.Errorf("merge operator returned failure") - } - w.s.t = w.s.t.Upsert(&Item{k: kb, v: mergedVal}, rand.Int()) - } - - for _, op := range emulatedBatch.Ops { - if op.V != nil { - w.s.t = w.s.t.Upsert(&Item{k: op.K, v: op.V}, rand.Int()) - } else { - w.s.t = w.s.t.Delete(&Item{k: op.K}) - } - } - w.s.m.Unlock() - - return nil -} - -func (w *Writer) Close() error { - w.s = nil - return nil -} diff --git a/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/upsidedown.go 
b/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/upsidedown.go deleted file mode 100644 index d67ee03fb..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/upsidedown.go +++ /dev/null @@ -1,1069 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:generate protoc --gofast_out=. upsidedown.proto - -package upsidedown - -import ( - "encoding/binary" - "encoding/json" - "fmt" - "math" - "sync" - "sync/atomic" - "time" - - "github.com/blevesearch/bleve/v2/document" - "github.com/blevesearch/bleve/v2/registry" - index "github.com/blevesearch/bleve_index_api" - store "github.com/blevesearch/upsidedown_store_api" - - "github.com/golang/protobuf/proto" -) - -const Name = "upside_down" - -// RowBufferSize should ideally this is sized to be the smallest -// size that can contain an index row key and its corresponding -// value. It is not a limit, if need be a larger buffer is -// allocated, but performance will be more optimal if *most* -// rows fit this size. 
-const RowBufferSize = 4 * 1024 - -var VersionKey = []byte{'v'} - -const Version uint8 = 7 - -var IncompatibleVersion = fmt.Errorf("incompatible version, %d is supported", Version) - -var ErrorUnknownStorageType = fmt.Errorf("unknown storage type") - -type UpsideDownCouch struct { - version uint8 - path string - storeName string - storeConfig map[string]interface{} - store store.KVStore - fieldCache *FieldCache - analysisQueue *index.AnalysisQueue - stats *indexStat - - m sync.RWMutex - // fields protected by m - docCount uint64 - - writeMutex sync.Mutex -} - -type docBackIndexRow struct { - docID string - doc index.Document // If deletion, doc will be nil. - backIndexRow *BackIndexRow -} - -func NewUpsideDownCouch(storeName string, storeConfig map[string]interface{}, analysisQueue *index.AnalysisQueue) (index.Index, error) { - rv := &UpsideDownCouch{ - version: Version, - fieldCache: NewFieldCache(), - storeName: storeName, - storeConfig: storeConfig, - analysisQueue: analysisQueue, - } - rv.stats = &indexStat{i: rv} - return rv, nil -} - -func (udc *UpsideDownCouch) init(kvwriter store.KVWriter) (err error) { - // version marker - rowsAll := [][]UpsideDownCouchRow{ - {NewVersionRow(udc.version)}, - } - - err = udc.batchRows(kvwriter, nil, rowsAll, nil) - return -} - -func (udc *UpsideDownCouch) loadSchema(kvreader store.KVReader) (err error) { - - it := kvreader.PrefixIterator([]byte{'f'}) - defer func() { - if cerr := it.Close(); err == nil && cerr != nil { - err = cerr - } - }() - - key, val, valid := it.Current() - for valid { - var fieldRow *FieldRow - fieldRow, err = NewFieldRowKV(key, val) - if err != nil { - return - } - udc.fieldCache.AddExisting(fieldRow.name, fieldRow.index) - - it.Next() - key, val, valid = it.Current() - } - - val, err = kvreader.Get([]byte{'v'}) - if err != nil { - return - } - var vr *VersionRow - vr, err = NewVersionRowKV([]byte{'v'}, val) - if err != nil { - return - } - if vr.version != Version { - err = IncompatibleVersion - 
return - } - - return -} - -var rowBufferPool sync.Pool - -func GetRowBuffer() []byte { - if rb, ok := rowBufferPool.Get().([]byte); ok { - return rb - } else { - return make([]byte, RowBufferSize) - } -} - -func PutRowBuffer(buf []byte) { - rowBufferPool.Put(buf) -} - -func (udc *UpsideDownCouch) batchRows(writer store.KVWriter, addRowsAll [][]UpsideDownCouchRow, updateRowsAll [][]UpsideDownCouchRow, deleteRowsAll [][]UpsideDownCouchRow) (err error) { - dictionaryDeltas := make(map[string]int64) - - // count up bytes needed for buffering. - addNum := 0 - addKeyBytes := 0 - addValBytes := 0 - - updateNum := 0 - updateKeyBytes := 0 - updateValBytes := 0 - - deleteNum := 0 - deleteKeyBytes := 0 - - rowBuf := GetRowBuffer() - - for _, addRows := range addRowsAll { - for _, row := range addRows { - tfr, ok := row.(*TermFrequencyRow) - if ok { - if tfr.DictionaryRowKeySize() > len(rowBuf) { - rowBuf = make([]byte, tfr.DictionaryRowKeySize()) - } - dictKeySize, err := tfr.DictionaryRowKeyTo(rowBuf) - if err != nil { - return err - } - dictionaryDeltas[string(rowBuf[:dictKeySize])] += 1 - } - addKeyBytes += row.KeySize() - addValBytes += row.ValueSize() - } - addNum += len(addRows) - } - - for _, updateRows := range updateRowsAll { - for _, row := range updateRows { - updateKeyBytes += row.KeySize() - updateValBytes += row.ValueSize() - } - updateNum += len(updateRows) - } - - for _, deleteRows := range deleteRowsAll { - for _, row := range deleteRows { - tfr, ok := row.(*TermFrequencyRow) - if ok { - // need to decrement counter - if tfr.DictionaryRowKeySize() > len(rowBuf) { - rowBuf = make([]byte, tfr.DictionaryRowKeySize()) - } - dictKeySize, err := tfr.DictionaryRowKeyTo(rowBuf) - if err != nil { - return err - } - dictionaryDeltas[string(rowBuf[:dictKeySize])] -= 1 - } - deleteKeyBytes += row.KeySize() - } - deleteNum += len(deleteRows) - } - - PutRowBuffer(rowBuf) - - mergeNum := len(dictionaryDeltas) - mergeKeyBytes := 0 - mergeValBytes := mergeNum * 
DictionaryRowMaxValueSize - - for dictRowKey := range dictionaryDeltas { - mergeKeyBytes += len(dictRowKey) - } - - // prepare batch - totBytes := addKeyBytes + addValBytes + - updateKeyBytes + updateValBytes + - deleteKeyBytes + - 2*(mergeKeyBytes+mergeValBytes) - - buf, wb, err := writer.NewBatchEx(store.KVBatchOptions{ - TotalBytes: totBytes, - NumSets: addNum + updateNum, - NumDeletes: deleteNum, - NumMerges: mergeNum, - }) - if err != nil { - return err - } - defer func() { - _ = wb.Close() - }() - - // fill the batch - for _, addRows := range addRowsAll { - for _, row := range addRows { - keySize, err := row.KeyTo(buf) - if err != nil { - return err - } - valSize, err := row.ValueTo(buf[keySize:]) - if err != nil { - return err - } - wb.Set(buf[:keySize], buf[keySize:keySize+valSize]) - buf = buf[keySize+valSize:] - } - } - - for _, updateRows := range updateRowsAll { - for _, row := range updateRows { - keySize, err := row.KeyTo(buf) - if err != nil { - return err - } - valSize, err := row.ValueTo(buf[keySize:]) - if err != nil { - return err - } - wb.Set(buf[:keySize], buf[keySize:keySize+valSize]) - buf = buf[keySize+valSize:] - } - } - - for _, deleteRows := range deleteRowsAll { - for _, row := range deleteRows { - keySize, err := row.KeyTo(buf) - if err != nil { - return err - } - wb.Delete(buf[:keySize]) - buf = buf[keySize:] - } - } - - for dictRowKey, delta := range dictionaryDeltas { - dictRowKeyLen := copy(buf, dictRowKey) - binary.LittleEndian.PutUint64(buf[dictRowKeyLen:], uint64(delta)) - wb.Merge(buf[:dictRowKeyLen], buf[dictRowKeyLen:dictRowKeyLen+DictionaryRowMaxValueSize]) - buf = buf[dictRowKeyLen+DictionaryRowMaxValueSize:] - } - - // write out the batch - return writer.ExecuteBatch(wb) -} - -func (udc *UpsideDownCouch) Open() (err error) { - // acquire the write mutex for the duration of Open() - udc.writeMutex.Lock() - defer udc.writeMutex.Unlock() - - // open the kv store - storeConstructor := 
registry.KVStoreConstructorByName(udc.storeName) - if storeConstructor == nil { - err = ErrorUnknownStorageType - return - } - - // now open the store - udc.store, err = storeConstructor(&mergeOperator, udc.storeConfig) - if err != nil { - return - } - - // start a reader to look at the index - var kvreader store.KVReader - kvreader, err = udc.store.Reader() - if err != nil { - return - } - - var value []byte - value, err = kvreader.Get(VersionKey) - if err != nil { - _ = kvreader.Close() - return - } - - if value != nil { - err = udc.loadSchema(kvreader) - if err != nil { - _ = kvreader.Close() - return - } - - // set doc count - udc.m.Lock() - udc.docCount, err = udc.countDocs(kvreader) - udc.m.Unlock() - - err = kvreader.Close() - } else { - // new index, close the reader and open writer to init - err = kvreader.Close() - if err != nil { - return - } - - var kvwriter store.KVWriter - kvwriter, err = udc.store.Writer() - if err != nil { - return - } - defer func() { - if cerr := kvwriter.Close(); err == nil && cerr != nil { - err = cerr - } - }() - - // init the index - err = udc.init(kvwriter) - } - - return -} - -func (udc *UpsideDownCouch) countDocs(kvreader store.KVReader) (count uint64, err error) { - it := kvreader.PrefixIterator([]byte{'b'}) - defer func() { - if cerr := it.Close(); err == nil && cerr != nil { - err = cerr - } - }() - - _, _, valid := it.Current() - for valid { - count++ - it.Next() - _, _, valid = it.Current() - } - - return -} - -func (udc *UpsideDownCouch) rowCount() (count uint64, err error) { - // start an isolated reader for use during the rowcount - kvreader, err := udc.store.Reader() - if err != nil { - return - } - defer func() { - if cerr := kvreader.Close(); err == nil && cerr != nil { - err = cerr - } - }() - it := kvreader.RangeIterator(nil, nil) - defer func() { - if cerr := it.Close(); err == nil && cerr != nil { - err = cerr - } - }() - - _, _, valid := it.Current() - for valid { - count++ - it.Next() - _, _, valid = 
it.Current() - } - - return -} - -func (udc *UpsideDownCouch) Close() error { - return udc.store.Close() -} - -func (udc *UpsideDownCouch) Update(doc index.Document) (err error) { - // do analysis before acquiring write lock - analysisStart := time.Now() - resultChan := make(chan *AnalysisResult) - - // put the work on the queue - udc.analysisQueue.Queue(func() { - ar := udc.analyze(doc) - resultChan <- ar - }) - - // wait for the result - result := <-resultChan - close(resultChan) - atomic.AddUint64(&udc.stats.analysisTime, uint64(time.Since(analysisStart))) - - udc.writeMutex.Lock() - defer udc.writeMutex.Unlock() - - // open a reader for backindex lookup - var kvreader store.KVReader - kvreader, err = udc.store.Reader() - if err != nil { - return - } - - // first we lookup the backindex row for the doc id if it exists - // lookup the back index row - var backIndexRow *BackIndexRow - backIndexRow, err = backIndexRowForDoc(kvreader, index.IndexInternalID(doc.ID())) - if err != nil { - _ = kvreader.Close() - atomic.AddUint64(&udc.stats.errors, 1) - return - } - - err = kvreader.Close() - if err != nil { - return - } - - return udc.UpdateWithAnalysis(doc, result, backIndexRow) -} - -func (udc *UpsideDownCouch) UpdateWithAnalysis(doc index.Document, - result *AnalysisResult, backIndexRow *BackIndexRow) (err error) { - // start a writer for this update - indexStart := time.Now() - var kvwriter store.KVWriter - kvwriter, err = udc.store.Writer() - if err != nil { - return - } - defer func() { - if cerr := kvwriter.Close(); err == nil && cerr != nil { - err = cerr - } - }() - - // prepare a list of rows - var addRowsAll [][]UpsideDownCouchRow - var updateRowsAll [][]UpsideDownCouchRow - var deleteRowsAll [][]UpsideDownCouchRow - - addRows, updateRows, deleteRows := udc.mergeOldAndNew(backIndexRow, result.Rows) - if len(addRows) > 0 { - addRowsAll = append(addRowsAll, addRows) - } - if len(updateRows) > 0 { - updateRowsAll = append(updateRowsAll, updateRows) - } - if 
len(deleteRows) > 0 { - deleteRowsAll = append(deleteRowsAll, deleteRows) - } - - err = udc.batchRows(kvwriter, addRowsAll, updateRowsAll, deleteRowsAll) - if err == nil && backIndexRow == nil { - udc.m.Lock() - udc.docCount++ - udc.m.Unlock() - } - atomic.AddUint64(&udc.stats.indexTime, uint64(time.Since(indexStart))) - if err == nil { - atomic.AddUint64(&udc.stats.updates, 1) - atomic.AddUint64(&udc.stats.numPlainTextBytesIndexed, doc.NumPlainTextBytes()) - } else { - atomic.AddUint64(&udc.stats.errors, 1) - } - return -} - -func (udc *UpsideDownCouch) mergeOldAndNew(backIndexRow *BackIndexRow, rows []IndexRow) (addRows []UpsideDownCouchRow, updateRows []UpsideDownCouchRow, deleteRows []UpsideDownCouchRow) { - addRows = make([]UpsideDownCouchRow, 0, len(rows)) - - if backIndexRow == nil { - addRows = addRows[0:len(rows)] - for i, row := range rows { - addRows[i] = row - } - return addRows, nil, nil - } - - updateRows = make([]UpsideDownCouchRow, 0, len(rows)) - deleteRows = make([]UpsideDownCouchRow, 0, len(rows)) - - var existingTermKeys map[string]struct{} - backIndexTermKeys := backIndexRow.AllTermKeys() - if len(backIndexTermKeys) > 0 { - existingTermKeys = make(map[string]struct{}, len(backIndexTermKeys)) - for _, key := range backIndexTermKeys { - existingTermKeys[string(key)] = struct{}{} - } - } - - var existingStoredKeys map[string]struct{} - backIndexStoredKeys := backIndexRow.AllStoredKeys() - if len(backIndexStoredKeys) > 0 { - existingStoredKeys = make(map[string]struct{}, len(backIndexStoredKeys)) - for _, key := range backIndexStoredKeys { - existingStoredKeys[string(key)] = struct{}{} - } - } - - keyBuf := GetRowBuffer() - for _, row := range rows { - switch row := row.(type) { - case *TermFrequencyRow: - if existingTermKeys != nil { - if row.KeySize() > len(keyBuf) { - keyBuf = make([]byte, row.KeySize()) - } - keySize, _ := row.KeyTo(keyBuf) - if _, ok := existingTermKeys[string(keyBuf[:keySize])]; ok { - updateRows = append(updateRows, row) - 
delete(existingTermKeys, string(keyBuf[:keySize])) - continue - } - } - addRows = append(addRows, row) - case *StoredRow: - if existingStoredKeys != nil { - if row.KeySize() > len(keyBuf) { - keyBuf = make([]byte, row.KeySize()) - } - keySize, _ := row.KeyTo(keyBuf) - if _, ok := existingStoredKeys[string(keyBuf[:keySize])]; ok { - updateRows = append(updateRows, row) - delete(existingStoredKeys, string(keyBuf[:keySize])) - continue - } - } - addRows = append(addRows, row) - default: - updateRows = append(updateRows, row) - } - } - PutRowBuffer(keyBuf) - - // any of the existing rows that weren't updated need to be deleted - for existingTermKey := range existingTermKeys { - termFreqRow, err := NewTermFrequencyRowK([]byte(existingTermKey)) - if err == nil { - deleteRows = append(deleteRows, termFreqRow) - } - } - - // any of the existing stored fields that weren't updated need to be deleted - for existingStoredKey := range existingStoredKeys { - storedRow, err := NewStoredRowK([]byte(existingStoredKey)) - if err == nil { - deleteRows = append(deleteRows, storedRow) - } - } - - return addRows, updateRows, deleteRows -} - -func (udc *UpsideDownCouch) storeField(docID []byte, field index.Field, fieldIndex uint16, rows []IndexRow, backIndexStoredEntries []*BackIndexStoreEntry) ([]IndexRow, []*BackIndexStoreEntry) { - fieldType := field.EncodedFieldType() - storedRow := NewStoredRow(docID, fieldIndex, field.ArrayPositions(), fieldType, field.Value()) - - // record the back index entry - backIndexStoredEntry := BackIndexStoreEntry{Field: proto.Uint32(uint32(fieldIndex)), ArrayPositions: field.ArrayPositions()} - - return append(rows, storedRow), append(backIndexStoredEntries, &backIndexStoredEntry) -} - -func (udc *UpsideDownCouch) indexField(docID []byte, includeTermVectors bool, fieldIndex uint16, fieldLength int, tokenFreqs index.TokenFrequencies, rows []IndexRow, backIndexTermsEntries []*BackIndexTermsEntry) ([]IndexRow, []*BackIndexTermsEntry) { - fieldNorm := 
float32(1.0 / math.Sqrt(float64(fieldLength))) - - termFreqRows := make([]TermFrequencyRow, len(tokenFreqs)) - termFreqRowsUsed := 0 - - terms := make([]string, 0, len(tokenFreqs)) - for k, tf := range tokenFreqs { - termFreqRow := &termFreqRows[termFreqRowsUsed] - termFreqRowsUsed++ - - InitTermFrequencyRow(termFreqRow, tf.Term, fieldIndex, docID, - uint64(frequencyFromTokenFreq(tf)), fieldNorm) - - if includeTermVectors { - termFreqRow.vectors, rows = udc.termVectorsFromTokenFreq(fieldIndex, tf, rows) - } - - // record the back index entry - terms = append(terms, k) - - rows = append(rows, termFreqRow) - } - backIndexTermsEntry := BackIndexTermsEntry{Field: proto.Uint32(uint32(fieldIndex)), Terms: terms} - backIndexTermsEntries = append(backIndexTermsEntries, &backIndexTermsEntry) - - return rows, backIndexTermsEntries -} - -func (udc *UpsideDownCouch) Delete(id string) (err error) { - indexStart := time.Now() - - udc.writeMutex.Lock() - defer udc.writeMutex.Unlock() - - // open a reader for backindex lookup - var kvreader store.KVReader - kvreader, err = udc.store.Reader() - if err != nil { - return - } - - // first we lookup the backindex row for the doc id if it exists - // lookup the back index row - var backIndexRow *BackIndexRow - backIndexRow, err = backIndexRowForDoc(kvreader, index.IndexInternalID(id)) - if err != nil { - _ = kvreader.Close() - atomic.AddUint64(&udc.stats.errors, 1) - return - } - - err = kvreader.Close() - if err != nil { - return - } - - if backIndexRow == nil { - atomic.AddUint64(&udc.stats.deletes, 1) - return - } - - // start a writer for this delete - var kvwriter store.KVWriter - kvwriter, err = udc.store.Writer() - if err != nil { - return - } - defer func() { - if cerr := kvwriter.Close(); err == nil && cerr != nil { - err = cerr - } - }() - - var deleteRowsAll [][]UpsideDownCouchRow - - deleteRows := udc.deleteSingle(id, backIndexRow, nil) - if len(deleteRows) > 0 { - deleteRowsAll = append(deleteRowsAll, deleteRows) - } - - 
err = udc.batchRows(kvwriter, nil, nil, deleteRowsAll) - if err == nil { - udc.m.Lock() - udc.docCount-- - udc.m.Unlock() - } - atomic.AddUint64(&udc.stats.indexTime, uint64(time.Since(indexStart))) - if err == nil { - atomic.AddUint64(&udc.stats.deletes, 1) - } else { - atomic.AddUint64(&udc.stats.errors, 1) - } - return -} - -func (udc *UpsideDownCouch) deleteSingle(id string, backIndexRow *BackIndexRow, deleteRows []UpsideDownCouchRow) []UpsideDownCouchRow { - idBytes := []byte(id) - - for _, backIndexEntry := range backIndexRow.termsEntries { - for i := range backIndexEntry.Terms { - tfr := NewTermFrequencyRow([]byte(backIndexEntry.Terms[i]), uint16(*backIndexEntry.Field), idBytes, 0, 0) - deleteRows = append(deleteRows, tfr) - } - } - for _, se := range backIndexRow.storedEntries { - sf := NewStoredRow(idBytes, uint16(*se.Field), se.ArrayPositions, 'x', nil) - deleteRows = append(deleteRows, sf) - } - - // also delete the back entry itself - deleteRows = append(deleteRows, backIndexRow) - return deleteRows -} - -func decodeFieldType(typ byte, name string, pos []uint64, value []byte) document.Field { - switch typ { - case 't': - return document.NewTextField(name, pos, value) - case 'n': - return document.NewNumericFieldFromBytes(name, pos, value) - case 'd': - return document.NewDateTimeFieldFromBytes(name, pos, value) - case 'b': - return document.NewBooleanFieldFromBytes(name, pos, value) - case 'g': - return document.NewGeoPointFieldFromBytes(name, pos, value) - } - return nil -} - -func frequencyFromTokenFreq(tf *index.TokenFreq) int { - return tf.Frequency() -} - -func (udc *UpsideDownCouch) termVectorsFromTokenFreq(field uint16, tf *index.TokenFreq, rows []IndexRow) ([]*TermVector, []IndexRow) { - a := make([]TermVector, len(tf.Locations)) - rv := make([]*TermVector, len(tf.Locations)) - - for i, l := range tf.Locations { - var newFieldRow *FieldRow - fieldIndex := field - if l.Field != "" { - // lookup correct field - fieldIndex, newFieldRow = 
udc.fieldIndexOrNewRow(l.Field) - if newFieldRow != nil { - rows = append(rows, newFieldRow) - } - } - a[i] = TermVector{ - field: fieldIndex, - arrayPositions: l.ArrayPositions, - pos: uint64(l.Position), - start: uint64(l.Start), - end: uint64(l.End), - } - rv[i] = &a[i] - } - - return rv, rows -} - -func (udc *UpsideDownCouch) termFieldVectorsFromTermVectors(in []*TermVector) []*index.TermFieldVector { - if len(in) == 0 { - return nil - } - - a := make([]index.TermFieldVector, len(in)) - rv := make([]*index.TermFieldVector, len(in)) - - for i, tv := range in { - fieldName := udc.fieldCache.FieldIndexed(tv.field) - a[i] = index.TermFieldVector{ - Field: fieldName, - ArrayPositions: tv.arrayPositions, - Pos: tv.pos, - Start: tv.start, - End: tv.end, - } - rv[i] = &a[i] - } - return rv -} - -func (udc *UpsideDownCouch) Batch(batch *index.Batch) (err error) { - persistedCallback := batch.PersistedCallback() - if persistedCallback != nil { - defer persistedCallback(err) - } - analysisStart := time.Now() - - resultChan := make(chan *AnalysisResult, len(batch.IndexOps)) - - var numUpdates uint64 - var numPlainTextBytes uint64 - for _, doc := range batch.IndexOps { - if doc != nil { - numUpdates++ - numPlainTextBytes += doc.NumPlainTextBytes() - } - } - - if numUpdates > 0 { - go func() { - for k := range batch.IndexOps { - doc := batch.IndexOps[k] - if doc != nil { - // put the work on the queue - udc.analysisQueue.Queue(func() { - ar := udc.analyze(doc) - resultChan <- ar - }) - } - } - }() - } - - // retrieve back index rows concurrent with analysis - docBackIndexRowErr := error(nil) - docBackIndexRowCh := make(chan *docBackIndexRow, len(batch.IndexOps)) - - udc.writeMutex.Lock() - defer udc.writeMutex.Unlock() - - go func() { - defer close(docBackIndexRowCh) - - // open a reader for backindex lookup - var kvreader store.KVReader - kvreader, err = udc.store.Reader() - if err != nil { - docBackIndexRowErr = err - return - } - defer func() { - if cerr := 
kvreader.Close(); err == nil && cerr != nil { - docBackIndexRowErr = cerr - } - }() - - for docID, doc := range batch.IndexOps { - backIndexRow, err := backIndexRowForDoc(kvreader, index.IndexInternalID(docID)) - if err != nil { - docBackIndexRowErr = err - return - } - - docBackIndexRowCh <- &docBackIndexRow{docID, doc, backIndexRow} - } - }() - - // wait for analysis result - newRowsMap := make(map[string][]IndexRow) - var itemsDeQueued uint64 - for itemsDeQueued < numUpdates { - result := <-resultChan - newRowsMap[result.DocID] = result.Rows - itemsDeQueued++ - } - close(resultChan) - - atomic.AddUint64(&udc.stats.analysisTime, uint64(time.Since(analysisStart))) - - docsAdded := uint64(0) - docsDeleted := uint64(0) - - indexStart := time.Now() - - // prepare a list of rows - var addRowsAll [][]UpsideDownCouchRow - var updateRowsAll [][]UpsideDownCouchRow - var deleteRowsAll [][]UpsideDownCouchRow - - // add the internal ops - var updateRows []UpsideDownCouchRow - var deleteRows []UpsideDownCouchRow - - for internalKey, internalValue := range batch.InternalOps { - if internalValue == nil { - // delete - deleteInternalRow := NewInternalRow([]byte(internalKey), nil) - deleteRows = append(deleteRows, deleteInternalRow) - } else { - updateInternalRow := NewInternalRow([]byte(internalKey), internalValue) - updateRows = append(updateRows, updateInternalRow) - } - } - - if len(updateRows) > 0 { - updateRowsAll = append(updateRowsAll, updateRows) - } - if len(deleteRows) > 0 { - deleteRowsAll = append(deleteRowsAll, deleteRows) - } - - // process back index rows as they arrive - for dbir := range docBackIndexRowCh { - if dbir.doc == nil && dbir.backIndexRow != nil { - // delete - deleteRows := udc.deleteSingle(dbir.docID, dbir.backIndexRow, nil) - if len(deleteRows) > 0 { - deleteRowsAll = append(deleteRowsAll, deleteRows) - } - docsDeleted++ - } else if dbir.doc != nil { - addRows, updateRows, deleteRows := udc.mergeOldAndNew(dbir.backIndexRow, newRowsMap[dbir.docID]) - 
if len(addRows) > 0 { - addRowsAll = append(addRowsAll, addRows) - } - if len(updateRows) > 0 { - updateRowsAll = append(updateRowsAll, updateRows) - } - if len(deleteRows) > 0 { - deleteRowsAll = append(deleteRowsAll, deleteRows) - } - if dbir.backIndexRow == nil { - docsAdded++ - } - } - } - - if docBackIndexRowErr != nil { - return docBackIndexRowErr - } - - // start a writer for this batch - var kvwriter store.KVWriter - kvwriter, err = udc.store.Writer() - if err != nil { - return - } - - err = udc.batchRows(kvwriter, addRowsAll, updateRowsAll, deleteRowsAll) - if err != nil { - _ = kvwriter.Close() - atomic.AddUint64(&udc.stats.errors, 1) - return - } - - err = kvwriter.Close() - - atomic.AddUint64(&udc.stats.indexTime, uint64(time.Since(indexStart))) - - if err == nil { - udc.m.Lock() - udc.docCount += docsAdded - udc.docCount -= docsDeleted - udc.m.Unlock() - atomic.AddUint64(&udc.stats.updates, numUpdates) - atomic.AddUint64(&udc.stats.deletes, docsDeleted) - atomic.AddUint64(&udc.stats.batches, 1) - atomic.AddUint64(&udc.stats.numPlainTextBytesIndexed, numPlainTextBytes) - } else { - atomic.AddUint64(&udc.stats.errors, 1) - } - - return -} - -func (udc *UpsideDownCouch) SetInternal(key, val []byte) (err error) { - internalRow := NewInternalRow(key, val) - udc.writeMutex.Lock() - defer udc.writeMutex.Unlock() - var writer store.KVWriter - writer, err = udc.store.Writer() - if err != nil { - return - } - defer func() { - if cerr := writer.Close(); err == nil && cerr != nil { - err = cerr - } - }() - - batch := writer.NewBatch() - batch.Set(internalRow.Key(), internalRow.Value()) - - return writer.ExecuteBatch(batch) -} - -func (udc *UpsideDownCouch) DeleteInternal(key []byte) (err error) { - internalRow := NewInternalRow(key, nil) - udc.writeMutex.Lock() - defer udc.writeMutex.Unlock() - var writer store.KVWriter - writer, err = udc.store.Writer() - if err != nil { - return - } - defer func() { - if cerr := writer.Close(); err == nil && cerr != nil { - err 
= cerr - } - }() - - batch := writer.NewBatch() - batch.Delete(internalRow.Key()) - return writer.ExecuteBatch(batch) -} - -func (udc *UpsideDownCouch) Reader() (index.IndexReader, error) { - kvr, err := udc.store.Reader() - if err != nil { - return nil, fmt.Errorf("error opening store reader: %v", err) - } - udc.m.RLock() - defer udc.m.RUnlock() - return &IndexReader{ - index: udc, - kvreader: kvr, - docCount: udc.docCount, - }, nil -} - -func (udc *UpsideDownCouch) Stats() json.Marshaler { - return udc.stats -} - -func (udc *UpsideDownCouch) StatsMap() map[string]interface{} { - return udc.stats.statsMap() -} - -func (udc *UpsideDownCouch) Advanced() (store.KVStore, error) { - return udc.store, nil -} - -func (udc *UpsideDownCouch) fieldIndexOrNewRow(name string) (uint16, *FieldRow) { - index, existed := udc.fieldCache.FieldNamed(name, true) - if !existed { - return index, NewFieldRow(index, name) - } - return index, nil -} - -func init() { - registry.RegisterIndexType(Name, NewUpsideDownCouch) -} - -func backIndexRowForDoc(kvreader store.KVReader, docID index.IndexInternalID) (*BackIndexRow, error) { - // use a temporary row structure to build key - tempRow := BackIndexRow{ - doc: docID, - } - - keyBuf := GetRowBuffer() - if tempRow.KeySize() > len(keyBuf) { - keyBuf = make([]byte, 2*tempRow.KeySize()) - } - defer PutRowBuffer(keyBuf) - keySize, err := tempRow.KeyTo(keyBuf) - if err != nil { - return nil, err - } - - value, err := kvreader.Get(keyBuf[:keySize]) - if err != nil { - return nil, err - } - if value == nil { - return nil, nil - } - backIndexRow, err := NewBackIndexRowKV(keyBuf[:keySize], value) - if err != nil { - return nil, err - } - return backIndexRow, nil -} diff --git a/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/upsidedown.pb.go b/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/upsidedown.pb.go deleted file mode 100644 index c161e1ccf..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/upsidedown.pb.go 
+++ /dev/null @@ -1,688 +0,0 @@ -// Code generated by protoc-gen-gogo. -// source: upsidedown.proto -// DO NOT EDIT! - -/* - Package upsidedown is a generated protocol buffer package. - - It is generated from these files: - upsidedown.proto - - It has these top-level messages: - BackIndexTermsEntry - BackIndexStoreEntry - BackIndexRowValue -*/ -package upsidedown - -import proto "github.com/golang/protobuf/proto" -import math "math" - -import io "io" -import fmt "fmt" -import github_com_golang_protobuf_proto "github.com/golang/protobuf/proto" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = math.Inf - -type BackIndexTermsEntry struct { - Field *uint32 `protobuf:"varint,1,req,name=field" json:"field,omitempty"` - Terms []string `protobuf:"bytes,2,rep,name=terms" json:"terms,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *BackIndexTermsEntry) Reset() { *m = BackIndexTermsEntry{} } -func (m *BackIndexTermsEntry) String() string { return proto.CompactTextString(m) } -func (*BackIndexTermsEntry) ProtoMessage() {} - -func (m *BackIndexTermsEntry) GetField() uint32 { - if m != nil && m.Field != nil { - return *m.Field - } - return 0 -} - -func (m *BackIndexTermsEntry) GetTerms() []string { - if m != nil { - return m.Terms - } - return nil -} - -type BackIndexStoreEntry struct { - Field *uint32 `protobuf:"varint,1,req,name=field" json:"field,omitempty"` - ArrayPositions []uint64 `protobuf:"varint,2,rep,name=arrayPositions" json:"arrayPositions,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *BackIndexStoreEntry) Reset() { *m = BackIndexStoreEntry{} } -func (m *BackIndexStoreEntry) String() string { return proto.CompactTextString(m) } -func (*BackIndexStoreEntry) ProtoMessage() {} - -func (m *BackIndexStoreEntry) GetField() uint32 { - if m != nil && m.Field != nil { - return *m.Field - } - return 0 -} - -func (m *BackIndexStoreEntry) GetArrayPositions() []uint64 { - if m != nil { - 
return m.ArrayPositions - } - return nil -} - -type BackIndexRowValue struct { - TermsEntries []*BackIndexTermsEntry `protobuf:"bytes,1,rep,name=termsEntries" json:"termsEntries,omitempty"` - StoredEntries []*BackIndexStoreEntry `protobuf:"bytes,2,rep,name=storedEntries" json:"storedEntries,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *BackIndexRowValue) Reset() { *m = BackIndexRowValue{} } -func (m *BackIndexRowValue) String() string { return proto.CompactTextString(m) } -func (*BackIndexRowValue) ProtoMessage() {} - -func (m *BackIndexRowValue) GetTermsEntries() []*BackIndexTermsEntry { - if m != nil { - return m.TermsEntries - } - return nil -} - -func (m *BackIndexRowValue) GetStoredEntries() []*BackIndexStoreEntry { - if m != nil { - return m.StoredEntries - } - return nil -} - -func (m *BackIndexTermsEntry) Unmarshal(data []byte) error { - var hasFields [1]uint64 - l := len(data) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Field", wireType) - } - var v uint32 - for shift := uint(0); ; shift += 7 { - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (uint32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Field = &v - hasFields[0] |= uint64(0x00000001) - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Terms", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - postIndex := iNdEx + int(stringLen) - if postIndex > l { - return 
io.ErrUnexpectedEOF - } - m.Terms = append(m.Terms, string(data[iNdEx:postIndex])) - iNdEx = postIndex - default: - var sizeOfWire int - for { - sizeOfWire++ - wire >>= 7 - if wire == 0 { - break - } - } - iNdEx -= sizeOfWire - skippy, err := skipUpsidedown(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthUpsidedown - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, data[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - if hasFields[0]&uint64(0x00000001) == 0 { - return new(github_com_golang_protobuf_proto.RequiredNotSetError) - } - - return nil -} -func (m *BackIndexStoreEntry) Unmarshal(data []byte) error { - var hasFields [1]uint64 - l := len(data) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Field", wireType) - } - var v uint32 - for shift := uint(0); ; shift += 7 { - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (uint32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Field = &v - hasFields[0] |= uint64(0x00000001) - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ArrayPositions", wireType) - } - var v uint64 - for shift := uint(0); ; shift += 7 { - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.ArrayPositions = append(m.ArrayPositions, v) - default: - var sizeOfWire int - for { - sizeOfWire++ - wire >>= 7 - if wire == 0 { - break - } - } - iNdEx -= sizeOfWire - skippy, err := skipUpsidedown(data[iNdEx:]) - if err 
!= nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthUpsidedown - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, data[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - if hasFields[0]&uint64(0x00000001) == 0 { - return new(github_com_golang_protobuf_proto.RequiredNotSetError) - } - - return nil -} -func (m *BackIndexRowValue) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TermsEntries", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - postIndex := iNdEx + msglen - if msglen < 0 { - return ErrInvalidLengthUpsidedown - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.TermsEntries = append(m.TermsEntries, &BackIndexTermsEntry{}) - if err := m.TermsEntries[len(m.TermsEntries)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StoredEntries", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - postIndex := iNdEx + msglen - if msglen < 0 { - return ErrInvalidLengthUpsidedown - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.StoredEntries = append(m.StoredEntries, &BackIndexStoreEntry{}) - if err := 
m.StoredEntries[len(m.StoredEntries)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - var sizeOfWire int - for { - sizeOfWire++ - wire >>= 7 - if wire == 0 { - break - } - } - iNdEx -= sizeOfWire - skippy, err := skipUpsidedown(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthUpsidedown - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, data[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - return nil -} -func skipUpsidedown(data []byte) (n int, err error) { - l := len(data) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for { - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if data[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthUpsidedown - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipUpsidedown(data[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - 
return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthUpsidedown = fmt.Errorf("proto: negative length found during unmarshaling") -) - -func (m *BackIndexTermsEntry) Size() (n int) { - var l int - _ = l - if m.Field != nil { - n += 1 + sovUpsidedown(uint64(*m.Field)) - } - if len(m.Terms) > 0 { - for _, s := range m.Terms { - l = len(s) - n += 1 + l + sovUpsidedown(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *BackIndexStoreEntry) Size() (n int) { - var l int - _ = l - if m.Field != nil { - n += 1 + sovUpsidedown(uint64(*m.Field)) - } - if len(m.ArrayPositions) > 0 { - for _, e := range m.ArrayPositions { - n += 1 + sovUpsidedown(uint64(e)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *BackIndexRowValue) Size() (n int) { - var l int - _ = l - if len(m.TermsEntries) > 0 { - for _, e := range m.TermsEntries { - l = e.Size() - n += 1 + l + sovUpsidedown(uint64(l)) - } - } - if len(m.StoredEntries) > 0 { - for _, e := range m.StoredEntries { - l = e.Size() - n += 1 + l + sovUpsidedown(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovUpsidedown(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozUpsidedown(x uint64) (n int) { - return sovUpsidedown(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *BackIndexTermsEntry) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *BackIndexTermsEntry) MarshalTo(data []byte) (n int, err error) { - var i int - _ = i - var l int - _ = l - if m.Field == nil { - return 0, new(github_com_golang_protobuf_proto.RequiredNotSetError) - } else { - data[i] = 0x8 - i++ - i = encodeVarintUpsidedown(data, i, 
uint64(*m.Field)) - } - if len(m.Terms) > 0 { - for _, s := range m.Terms { - data[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - data[i] = uint8(l) - i++ - i += copy(data[i:], s) - } - } - if m.XXX_unrecognized != nil { - i += copy(data[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *BackIndexStoreEntry) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *BackIndexStoreEntry) MarshalTo(data []byte) (n int, err error) { - var i int - _ = i - var l int - _ = l - if m.Field == nil { - return 0, new(github_com_golang_protobuf_proto.RequiredNotSetError) - } else { - data[i] = 0x8 - i++ - i = encodeVarintUpsidedown(data, i, uint64(*m.Field)) - } - if len(m.ArrayPositions) > 0 { - for _, num := range m.ArrayPositions { - data[i] = 0x10 - i++ - i = encodeVarintUpsidedown(data, i, uint64(num)) - } - } - if m.XXX_unrecognized != nil { - i += copy(data[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *BackIndexRowValue) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *BackIndexRowValue) MarshalTo(data []byte) (n int, err error) { - var i int - _ = i - var l int - _ = l - if len(m.TermsEntries) > 0 { - for _, msg := range m.TermsEntries { - data[i] = 0xa - i++ - i = encodeVarintUpsidedown(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if len(m.StoredEntries) > 0 { - for _, msg := range m.StoredEntries { - data[i] = 0x12 - i++ - i = encodeVarintUpsidedown(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if m.XXX_unrecognized != nil { - i += copy(data[i:], 
m.XXX_unrecognized) - } - return i, nil -} - -func encodeFixed64Upsidedown(data []byte, offset int, v uint64) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) - data[offset+4] = uint8(v >> 32) - data[offset+5] = uint8(v >> 40) - data[offset+6] = uint8(v >> 48) - data[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Upsidedown(data []byte, offset int, v uint32) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintUpsidedown(data []byte, offset int, v uint64) int { - for v >= 1<<7 { - data[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - data[offset] = uint8(v) - return offset + 1 -} diff --git a/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/upsidedown.proto b/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/upsidedown.proto deleted file mode 100644 index cf0492a2d..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/upsidedown.proto +++ /dev/null @@ -1,14 +0,0 @@ -message BackIndexTermsEntry { - required uint32 field = 1; - repeated string terms = 2; -} - -message BackIndexStoreEntry { - required uint32 field = 1; - repeated uint64 arrayPositions = 2; -} - -message BackIndexRowValue { - repeated BackIndexTermsEntry termsEntries = 1; - repeated BackIndexStoreEntry storedEntries = 2; -} diff --git a/vendor/github.com/blevesearch/bleve/v2/index_alias.go b/vendor/github.com/blevesearch/bleve/v2/index_alias.go deleted file mode 100644 index 7a85d7213..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/index_alias.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package bleve - -// An IndexAlias is a wrapper around one or more -// Index objects. It has two distinct modes of -// operation. -// 1. When it points to a single index, ALL index -// operations are valid and will be passed through -// to the underlying index. -// 2. When it points to more than one index, the only -// valid operation is Search. In this case the -// search will be performed across all the -// underlying indexes and the results merged. -// Calls to Add/Remove/Swap the underlying indexes -// are atomic, so you can safely change the -// underlying Index objects while other components -// are performing operations. -type IndexAlias interface { - Index - - Add(i ...Index) - Remove(i ...Index) - Swap(in, out []Index) -} diff --git a/vendor/github.com/blevesearch/bleve/v2/index_alias_impl.go b/vendor/github.com/blevesearch/bleve/v2/index_alias_impl.go deleted file mode 100644 index 5a4dc5a48..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/index_alias_impl.go +++ /dev/null @@ -1,612 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package bleve - -import ( - "context" - "sync" - "time" - - "github.com/blevesearch/bleve/v2/mapping" - "github.com/blevesearch/bleve/v2/search" - index "github.com/blevesearch/bleve_index_api" -) - -type indexAliasImpl struct { - name string - indexes []Index - mutex sync.RWMutex - open bool -} - -// NewIndexAlias creates a new IndexAlias over the provided -// Index objects. -func NewIndexAlias(indexes ...Index) *indexAliasImpl { - return &indexAliasImpl{ - name: "alias", - indexes: indexes, - open: true, - } -} - -// VisitIndexes invokes the visit callback on every -// indexes included in the index alias. -func (i *indexAliasImpl) VisitIndexes(visit func(Index)) { - i.mutex.RLock() - for _, idx := range i.indexes { - visit(idx) - } - i.mutex.RUnlock() -} - -func (i *indexAliasImpl) isAliasToSingleIndex() error { - if len(i.indexes) < 1 { - return ErrorAliasEmpty - } else if len(i.indexes) > 1 { - return ErrorAliasMulti - } - return nil -} - -func (i *indexAliasImpl) Index(id string, data interface{}) error { - i.mutex.RLock() - defer i.mutex.RUnlock() - - if !i.open { - return ErrorIndexClosed - } - - err := i.isAliasToSingleIndex() - if err != nil { - return err - } - - return i.indexes[0].Index(id, data) -} - -func (i *indexAliasImpl) Delete(id string) error { - i.mutex.RLock() - defer i.mutex.RUnlock() - - if !i.open { - return ErrorIndexClosed - } - - err := i.isAliasToSingleIndex() - if err != nil { - return err - } - - return i.indexes[0].Delete(id) -} - -func (i *indexAliasImpl) Batch(b *Batch) error { - i.mutex.RLock() - defer i.mutex.RUnlock() - - if !i.open { - return ErrorIndexClosed - } - - err := i.isAliasToSingleIndex() - if err != nil { - return err - } - - return i.indexes[0].Batch(b) -} - -func (i *indexAliasImpl) Document(id string) (index.Document, error) { - i.mutex.RLock() - defer i.mutex.RUnlock() - - if !i.open { - return nil, 
ErrorIndexClosed - } - - err := i.isAliasToSingleIndex() - if err != nil { - return nil, err - } - - return i.indexes[0].Document(id) -} - -func (i *indexAliasImpl) DocCount() (uint64, error) { - i.mutex.RLock() - defer i.mutex.RUnlock() - - rv := uint64(0) - - if !i.open { - return 0, ErrorIndexClosed - } - - for _, index := range i.indexes { - otherCount, err := index.DocCount() - if err == nil { - rv += otherCount - } - // tolerate errors to produce partial counts - } - - return rv, nil -} - -func (i *indexAliasImpl) Search(req *SearchRequest) (*SearchResult, error) { - return i.SearchInContext(context.Background(), req) -} - -func (i *indexAliasImpl) SearchInContext(ctx context.Context, req *SearchRequest) (*SearchResult, error) { - i.mutex.RLock() - defer i.mutex.RUnlock() - - if !i.open { - return nil, ErrorIndexClosed - } - - if len(i.indexes) < 1 { - return nil, ErrorAliasEmpty - } - - // short circuit the simple case - if len(i.indexes) == 1 { - return i.indexes[0].SearchInContext(ctx, req) - } - - return MultiSearch(ctx, req, i.indexes...) 
-} - -func (i *indexAliasImpl) Fields() ([]string, error) { - i.mutex.RLock() - defer i.mutex.RUnlock() - - if !i.open { - return nil, ErrorIndexClosed - } - - err := i.isAliasToSingleIndex() - if err != nil { - return nil, err - } - - return i.indexes[0].Fields() -} - -func (i *indexAliasImpl) FieldDict(field string) (index.FieldDict, error) { - i.mutex.RLock() - - if !i.open { - i.mutex.RUnlock() - return nil, ErrorIndexClosed - } - - err := i.isAliasToSingleIndex() - if err != nil { - i.mutex.RUnlock() - return nil, err - } - - fieldDict, err := i.indexes[0].FieldDict(field) - if err != nil { - i.mutex.RUnlock() - return nil, err - } - - return &indexAliasImplFieldDict{ - index: i, - fieldDict: fieldDict, - }, nil -} - -func (i *indexAliasImpl) FieldDictRange(field string, startTerm []byte, endTerm []byte) (index.FieldDict, error) { - i.mutex.RLock() - - if !i.open { - i.mutex.RUnlock() - return nil, ErrorIndexClosed - } - - err := i.isAliasToSingleIndex() - if err != nil { - i.mutex.RUnlock() - return nil, err - } - - fieldDict, err := i.indexes[0].FieldDictRange(field, startTerm, endTerm) - if err != nil { - i.mutex.RUnlock() - return nil, err - } - - return &indexAliasImplFieldDict{ - index: i, - fieldDict: fieldDict, - }, nil -} - -func (i *indexAliasImpl) FieldDictPrefix(field string, termPrefix []byte) (index.FieldDict, error) { - i.mutex.RLock() - - if !i.open { - i.mutex.RUnlock() - return nil, ErrorIndexClosed - } - - err := i.isAliasToSingleIndex() - if err != nil { - i.mutex.RUnlock() - return nil, err - } - - fieldDict, err := i.indexes[0].FieldDictPrefix(field, termPrefix) - if err != nil { - i.mutex.RUnlock() - return nil, err - } - - return &indexAliasImplFieldDict{ - index: i, - fieldDict: fieldDict, - }, nil -} - -func (i *indexAliasImpl) Close() error { - i.mutex.Lock() - defer i.mutex.Unlock() - - i.open = false - return nil -} - -func (i *indexAliasImpl) Mapping() mapping.IndexMapping { - i.mutex.RLock() - defer i.mutex.RUnlock() - - if 
!i.open { - return nil - } - - err := i.isAliasToSingleIndex() - if err != nil { - return nil - } - - return i.indexes[0].Mapping() -} - -func (i *indexAliasImpl) Stats() *IndexStat { - i.mutex.RLock() - defer i.mutex.RUnlock() - - if !i.open { - return nil - } - - err := i.isAliasToSingleIndex() - if err != nil { - return nil - } - - return i.indexes[0].Stats() -} - -func (i *indexAliasImpl) StatsMap() map[string]interface{} { - i.mutex.RLock() - defer i.mutex.RUnlock() - - if !i.open { - return nil - } - - err := i.isAliasToSingleIndex() - if err != nil { - return nil - } - - return i.indexes[0].StatsMap() -} - -func (i *indexAliasImpl) GetInternal(key []byte) ([]byte, error) { - i.mutex.RLock() - defer i.mutex.RUnlock() - - if !i.open { - return nil, ErrorIndexClosed - } - - err := i.isAliasToSingleIndex() - if err != nil { - return nil, err - } - - return i.indexes[0].GetInternal(key) -} - -func (i *indexAliasImpl) SetInternal(key, val []byte) error { - i.mutex.RLock() - defer i.mutex.RUnlock() - - if !i.open { - return ErrorIndexClosed - } - - err := i.isAliasToSingleIndex() - if err != nil { - return err - } - - return i.indexes[0].SetInternal(key, val) -} - -func (i *indexAliasImpl) DeleteInternal(key []byte) error { - i.mutex.RLock() - defer i.mutex.RUnlock() - - if !i.open { - return ErrorIndexClosed - } - - err := i.isAliasToSingleIndex() - if err != nil { - return err - } - - return i.indexes[0].DeleteInternal(key) -} - -func (i *indexAliasImpl) Advanced() (index.Index, error) { - i.mutex.RLock() - defer i.mutex.RUnlock() - - if !i.open { - return nil, ErrorIndexClosed - } - - err := i.isAliasToSingleIndex() - if err != nil { - return nil, err - } - - return i.indexes[0].Advanced() -} - -func (i *indexAliasImpl) Add(indexes ...Index) { - i.mutex.Lock() - defer i.mutex.Unlock() - - i.indexes = append(i.indexes, indexes...) 
-} - -func (i *indexAliasImpl) removeSingle(index Index) { - for pos, in := range i.indexes { - if in == index { - i.indexes = append(i.indexes[:pos], i.indexes[pos+1:]...) - break - } - } -} - -func (i *indexAliasImpl) Remove(indexes ...Index) { - i.mutex.Lock() - defer i.mutex.Unlock() - - for _, in := range indexes { - i.removeSingle(in) - } -} - -func (i *indexAliasImpl) Swap(in, out []Index) { - i.mutex.Lock() - defer i.mutex.Unlock() - - // add - i.indexes = append(i.indexes, in...) - - // delete - for _, ind := range out { - i.removeSingle(ind) - } -} - -// createChildSearchRequest creates a separate -// request from the original -// For now, avoid data race on req structure. -// TODO disable highlight/field load on child -// requests, and add code to do this only on -// the actual final results. -// Perhaps that part needs to be optional, -// could be slower in remote usages. -func createChildSearchRequest(req *SearchRequest) *SearchRequest { - rv := SearchRequest{ - Query: req.Query, - Size: req.Size + req.From, - From: 0, - Highlight: req.Highlight, - Fields: req.Fields, - Facets: req.Facets, - Explain: req.Explain, - Sort: req.Sort.Copy(), - IncludeLocations: req.IncludeLocations, - Score: req.Score, - SearchAfter: req.SearchAfter, - SearchBefore: req.SearchBefore, - } - return &rv -} - -type asyncSearchResult struct { - Name string - Result *SearchResult - Err error -} - -// MultiSearch executes a SearchRequest across multiple Index objects, -// then merges the results. The indexes must honor any ctx deadline. 
-func MultiSearch(ctx context.Context, req *SearchRequest, indexes ...Index) (*SearchResult, error) { - - searchStart := time.Now() - asyncResults := make(chan *asyncSearchResult, len(indexes)) - - var reverseQueryExecution bool - if req.SearchBefore != nil { - reverseQueryExecution = true - req.Sort.Reverse() - req.SearchAfter = req.SearchBefore - req.SearchBefore = nil - } - - // run search on each index in separate go routine - var waitGroup sync.WaitGroup - - var searchChildIndex = func(in Index, childReq *SearchRequest) { - rv := asyncSearchResult{Name: in.Name()} - rv.Result, rv.Err = in.SearchInContext(ctx, childReq) - asyncResults <- &rv - waitGroup.Done() - } - - waitGroup.Add(len(indexes)) - for _, in := range indexes { - go searchChildIndex(in, createChildSearchRequest(req)) - } - - // on another go routine, close after finished - go func() { - waitGroup.Wait() - close(asyncResults) - }() - - var sr *SearchResult - indexErrors := make(map[string]error) - - for asr := range asyncResults { - if asr.Err == nil { - if sr == nil { - // first result - sr = asr.Result - } else { - // merge with previous - sr.Merge(asr.Result) - } - } else { - indexErrors[asr.Name] = asr.Err - } - } - - // merge just concatenated all the hits - // now lets clean it up - - // handle case where no results were successful - if sr == nil { - sr = &SearchResult{ - Status: &SearchStatus{ - Errors: make(map[string]error), - }, - } - } - - sortFunc := req.SortFunc() - // sort all hits with the requested order - if len(req.Sort) > 0 { - sorter := newSearchHitSorter(req.Sort, sr.Hits) - sortFunc(sorter) - } - - // now skip over the correct From - if req.From > 0 && len(sr.Hits) > req.From { - sr.Hits = sr.Hits[req.From:] - } else if req.From > 0 { - sr.Hits = search.DocumentMatchCollection{} - } - - // now trim to the correct size - if req.Size > 0 && len(sr.Hits) > req.Size { - sr.Hits = sr.Hits[0:req.Size] - } - - // fix up facets - for name, fr := range req.Facets { - 
sr.Facets.Fixup(name, fr.Size) - } - - if reverseQueryExecution { - // reverse the sort back to the original - req.Sort.Reverse() - // resort using the original order - mhs := newSearchHitSorter(req.Sort, sr.Hits) - sortFunc(mhs) - // reset request - req.SearchBefore = req.SearchAfter - req.SearchAfter = nil - } - - // fix up original request - sr.Request = req - searchDuration := time.Since(searchStart) - sr.Took = searchDuration - - // fix up errors - if len(indexErrors) > 0 { - if sr.Status.Errors == nil { - sr.Status.Errors = make(map[string]error) - } - for indexName, indexErr := range indexErrors { - sr.Status.Errors[indexName] = indexErr - sr.Status.Total++ - sr.Status.Failed++ - } - } - - return sr, nil -} - -func (i *indexAliasImpl) NewBatch() *Batch { - i.mutex.RLock() - defer i.mutex.RUnlock() - - if !i.open { - return nil - } - - err := i.isAliasToSingleIndex() - if err != nil { - return nil - } - - return i.indexes[0].NewBatch() -} - -func (i *indexAliasImpl) Name() string { - return i.name -} - -func (i *indexAliasImpl) SetName(name string) { - i.name = name -} - -type indexAliasImplFieldDict struct { - index *indexAliasImpl - fieldDict index.FieldDict -} - -func (f *indexAliasImplFieldDict) Next() (*index.DictEntry, error) { - return f.fieldDict.Next() -} - -func (f *indexAliasImplFieldDict) Close() error { - defer f.index.mutex.RUnlock() - return f.fieldDict.Close() -} diff --git a/vendor/github.com/blevesearch/bleve/v2/index_impl.go b/vendor/github.com/blevesearch/bleve/v2/index_impl.go deleted file mode 100644 index 879a36634..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/index_impl.go +++ /dev/null @@ -1,912 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package bleve - -import ( - "context" - "encoding/json" - "fmt" - "os" - "sync" - "sync/atomic" - "time" - - "github.com/blevesearch/bleve/v2/document" - "github.com/blevesearch/bleve/v2/index/upsidedown" - "github.com/blevesearch/bleve/v2/mapping" - "github.com/blevesearch/bleve/v2/registry" - "github.com/blevesearch/bleve/v2/search" - "github.com/blevesearch/bleve/v2/search/collector" - "github.com/blevesearch/bleve/v2/search/facet" - "github.com/blevesearch/bleve/v2/search/highlight" - index "github.com/blevesearch/bleve_index_api" -) - -type indexImpl struct { - path string - name string - meta *indexMeta - i index.Index - m mapping.IndexMapping - mutex sync.RWMutex - open bool - stats *IndexStat -} - -const storePath = "store" - -var mappingInternalKey = []byte("_mapping") - -const SearchQueryStartCallbackKey = "_search_query_start_callback_key" -const SearchQueryEndCallbackKey = "_search_query_end_callback_key" - -type SearchQueryStartCallbackFn func(size uint64) error -type SearchQueryEndCallbackFn func(size uint64) error - -func indexStorePath(path string) string { - return path + string(os.PathSeparator) + storePath -} - -func newIndexUsing(path string, mapping mapping.IndexMapping, indexType string, kvstore string, kvconfig map[string]interface{}) (*indexImpl, error) { - // first validate the mapping - err := mapping.Validate() - if err != nil { - return nil, err - } - - if kvconfig == nil { - kvconfig = map[string]interface{}{} - } - - if kvstore == "" { - return nil, fmt.Errorf("bleve not configured for file based indexing") - } - - 
rv := indexImpl{ - path: path, - name: path, - m: mapping, - meta: newIndexMeta(indexType, kvstore, kvconfig), - } - rv.stats = &IndexStat{i: &rv} - // at this point there is hope that we can be successful, so save index meta - if path != "" { - err = rv.meta.Save(path) - if err != nil { - return nil, err - } - kvconfig["create_if_missing"] = true - kvconfig["error_if_exists"] = true - kvconfig["path"] = indexStorePath(path) - } else { - kvconfig["path"] = "" - } - - // open the index - indexTypeConstructor := registry.IndexTypeConstructorByName(rv.meta.IndexType) - if indexTypeConstructor == nil { - return nil, ErrorUnknownIndexType - } - - rv.i, err = indexTypeConstructor(rv.meta.Storage, kvconfig, Config.analysisQueue) - if err != nil { - return nil, err - } - err = rv.i.Open() - if err != nil { - return nil, err - } - defer func(rv *indexImpl) { - if !rv.open { - rv.i.Close() - } - }(&rv) - - // now persist the mapping - mappingBytes, err := json.Marshal(mapping) - if err != nil { - return nil, err - } - err = rv.i.SetInternal(mappingInternalKey, mappingBytes) - if err != nil { - return nil, err - } - - // mark the index as open - rv.mutex.Lock() - defer rv.mutex.Unlock() - rv.open = true - indexStats.Register(&rv) - return &rv, nil -} - -func openIndexUsing(path string, runtimeConfig map[string]interface{}) (rv *indexImpl, err error) { - rv = &indexImpl{ - path: path, - name: path, - } - rv.stats = &IndexStat{i: rv} - - rv.meta, err = openIndexMeta(path) - if err != nil { - return nil, err - } - - // backwards compatibility if index type is missing - if rv.meta.IndexType == "" { - rv.meta.IndexType = upsidedown.Name - } - - storeConfig := rv.meta.Config - if storeConfig == nil { - storeConfig = map[string]interface{}{} - } - - storeConfig["path"] = indexStorePath(path) - storeConfig["create_if_missing"] = false - storeConfig["error_if_exists"] = false - for rck, rcv := range runtimeConfig { - storeConfig[rck] = rcv - } - - // open the index - 
indexTypeConstructor := registry.IndexTypeConstructorByName(rv.meta.IndexType) - if indexTypeConstructor == nil { - return nil, ErrorUnknownIndexType - } - - rv.i, err = indexTypeConstructor(rv.meta.Storage, storeConfig, Config.analysisQueue) - if err != nil { - return nil, err - } - err = rv.i.Open() - if err != nil { - return nil, err - } - defer func(rv *indexImpl) { - if !rv.open { - rv.i.Close() - } - }(rv) - - // now load the mapping - indexReader, err := rv.i.Reader() - if err != nil { - return nil, err - } - defer func() { - if cerr := indexReader.Close(); cerr != nil && err == nil { - err = cerr - } - }() - - mappingBytes, err := indexReader.GetInternal(mappingInternalKey) - if err != nil { - return nil, err - } - - var im *mapping.IndexMappingImpl - err = json.Unmarshal(mappingBytes, &im) - if err != nil { - return nil, fmt.Errorf("error parsing mapping JSON: %v\nmapping contents:\n%s", err, string(mappingBytes)) - } - - // mark the index as open - rv.mutex.Lock() - defer rv.mutex.Unlock() - rv.open = true - - // validate the mapping - err = im.Validate() - if err != nil { - // note even if the mapping is invalid - // we still return an open usable index - return rv, err - } - - rv.m = im - indexStats.Register(rv) - return rv, err -} - -// Advanced returns internal index implementation -func (i *indexImpl) Advanced() (index.Index, error) { - return i.i, nil -} - -// Mapping returns the IndexMapping in use by this -// Index. -func (i *indexImpl) Mapping() mapping.IndexMapping { - return i.m -} - -// Index the object with the specified identifier. -// The IndexMapping for this index will determine -// how the object is indexed. 
-func (i *indexImpl) Index(id string, data interface{}) (err error) { - if id == "" { - return ErrorEmptyID - } - - i.mutex.RLock() - defer i.mutex.RUnlock() - - if !i.open { - return ErrorIndexClosed - } - - doc := document.NewDocument(id) - err = i.m.MapDocument(doc, data) - if err != nil { - return - } - err = i.i.Update(doc) - return -} - -// IndexAdvanced takes a document.Document object -// skips the mapping and indexes it. -func (i *indexImpl) IndexAdvanced(doc *document.Document) (err error) { - if doc.ID() == "" { - return ErrorEmptyID - } - - i.mutex.RLock() - defer i.mutex.RUnlock() - - if !i.open { - return ErrorIndexClosed - } - - err = i.i.Update(doc) - return -} - -// Delete entries for the specified identifier from -// the index. -func (i *indexImpl) Delete(id string) (err error) { - if id == "" { - return ErrorEmptyID - } - - i.mutex.RLock() - defer i.mutex.RUnlock() - - if !i.open { - return ErrorIndexClosed - } - - err = i.i.Delete(id) - return -} - -// Batch executes multiple Index and Delete -// operations at the same time. There are often -// significant performance benefits when performing -// operations in a batch. -func (i *indexImpl) Batch(b *Batch) error { - i.mutex.RLock() - defer i.mutex.RUnlock() - - if !i.open { - return ErrorIndexClosed - } - - return i.i.Batch(b.internal) -} - -// Document is used to find the values of all the -// stored fields for a document in the index. These -// stored fields are put back into a Document object -// and returned. 
-func (i *indexImpl) Document(id string) (doc index.Document, err error) { - i.mutex.RLock() - defer i.mutex.RUnlock() - - if !i.open { - return nil, ErrorIndexClosed - } - indexReader, err := i.i.Reader() - if err != nil { - return nil, err - } - defer func() { - if cerr := indexReader.Close(); err == nil && cerr != nil { - err = cerr - } - }() - - doc, err = indexReader.Document(id) - if err != nil { - return nil, err - } - return doc, nil -} - -// DocCount returns the number of documents in the -// index. -func (i *indexImpl) DocCount() (count uint64, err error) { - i.mutex.RLock() - defer i.mutex.RUnlock() - - if !i.open { - return 0, ErrorIndexClosed - } - - // open a reader for this search - indexReader, err := i.i.Reader() - if err != nil { - return 0, fmt.Errorf("error opening index reader %v", err) - } - defer func() { - if cerr := indexReader.Close(); err == nil && cerr != nil { - err = cerr - } - }() - - count, err = indexReader.DocCount() - return -} - -// Search executes a search request operation. -// Returns a SearchResult object or an error. -func (i *indexImpl) Search(req *SearchRequest) (sr *SearchResult, err error) { - return i.SearchInContext(context.Background(), req) -} - -var documentMatchEmptySize int -var searchContextEmptySize int -var facetResultEmptySize int -var documentEmptySize int - -func init() { - var dm search.DocumentMatch - documentMatchEmptySize = dm.Size() - - var sc search.SearchContext - searchContextEmptySize = sc.Size() - - var fr search.FacetResult - facetResultEmptySize = fr.Size() - - var d document.Document - documentEmptySize = d.Size() -} - -// memNeededForSearch is a helper function that returns an estimate of RAM -// needed to execute a search request. 
-func memNeededForSearch(req *SearchRequest, - searcher search.Searcher, - topnCollector *collector.TopNCollector) uint64 { - - backingSize := req.Size + req.From + 1 - if req.Size+req.From > collector.PreAllocSizeSkipCap { - backingSize = collector.PreAllocSizeSkipCap + 1 - } - numDocMatches := backingSize + searcher.DocumentMatchPoolSize() - - estimate := 0 - - // overhead, size in bytes from collector - estimate += topnCollector.Size() - - // pre-allocing DocumentMatchPool - estimate += searchContextEmptySize + numDocMatches*documentMatchEmptySize - - // searcher overhead - estimate += searcher.Size() - - // overhead from results, lowestMatchOutsideResults - estimate += (numDocMatches + 1) * documentMatchEmptySize - - // additional overhead from SearchResult - estimate += reflectStaticSizeSearchResult + reflectStaticSizeSearchStatus - - // overhead from facet results - if req.Facets != nil { - estimate += len(req.Facets) * facetResultEmptySize - } - - // highlighting, store - if len(req.Fields) > 0 || req.Highlight != nil { - // Size + From => number of hits - estimate += (req.Size + req.From) * documentEmptySize - } - - return uint64(estimate) -} - -// SearchInContext executes a search request operation within the provided -// Context. Returns a SearchResult object or an error. 
-func (i *indexImpl) SearchInContext(ctx context.Context, req *SearchRequest) (sr *SearchResult, err error) { - i.mutex.RLock() - defer i.mutex.RUnlock() - - searchStart := time.Now() - - if !i.open { - return nil, ErrorIndexClosed - } - - var reverseQueryExecution bool - if req.SearchBefore != nil { - reverseQueryExecution = true - req.Sort.Reverse() - req.SearchAfter = req.SearchBefore - req.SearchBefore = nil - } - - var coll *collector.TopNCollector - if req.SearchAfter != nil { - coll = collector.NewTopNCollectorAfter(req.Size, req.Sort, req.SearchAfter) - } else { - coll = collector.NewTopNCollector(req.Size, req.From, req.Sort) - } - - // open a reader for this search - indexReader, err := i.i.Reader() - if err != nil { - return nil, fmt.Errorf("error opening index reader %v", err) - } - defer func() { - if cerr := indexReader.Close(); err == nil && cerr != nil { - err = cerr - } - }() - - searcher, err := req.Query.Searcher(indexReader, i.m, search.SearcherOptions{ - Explain: req.Explain, - IncludeTermVectors: req.IncludeLocations || req.Highlight != nil, - Score: req.Score, - }) - if err != nil { - return nil, err - } - defer func() { - if serr := searcher.Close(); err == nil && serr != nil { - err = serr - } - }() - - if req.Facets != nil { - facetsBuilder := search.NewFacetsBuilder(indexReader) - for facetName, facetRequest := range req.Facets { - if facetRequest.NumericRanges != nil { - // build numeric range facet - facetBuilder := facet.NewNumericFacetBuilder(facetRequest.Field, facetRequest.Size) - for _, nr := range facetRequest.NumericRanges { - facetBuilder.AddRange(nr.Name, nr.Min, nr.Max) - } - facetsBuilder.Add(facetName, facetBuilder) - } else if facetRequest.DateTimeRanges != nil { - // build date range facet - facetBuilder := facet.NewDateTimeFacetBuilder(facetRequest.Field, facetRequest.Size) - dateTimeParser := i.m.DateTimeParserNamed("") - for _, dr := range facetRequest.DateTimeRanges { - start, end := dr.ParseDates(dateTimeParser) - 
facetBuilder.AddRange(dr.Name, start, end) - } - facetsBuilder.Add(facetName, facetBuilder) - } else { - // build terms facet - facetBuilder := facet.NewTermsFacetBuilder(facetRequest.Field, facetRequest.Size) - facetsBuilder.Add(facetName, facetBuilder) - } - } - coll.SetFacetsBuilder(facetsBuilder) - } - - memNeeded := memNeededForSearch(req, searcher, coll) - if cb := ctx.Value(SearchQueryStartCallbackKey); cb != nil { - if cbF, ok := cb.(SearchQueryStartCallbackFn); ok { - err = cbF(memNeeded) - } - } - if err != nil { - return nil, err - } - - if cb := ctx.Value(SearchQueryEndCallbackKey); cb != nil { - if cbF, ok := cb.(SearchQueryEndCallbackFn); ok { - defer func() { - _ = cbF(memNeeded) - }() - } - } - - err = coll.Collect(ctx, searcher, indexReader) - if err != nil { - return nil, err - } - - hits := coll.Results() - - var highlighter highlight.Highlighter - - if req.Highlight != nil { - // get the right highlighter - highlighter, err = Config.Cache.HighlighterNamed(Config.DefaultHighlighter) - if err != nil { - return nil, err - } - if req.Highlight.Style != nil { - highlighter, err = Config.Cache.HighlighterNamed(*req.Highlight.Style) - if err != nil { - return nil, err - } - } - if highlighter == nil { - return nil, fmt.Errorf("no highlighter named `%s` registered", *req.Highlight.Style) - } - } - - for _, hit := range hits { - if i.name != "" { - hit.Index = i.name - } - err = LoadAndHighlightFields(hit, req, i.name, indexReader, highlighter) - if err != nil { - return nil, err - } - } - - atomic.AddUint64(&i.stats.searches, 1) - searchDuration := time.Since(searchStart) - atomic.AddUint64(&i.stats.searchTime, uint64(searchDuration)) - - if Config.SlowSearchLogThreshold > 0 && - searchDuration > Config.SlowSearchLogThreshold { - logger.Printf("slow search took %s - %v", searchDuration, req) - } - - if reverseQueryExecution { - // reverse the sort back to the original - req.Sort.Reverse() - // resort using the original order - mhs := 
newSearchHitSorter(req.Sort, hits) - req.SortFunc()(mhs) - // reset request - req.SearchBefore = req.SearchAfter - req.SearchAfter = nil - } - - return &SearchResult{ - Status: &SearchStatus{ - Total: 1, - Successful: 1, - }, - Request: req, - Hits: hits, - Total: coll.Total(), - MaxScore: coll.MaxScore(), - Took: searchDuration, - Facets: coll.FacetResults(), - }, nil -} - -func LoadAndHighlightFields(hit *search.DocumentMatch, req *SearchRequest, - indexName string, r index.IndexReader, - highlighter highlight.Highlighter) error { - if len(req.Fields) > 0 || highlighter != nil { - doc, err := r.Document(hit.ID) - if err == nil && doc != nil { - if len(req.Fields) > 0 { - fieldsToLoad := deDuplicate(req.Fields) - for _, f := range fieldsToLoad { - doc.VisitFields(func(docF index.Field) { - if f == "*" || docF.Name() == f { - var value interface{} - switch docF := docF.(type) { - case index.TextField: - value = docF.Text() - case index.NumericField: - num, err := docF.Number() - if err == nil { - value = num - } - case index.DateTimeField: - datetime, err := docF.DateTime() - if err == nil { - value = datetime.Format(time.RFC3339) - } - case index.BooleanField: - boolean, err := docF.Boolean() - if err == nil { - value = boolean - } - case index.GeoPointField: - lon, err := docF.Lon() - if err == nil { - lat, err := docF.Lat() - if err == nil { - value = []float64{lon, lat} - } - } - } - if value != nil { - hit.AddFieldValue(docF.Name(), value) - } - } - }) - } - } - if highlighter != nil { - highlightFields := req.Highlight.Fields - if highlightFields == nil { - // add all fields with matches - highlightFields = make([]string, 0, len(hit.Locations)) - for k := range hit.Locations { - highlightFields = append(highlightFields, k) - } - } - for _, hf := range highlightFields { - highlighter.BestFragmentsInField(hit, doc, hf, 1) - } - } - } else if doc == nil { - // unexpected case, a doc ID that was found as a search hit - // was unable to be found during document 
lookup - return ErrorIndexReadInconsistency - } - } - - return nil -} - -// Fields returns the name of all the fields this -// Index has operated on. -func (i *indexImpl) Fields() (fields []string, err error) { - i.mutex.RLock() - defer i.mutex.RUnlock() - - if !i.open { - return nil, ErrorIndexClosed - } - - indexReader, err := i.i.Reader() - if err != nil { - return nil, err - } - defer func() { - if cerr := indexReader.Close(); err == nil && cerr != nil { - err = cerr - } - }() - - fields, err = indexReader.Fields() - if err != nil { - return nil, err - } - return fields, nil -} - -func (i *indexImpl) FieldDict(field string) (index.FieldDict, error) { - i.mutex.RLock() - - if !i.open { - i.mutex.RUnlock() - return nil, ErrorIndexClosed - } - - indexReader, err := i.i.Reader() - if err != nil { - i.mutex.RUnlock() - return nil, err - } - - fieldDict, err := indexReader.FieldDict(field) - if err != nil { - i.mutex.RUnlock() - return nil, err - } - - return &indexImplFieldDict{ - index: i, - indexReader: indexReader, - fieldDict: fieldDict, - }, nil -} - -func (i *indexImpl) FieldDictRange(field string, startTerm []byte, endTerm []byte) (index.FieldDict, error) { - i.mutex.RLock() - - if !i.open { - i.mutex.RUnlock() - return nil, ErrorIndexClosed - } - - indexReader, err := i.i.Reader() - if err != nil { - i.mutex.RUnlock() - return nil, err - } - - fieldDict, err := indexReader.FieldDictRange(field, startTerm, endTerm) - if err != nil { - i.mutex.RUnlock() - return nil, err - } - - return &indexImplFieldDict{ - index: i, - indexReader: indexReader, - fieldDict: fieldDict, - }, nil -} - -func (i *indexImpl) FieldDictPrefix(field string, termPrefix []byte) (index.FieldDict, error) { - i.mutex.RLock() - - if !i.open { - i.mutex.RUnlock() - return nil, ErrorIndexClosed - } - - indexReader, err := i.i.Reader() - if err != nil { - i.mutex.RUnlock() - return nil, err - } - - fieldDict, err := indexReader.FieldDictPrefix(field, termPrefix) - if err != nil { - 
i.mutex.RUnlock() - return nil, err - } - - return &indexImplFieldDict{ - index: i, - indexReader: indexReader, - fieldDict: fieldDict, - }, nil -} - -func (i *indexImpl) Close() error { - i.mutex.Lock() - defer i.mutex.Unlock() - - indexStats.UnRegister(i) - - i.open = false - return i.i.Close() -} - -func (i *indexImpl) Stats() *IndexStat { - return i.stats -} - -func (i *indexImpl) StatsMap() map[string]interface{} { - return i.stats.statsMap() -} - -func (i *indexImpl) GetInternal(key []byte) (val []byte, err error) { - i.mutex.RLock() - defer i.mutex.RUnlock() - - if !i.open { - return nil, ErrorIndexClosed - } - - reader, err := i.i.Reader() - if err != nil { - return nil, err - } - defer func() { - if cerr := reader.Close(); err == nil && cerr != nil { - err = cerr - } - }() - - val, err = reader.GetInternal(key) - if err != nil { - return nil, err - } - return val, nil -} - -func (i *indexImpl) SetInternal(key, val []byte) error { - i.mutex.RLock() - defer i.mutex.RUnlock() - - if !i.open { - return ErrorIndexClosed - } - - return i.i.SetInternal(key, val) -} - -func (i *indexImpl) DeleteInternal(key []byte) error { - i.mutex.RLock() - defer i.mutex.RUnlock() - - if !i.open { - return ErrorIndexClosed - } - - return i.i.DeleteInternal(key) -} - -// NewBatch creates a new empty batch. 
-func (i *indexImpl) NewBatch() *Batch { - return &Batch{ - index: i, - internal: index.NewBatch(), - } -} - -func (i *indexImpl) Name() string { - return i.name -} - -func (i *indexImpl) SetName(name string) { - indexStats.UnRegister(i) - i.name = name - indexStats.Register(i) -} - -type indexImplFieldDict struct { - index *indexImpl - indexReader index.IndexReader - fieldDict index.FieldDict -} - -func (f *indexImplFieldDict) Next() (*index.DictEntry, error) { - return f.fieldDict.Next() -} - -func (f *indexImplFieldDict) Close() error { - defer f.index.mutex.RUnlock() - err := f.fieldDict.Close() - if err != nil { - return err - } - return f.indexReader.Close() -} - -// helper function to remove duplicate entries from slice of strings -func deDuplicate(fields []string) []string { - entries := make(map[string]struct{}) - ret := []string{} - for _, entry := range fields { - if _, exists := entries[entry]; !exists { - entries[entry] = struct{}{} - ret = append(ret, entry) - } - } - return ret -} - -type searchHitSorter struct { - hits search.DocumentMatchCollection - sort search.SortOrder - cachedScoring []bool - cachedDesc []bool -} - -func newSearchHitSorter(sort search.SortOrder, hits search.DocumentMatchCollection) *searchHitSorter { - return &searchHitSorter{ - sort: sort, - hits: hits, - cachedScoring: sort.CacheIsScore(), - cachedDesc: sort.CacheDescending(), - } -} - -func (m *searchHitSorter) Len() int { return len(m.hits) } -func (m *searchHitSorter) Swap(i, j int) { m.hits[i], m.hits[j] = m.hits[j], m.hits[i] } -func (m *searchHitSorter) Less(i, j int) bool { - c := m.sort.Compare(m.cachedScoring, m.cachedDesc, m.hits[i], m.hits[j]) - return c < 0 -} diff --git a/vendor/github.com/blevesearch/bleve/v2/index_meta.go b/vendor/github.com/blevesearch/bleve/v2/index_meta.go deleted file mode 100644 index fe0ddebac..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/index_meta.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package bleve - -import ( - "encoding/json" - "io/ioutil" - "os" - "path/filepath" - - "github.com/blevesearch/bleve/v2/index/upsidedown" -) - -const metaFilename = "index_meta.json" - -type indexMeta struct { - Storage string `json:"storage"` - IndexType string `json:"index_type"` - Config map[string]interface{} `json:"config,omitempty"` -} - -func newIndexMeta(indexType string, storage string, config map[string]interface{}) *indexMeta { - return &indexMeta{ - IndexType: indexType, - Storage: storage, - Config: config, - } -} - -func openIndexMeta(path string) (*indexMeta, error) { - if _, err := os.Stat(path); os.IsNotExist(err) { - return nil, ErrorIndexPathDoesNotExist - } - indexMetaPath := indexMetaPath(path) - metaBytes, err := ioutil.ReadFile(indexMetaPath) - if err != nil { - return nil, ErrorIndexMetaMissing - } - var im indexMeta - err = json.Unmarshal(metaBytes, &im) - if err != nil { - return nil, ErrorIndexMetaCorrupt - } - if im.IndexType == "" { - im.IndexType = upsidedown.Name - } - return &im, nil -} - -func (i *indexMeta) Save(path string) (err error) { - indexMetaPath := indexMetaPath(path) - // ensure any necessary parent directories exist - err = os.MkdirAll(path, 0700) - if err != nil { - if os.IsExist(err) { - return ErrorIndexPathExists - } - return err - } - metaBytes, err := json.Marshal(i) - if err != nil { - return err - } - indexMetaFile, err := os.OpenFile(indexMetaPath, 
os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666) - if err != nil { - if os.IsExist(err) { - return ErrorIndexPathExists - } - return err - } - defer func() { - if ierr := indexMetaFile.Close(); err == nil && ierr != nil { - err = ierr - } - }() - _, err = indexMetaFile.Write(metaBytes) - if err != nil { - return err - } - return nil -} - -func indexMetaPath(path string) string { - return filepath.Join(path, metaFilename) -} diff --git a/vendor/github.com/blevesearch/bleve/v2/index_stats.go b/vendor/github.com/blevesearch/bleve/v2/index_stats.go deleted file mode 100644 index 2d303f6eb..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/index_stats.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package bleve - -import ( - "encoding/json" - "sync" - "sync/atomic" -) - -type IndexStat struct { - searches uint64 - searchTime uint64 - i *indexImpl -} - -func (is *IndexStat) statsMap() map[string]interface{} { - m := map[string]interface{}{} - m["index"] = is.i.i.StatsMap() - m["searches"] = atomic.LoadUint64(&is.searches) - m["search_time"] = atomic.LoadUint64(&is.searchTime) - return m -} - -func (is *IndexStat) MarshalJSON() ([]byte, error) { - m := is.statsMap() - return json.Marshal(m) -} - -type IndexStats struct { - indexes map[string]*IndexStat - mutex sync.RWMutex -} - -func NewIndexStats() *IndexStats { - return &IndexStats{ - indexes: make(map[string]*IndexStat), - } -} - -func (i *IndexStats) Register(index Index) { - i.mutex.Lock() - defer i.mutex.Unlock() - i.indexes[index.Name()] = index.Stats() -} - -func (i *IndexStats) UnRegister(index Index) { - i.mutex.Lock() - defer i.mutex.Unlock() - delete(i.indexes, index.Name()) -} - -func (i *IndexStats) String() string { - i.mutex.RLock() - defer i.mutex.RUnlock() - bytes, err := json.Marshal(i.indexes) - if err != nil { - return "error marshaling stats" - } - return string(bytes) -} - -var indexStats *IndexStats diff --git a/vendor/github.com/blevesearch/bleve/v2/mapping.go b/vendor/github.com/blevesearch/bleve/v2/mapping.go deleted file mode 100644 index 54753073a..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/mapping.go +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package bleve - -import "github.com/blevesearch/bleve/v2/mapping" - -// NewIndexMapping creates a new IndexMapping that will use all the default indexing rules -func NewIndexMapping() *mapping.IndexMappingImpl { - return mapping.NewIndexMapping() -} - -// NewDocumentMapping returns a new document mapping -// with all the default values. -func NewDocumentMapping() *mapping.DocumentMapping { - return mapping.NewDocumentMapping() -} - -// NewDocumentStaticMapping returns a new document -// mapping that will not automatically index parts -// of a document without an explicit mapping. -func NewDocumentStaticMapping() *mapping.DocumentMapping { - return mapping.NewDocumentStaticMapping() -} - -// NewDocumentDisabledMapping returns a new document -// mapping that will not perform any indexing. -func NewDocumentDisabledMapping() *mapping.DocumentMapping { - return mapping.NewDocumentDisabledMapping() -} - -// NewTextFieldMapping returns a default field mapping for text -func NewTextFieldMapping() *mapping.FieldMapping { - return mapping.NewTextFieldMapping() -} - -// NewNumericFieldMapping returns a default field mapping for numbers -func NewNumericFieldMapping() *mapping.FieldMapping { - return mapping.NewNumericFieldMapping() -} - -// NewDateTimeFieldMapping returns a default field mapping for dates -func NewDateTimeFieldMapping() *mapping.FieldMapping { - return mapping.NewDateTimeFieldMapping() -} - -// NewBooleanFieldMapping returns a default field mapping for booleans -func NewBooleanFieldMapping() *mapping.FieldMapping { - return mapping.NewBooleanFieldMapping() -} - -func NewGeoPointFieldMapping() *mapping.FieldMapping { - return mapping.NewGeoPointFieldMapping() -} diff --git a/vendor/github.com/blevesearch/bleve/v2/mapping/analysis.go b/vendor/github.com/blevesearch/bleve/v2/mapping/analysis.go deleted file mode 100644 index 03e3cd01b..000000000 --- 
a/vendor/github.com/blevesearch/bleve/v2/mapping/analysis.go +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package mapping - -type customAnalysis struct { - CharFilters map[string]map[string]interface{} `json:"char_filters,omitempty"` - Tokenizers map[string]map[string]interface{} `json:"tokenizers,omitempty"` - TokenMaps map[string]map[string]interface{} `json:"token_maps,omitempty"` - TokenFilters map[string]map[string]interface{} `json:"token_filters,omitempty"` - Analyzers map[string]map[string]interface{} `json:"analyzers,omitempty"` - DateTimeParsers map[string]map[string]interface{} `json:"date_time_parsers,omitempty"` -} - -func (c *customAnalysis) registerAll(i *IndexMappingImpl) error { - for name, config := range c.CharFilters { - _, err := i.cache.DefineCharFilter(name, config) - if err != nil { - return err - } - } - - if len(c.Tokenizers) > 0 { - // put all the names in map tracking work to do - todo := map[string]struct{}{} - for name := range c.Tokenizers { - todo[name] = struct{}{} - } - registered := 1 - errs := []error{} - // as long as we keep making progress, keep going - for len(todo) > 0 && registered > 0 { - registered = 0 - errs = []error{} - for name := range todo { - config := c.Tokenizers[name] - _, err := i.cache.DefineTokenizer(name, config) - if err != nil { - errs = append(errs, err) - } else { - delete(todo, name) - registered++ - } - } - } - - if 
len(errs) > 0 { - return errs[0] - } - } - for name, config := range c.TokenMaps { - _, err := i.cache.DefineTokenMap(name, config) - if err != nil { - return err - } - } - for name, config := range c.TokenFilters { - _, err := i.cache.DefineTokenFilter(name, config) - if err != nil { - return err - } - } - for name, config := range c.Analyzers { - _, err := i.cache.DefineAnalyzer(name, config) - if err != nil { - return err - } - } - for name, config := range c.DateTimeParsers { - _, err := i.cache.DefineDateTimeParser(name, config) - if err != nil { - return err - } - } - return nil -} - -func newCustomAnalysis() *customAnalysis { - rv := customAnalysis{ - CharFilters: make(map[string]map[string]interface{}), - Tokenizers: make(map[string]map[string]interface{}), - TokenMaps: make(map[string]map[string]interface{}), - TokenFilters: make(map[string]map[string]interface{}), - Analyzers: make(map[string]map[string]interface{}), - DateTimeParsers: make(map[string]map[string]interface{}), - } - return &rv -} diff --git a/vendor/github.com/blevesearch/bleve/v2/mapping/document.go b/vendor/github.com/blevesearch/bleve/v2/mapping/document.go deleted file mode 100644 index 44911de77..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/mapping/document.go +++ /dev/null @@ -1,558 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package mapping - -import ( - "encoding" - "encoding/json" - "fmt" - "reflect" - "time" - - "github.com/blevesearch/bleve/v2/registry" -) - -// A DocumentMapping describes how a type of document -// should be indexed. -// As documents can be hierarchical, named sub-sections -// of documents are mapped using the same structure in -// the Properties field. -// Each value inside a document can be indexed 0 or more -// ways. These index entries are called fields and -// are stored in the Fields field. -// Entire sections of a document can be ignored or -// excluded by setting Enabled to false. -// If not explicitly mapped, default mapping operations -// are used. To disable this automatic handling, set -// Dynamic to false. -type DocumentMapping struct { - Enabled bool `json:"enabled"` - Dynamic bool `json:"dynamic"` - Properties map[string]*DocumentMapping `json:"properties,omitempty"` - Fields []*FieldMapping `json:"fields,omitempty"` - DefaultAnalyzer string `json:"default_analyzer,omitempty"` - - // StructTagKey overrides "json" when looking for field names in struct tags - StructTagKey string `json:"struct_tag_key,omitempty"` -} - -func (dm *DocumentMapping) Validate(cache *registry.Cache) error { - var err error - if dm.DefaultAnalyzer != "" { - _, err := cache.AnalyzerNamed(dm.DefaultAnalyzer) - if err != nil { - return err - } - } - for _, property := range dm.Properties { - err = property.Validate(cache) - if err != nil { - return err - } - } - for _, field := range dm.Fields { - if field.Analyzer != "" { - _, err = cache.AnalyzerNamed(field.Analyzer) - if err != nil { - return err - } - } - if field.DateFormat != "" { - _, err = cache.DateTimeParserNamed(field.DateFormat) - if err != nil { - return err - } - } - switch field.Type { - case "text", "datetime", "number", "boolean", "geopoint": - default: - return fmt.Errorf("unknown field type: '%s'", field.Type) - } - } - return nil -} - -// analyzerNameForPath attempts to first find the field -// described 
by this path, then returns the analyzer -// configured for that field -func (dm *DocumentMapping) analyzerNameForPath(path string) string { - field := dm.fieldDescribedByPath(path) - if field != nil { - return field.Analyzer - } - return "" -} - -func (dm *DocumentMapping) fieldDescribedByPath(path string) *FieldMapping { - pathElements := decodePath(path) - if len(pathElements) > 1 { - // easy case, there is more than 1 path element remaining - // the next path element must match a property name - // at this level - for propName, subDocMapping := range dm.Properties { - if propName == pathElements[0] { - return subDocMapping.fieldDescribedByPath(encodePath(pathElements[1:])) - } - } - } - - // either the path just had one element - // or it had multiple, but no match for the first element at this level - // look for match with full path - - // first look for property name with empty field - for propName, subDocMapping := range dm.Properties { - if propName == path { - // found property name match, now look at its fields - for _, field := range subDocMapping.Fields { - if field.Name == "" || field.Name == path { - // match - return field - } - } - } - } - // next, walk the properties again, looking for field overriding the name - for propName, subDocMapping := range dm.Properties { - if propName != path { - // property name isn't a match, but field name could override it - for _, field := range subDocMapping.Fields { - if field.Name == path { - return field - } - } - } - } - - return nil -} - -// documentMappingForPath only returns EXACT matches for a sub document -// or for an explicitly mapped field, if you want to find the -// closest document mapping to a field not explicitly mapped -// use closestDocMapping -func (dm *DocumentMapping) documentMappingForPath(path string) *DocumentMapping { - pathElements := decodePath(path) - current := dm -OUTER: - for i, pathElement := range pathElements { - for name, subDocMapping := range current.Properties { - if name == 
pathElement { - current = subDocMapping - continue OUTER - } - } - // no subDocMapping matches this pathElement - // only if this is the last element check for field name - if i == len(pathElements)-1 { - for _, field := range current.Fields { - if field.Name == pathElement { - break - } - } - } - - return nil - } - return current -} - -// closestDocMapping findest the most specific document mapping that matches -// part of the provided path -func (dm *DocumentMapping) closestDocMapping(path string) *DocumentMapping { - pathElements := decodePath(path) - current := dm -OUTER: - for _, pathElement := range pathElements { - for name, subDocMapping := range current.Properties { - if name == pathElement { - current = subDocMapping - continue OUTER - } - } - break - } - return current -} - -// NewDocumentMapping returns a new document mapping -// with all the default values. -func NewDocumentMapping() *DocumentMapping { - return &DocumentMapping{ - Enabled: true, - Dynamic: true, - } -} - -// NewDocumentStaticMapping returns a new document -// mapping that will not automatically index parts -// of a document without an explicit mapping. -func NewDocumentStaticMapping() *DocumentMapping { - return &DocumentMapping{ - Enabled: true, - } -} - -// NewDocumentDisabledMapping returns a new document -// mapping that will not perform any indexing. -func NewDocumentDisabledMapping() *DocumentMapping { - return &DocumentMapping{} -} - -// AddSubDocumentMapping adds the provided DocumentMapping as a sub-mapping -// for the specified named subsection. -func (dm *DocumentMapping) AddSubDocumentMapping(property string, sdm *DocumentMapping) { - if dm.Properties == nil { - dm.Properties = make(map[string]*DocumentMapping) - } - dm.Properties[property] = sdm -} - -// AddFieldMappingsAt adds one or more FieldMappings -// at the named sub-document. If the named sub-document -// doesn't yet exist it is created for you. 
-// This is a convenience function to make most common -// mappings more concise. -// Otherwise, you would: -// subMapping := NewDocumentMapping() -// subMapping.AddFieldMapping(fieldMapping) -// parentMapping.AddSubDocumentMapping(property, subMapping) -func (dm *DocumentMapping) AddFieldMappingsAt(property string, fms ...*FieldMapping) { - if dm.Properties == nil { - dm.Properties = make(map[string]*DocumentMapping) - } - sdm, ok := dm.Properties[property] - if !ok { - sdm = NewDocumentMapping() - } - for _, fm := range fms { - sdm.AddFieldMapping(fm) - } - dm.Properties[property] = sdm -} - -// AddFieldMapping adds the provided FieldMapping for this section -// of the document. -func (dm *DocumentMapping) AddFieldMapping(fm *FieldMapping) { - if dm.Fields == nil { - dm.Fields = make([]*FieldMapping, 0) - } - dm.Fields = append(dm.Fields, fm) -} - -// UnmarshalJSON offers custom unmarshaling with optional strict validation -func (dm *DocumentMapping) UnmarshalJSON(data []byte) error { - var tmp map[string]json.RawMessage - err := json.Unmarshal(data, &tmp) - if err != nil { - return err - } - - // set defaults for fields which might have been omitted - dm.Enabled = true - dm.Dynamic = true - - var invalidKeys []string - for k, v := range tmp { - switch k { - case "enabled": - err := json.Unmarshal(v, &dm.Enabled) - if err != nil { - return err - } - case "dynamic": - err := json.Unmarshal(v, &dm.Dynamic) - if err != nil { - return err - } - case "default_analyzer": - err := json.Unmarshal(v, &dm.DefaultAnalyzer) - if err != nil { - return err - } - case "properties": - err := json.Unmarshal(v, &dm.Properties) - if err != nil { - return err - } - case "fields": - err := json.Unmarshal(v, &dm.Fields) - if err != nil { - return err - } - case "struct_tag_key": - err := json.Unmarshal(v, &dm.StructTagKey) - if err != nil { - return err - } - default: - invalidKeys = append(invalidKeys, k) - } - } - - if MappingJSONStrict && len(invalidKeys) > 0 { - return 
fmt.Errorf("document mapping contains invalid keys: %v", invalidKeys) - } - - return nil -} - -func (dm *DocumentMapping) defaultAnalyzerName(path []string) string { - current := dm - rv := current.DefaultAnalyzer - for _, pathElement := range path { - var ok bool - current, ok = current.Properties[pathElement] - if !ok { - break - } - if current.DefaultAnalyzer != "" { - rv = current.DefaultAnalyzer - } - } - return rv -} - -func (dm *DocumentMapping) walkDocument(data interface{}, path []string, indexes []uint64, context *walkContext) { - // allow default "json" tag to be overridden - structTagKey := dm.StructTagKey - if structTagKey == "" { - structTagKey = "json" - } - - val := reflect.ValueOf(data) - if !val.IsValid() { - return - } - - typ := val.Type() - switch typ.Kind() { - case reflect.Map: - // FIXME can add support for other map keys in the future - if typ.Key().Kind() == reflect.String { - for _, key := range val.MapKeys() { - fieldName := key.String() - fieldVal := val.MapIndex(key).Interface() - dm.processProperty(fieldVal, append(path, fieldName), indexes, context) - } - } - case reflect.Struct: - for i := 0; i < val.NumField(); i++ { - field := typ.Field(i) - fieldName := field.Name - // anonymous fields of type struct can elide the type name - if field.Anonymous && field.Type.Kind() == reflect.Struct { - fieldName = "" - } - - // if the field has a name under the specified tag, prefer that - tag := field.Tag.Get(structTagKey) - tagFieldName := parseTagName(tag) - if tagFieldName == "-" { - continue - } - // allow tag to set field name to empty, only if anonymous - if field.Tag != "" && (tagFieldName != "" || field.Anonymous) { - fieldName = tagFieldName - } - - if val.Field(i).CanInterface() { - fieldVal := val.Field(i).Interface() - newpath := path - if fieldName != "" { - newpath = append(path, fieldName) - } - dm.processProperty(fieldVal, newpath, indexes, context) - } - } - case reflect.Slice, reflect.Array: - for i := 0; i < val.Len(); i++ { 
- if val.Index(i).CanInterface() { - fieldVal := val.Index(i).Interface() - dm.processProperty(fieldVal, path, append(indexes, uint64(i)), context) - } - } - case reflect.Ptr: - ptrElem := val.Elem() - if ptrElem.IsValid() && ptrElem.CanInterface() { - dm.processProperty(ptrElem.Interface(), path, indexes, context) - } - case reflect.String: - dm.processProperty(val.String(), path, indexes, context) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - dm.processProperty(float64(val.Int()), path, indexes, context) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - dm.processProperty(float64(val.Uint()), path, indexes, context) - case reflect.Float32, reflect.Float64: - dm.processProperty(float64(val.Float()), path, indexes, context) - case reflect.Bool: - dm.processProperty(val.Bool(), path, indexes, context) - } - -} - -func (dm *DocumentMapping) processProperty(property interface{}, path []string, indexes []uint64, context *walkContext) { - pathString := encodePath(path) - // look to see if there is a mapping for this field - subDocMapping := dm.documentMappingForPath(pathString) - closestDocMapping := dm.closestDocMapping(pathString) - - // check to see if we even need to do further processing - if subDocMapping != nil && !subDocMapping.Enabled { - return - } - - propertyValue := reflect.ValueOf(property) - if !propertyValue.IsValid() { - // cannot do anything with the zero value - return - } - propertyType := propertyValue.Type() - switch propertyType.Kind() { - case reflect.String: - propertyValueString := propertyValue.String() - if subDocMapping != nil { - // index by explicit mapping - for _, fieldMapping := range subDocMapping.Fields { - if fieldMapping.Type == "geopoint" { - fieldMapping.processGeoPoint(property, pathString, path, indexes, context) - } else { - fieldMapping.processString(propertyValueString, pathString, path, indexes, context) - } - } - } else if closestDocMapping.Dynamic { - 
// automatic indexing behavior - - // first see if it can be parsed by the default date parser - dateTimeParser := context.im.DateTimeParserNamed(context.im.DefaultDateTimeParser) - if dateTimeParser != nil { - parsedDateTime, err := dateTimeParser.ParseDateTime(propertyValueString) - if err != nil { - // index as text - fieldMapping := newTextFieldMappingDynamic(context.im) - fieldMapping.processString(propertyValueString, pathString, path, indexes, context) - } else { - // index as datetime - fieldMapping := newDateTimeFieldMappingDynamic(context.im) - fieldMapping.processTime(parsedDateTime, pathString, path, indexes, context) - } - } - } - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - dm.processProperty(float64(propertyValue.Int()), path, indexes, context) - return - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - dm.processProperty(float64(propertyValue.Uint()), path, indexes, context) - return - case reflect.Float64, reflect.Float32: - propertyValFloat := propertyValue.Float() - if subDocMapping != nil { - // index by explicit mapping - for _, fieldMapping := range subDocMapping.Fields { - fieldMapping.processFloat64(propertyValFloat, pathString, path, indexes, context) - } - } else if closestDocMapping.Dynamic { - // automatic indexing behavior - fieldMapping := newNumericFieldMappingDynamic(context.im) - fieldMapping.processFloat64(propertyValFloat, pathString, path, indexes, context) - } - case reflect.Bool: - propertyValBool := propertyValue.Bool() - if subDocMapping != nil { - // index by explicit mapping - for _, fieldMapping := range subDocMapping.Fields { - fieldMapping.processBoolean(propertyValBool, pathString, path, indexes, context) - } - } else if closestDocMapping.Dynamic { - // automatic indexing behavior - fieldMapping := newBooleanFieldMappingDynamic(context.im) - fieldMapping.processBoolean(propertyValBool, pathString, path, indexes, context) - } - case reflect.Struct: - 
switch property := property.(type) { - case time.Time: - // don't descend into the time struct - if subDocMapping != nil { - // index by explicit mapping - for _, fieldMapping := range subDocMapping.Fields { - fieldMapping.processTime(property, pathString, path, indexes, context) - } - } else if closestDocMapping.Dynamic { - fieldMapping := newDateTimeFieldMappingDynamic(context.im) - fieldMapping.processTime(property, pathString, path, indexes, context) - } - case encoding.TextMarshaler: - txt, err := property.MarshalText() - if err == nil && subDocMapping != nil { - // index by explicit mapping - for _, fieldMapping := range subDocMapping.Fields { - if fieldMapping.Type == "text" { - fieldMapping.processString(string(txt), pathString, path, indexes, context) - } - } - } - dm.walkDocument(property, path, indexes, context) - default: - if subDocMapping != nil { - for _, fieldMapping := range subDocMapping.Fields { - if fieldMapping.Type == "geopoint" { - fieldMapping.processGeoPoint(property, pathString, path, indexes, context) - } - } - } - dm.walkDocument(property, path, indexes, context) - } - case reflect.Map, reflect.Slice: - if subDocMapping != nil { - for _, fieldMapping := range subDocMapping.Fields { - if fieldMapping.Type == "geopoint" { - fieldMapping.processGeoPoint(property, pathString, path, indexes, context) - } - } - } - dm.walkDocument(property, path, indexes, context) - case reflect.Ptr: - if !propertyValue.IsNil() { - switch property := property.(type) { - case encoding.TextMarshaler: - // ONLY process TextMarshaler if there is an explicit mapping - // AND all of the fiels are of type text - // OTHERWISE process field without TextMarshaler - if subDocMapping != nil { - allFieldsText := true - for _, fieldMapping := range subDocMapping.Fields { - if fieldMapping.Type != "text" { - allFieldsText = false - break - } - } - txt, err := property.MarshalText() - if err == nil && allFieldsText { - txtStr := string(txt) - for _, fieldMapping := range 
subDocMapping.Fields { - fieldMapping.processString(txtStr, pathString, path, indexes, context) - } - return - } - } - dm.walkDocument(property, path, indexes, context) - default: - dm.walkDocument(property, path, indexes, context) - } - } - default: - dm.walkDocument(property, path, indexes, context) - } -} diff --git a/vendor/github.com/blevesearch/bleve/v2/mapping/field.go b/vendor/github.com/blevesearch/bleve/v2/mapping/field.go deleted file mode 100644 index 7ad1744d4..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/mapping/field.go +++ /dev/null @@ -1,359 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package mapping - -import ( - "encoding/json" - "fmt" - "time" - - index "github.com/blevesearch/bleve_index_api" - - "github.com/blevesearch/bleve/v2/analysis" - "github.com/blevesearch/bleve/v2/document" - "github.com/blevesearch/bleve/v2/geo" -) - -// control the default behavior for dynamic fields (those not explicitly mapped) -var ( - IndexDynamic = true - StoreDynamic = true - DocValuesDynamic = true // TODO revisit default? -) - -// A FieldMapping describes how a specific item -// should be put into the index. -type FieldMapping struct { - Name string `json:"name,omitempty"` - Type string `json:"type,omitempty"` - - // Analyzer specifies the name of the analyzer to use for this field. 
If - // Analyzer is empty, traverse the DocumentMapping tree toward the root and - // pick the first non-empty DefaultAnalyzer found. If there is none, use - // the IndexMapping.DefaultAnalyzer. - Analyzer string `json:"analyzer,omitempty"` - - // Store indicates whether to store field values in the index. Stored - // values can be retrieved from search results using SearchRequest.Fields. - Store bool `json:"store,omitempty"` - Index bool `json:"index,omitempty"` - - // IncludeTermVectors, if true, makes terms occurrences to be recorded for - // this field. It includes the term position within the terms sequence and - // the term offsets in the source document field. Term vectors are required - // to perform phrase queries or terms highlighting in source documents. - IncludeTermVectors bool `json:"include_term_vectors,omitempty"` - IncludeInAll bool `json:"include_in_all,omitempty"` - DateFormat string `json:"date_format,omitempty"` - - // DocValues, if true makes the index uninverting possible for this field - // It is useful for faceting and sorting queries. - DocValues bool `json:"docvalues,omitempty"` - - // SkipFreqNorm, if true, avoids the indexing of frequency and norm values - // of the tokens for this field. This option would be useful for saving - // the processing of freq/norm details when the default score based relevancy - // isn't needed. 
- SkipFreqNorm bool `json:"skip_freq_norm,omitempty"` -} - -// NewTextFieldMapping returns a default field mapping for text -func NewTextFieldMapping() *FieldMapping { - return &FieldMapping{ - Type: "text", - Store: true, - Index: true, - IncludeTermVectors: true, - IncludeInAll: true, - DocValues: true, - } -} - -func newTextFieldMappingDynamic(im *IndexMappingImpl) *FieldMapping { - rv := NewTextFieldMapping() - rv.Store = im.StoreDynamic - rv.Index = im.IndexDynamic - rv.DocValues = im.DocValuesDynamic - return rv -} - -// NewNumericFieldMapping returns a default field mapping for numbers -func NewNumericFieldMapping() *FieldMapping { - return &FieldMapping{ - Type: "number", - Store: true, - Index: true, - IncludeInAll: true, - DocValues: true, - } -} - -func newNumericFieldMappingDynamic(im *IndexMappingImpl) *FieldMapping { - rv := NewNumericFieldMapping() - rv.Store = im.StoreDynamic - rv.Index = im.IndexDynamic - rv.DocValues = im.DocValuesDynamic - return rv -} - -// NewDateTimeFieldMapping returns a default field mapping for dates -func NewDateTimeFieldMapping() *FieldMapping { - return &FieldMapping{ - Type: "datetime", - Store: true, - Index: true, - IncludeInAll: true, - DocValues: true, - } -} - -func newDateTimeFieldMappingDynamic(im *IndexMappingImpl) *FieldMapping { - rv := NewDateTimeFieldMapping() - rv.Store = im.StoreDynamic - rv.Index = im.IndexDynamic - rv.DocValues = im.DocValuesDynamic - return rv -} - -// NewBooleanFieldMapping returns a default field mapping for booleans -func NewBooleanFieldMapping() *FieldMapping { - return &FieldMapping{ - Type: "boolean", - Store: true, - Index: true, - IncludeInAll: true, - DocValues: true, - } -} - -func newBooleanFieldMappingDynamic(im *IndexMappingImpl) *FieldMapping { - rv := NewBooleanFieldMapping() - rv.Store = im.StoreDynamic - rv.Index = im.IndexDynamic - rv.DocValues = im.DocValuesDynamic - return rv -} - -// NewGeoPointFieldMapping returns a default field mapping for geo points -func 
NewGeoPointFieldMapping() *FieldMapping { - return &FieldMapping{ - Type: "geopoint", - Store: true, - Index: true, - IncludeInAll: true, - DocValues: true, - } -} - -// Options returns the indexing options for this field. -func (fm *FieldMapping) Options() index.FieldIndexingOptions { - var rv index.FieldIndexingOptions - if fm.Store { - rv |= index.StoreField - } - if fm.Index { - rv |= index.IndexField - } - if fm.IncludeTermVectors { - rv |= index.IncludeTermVectors - } - if fm.DocValues { - rv |= index.DocValues - } - if fm.SkipFreqNorm { - rv |= index.SkipFreqNorm - } - return rv -} - -func (fm *FieldMapping) processString(propertyValueString string, pathString string, path []string, indexes []uint64, context *walkContext) { - fieldName := getFieldName(pathString, path, fm) - options := fm.Options() - if fm.Type == "text" { - analyzer := fm.analyzerForField(path, context) - field := document.NewTextFieldCustom(fieldName, indexes, []byte(propertyValueString), options, analyzer) - context.doc.AddField(field) - - if !fm.IncludeInAll { - context.excludedFromAll = append(context.excludedFromAll, fieldName) - } - } else if fm.Type == "datetime" { - dateTimeFormat := context.im.DefaultDateTimeParser - if fm.DateFormat != "" { - dateTimeFormat = fm.DateFormat - } - dateTimeParser := context.im.DateTimeParserNamed(dateTimeFormat) - if dateTimeParser != nil { - parsedDateTime, err := dateTimeParser.ParseDateTime(propertyValueString) - if err == nil { - fm.processTime(parsedDateTime, pathString, path, indexes, context) - } - } - } -} - -func (fm *FieldMapping) processFloat64(propertyValFloat float64, pathString string, path []string, indexes []uint64, context *walkContext) { - fieldName := getFieldName(pathString, path, fm) - if fm.Type == "number" { - options := fm.Options() - field := document.NewNumericFieldWithIndexingOptions(fieldName, indexes, propertyValFloat, options) - context.doc.AddField(field) - - if !fm.IncludeInAll { - context.excludedFromAll = 
append(context.excludedFromAll, fieldName) - } - } -} - -func (fm *FieldMapping) processTime(propertyValueTime time.Time, pathString string, path []string, indexes []uint64, context *walkContext) { - fieldName := getFieldName(pathString, path, fm) - if fm.Type == "datetime" { - options := fm.Options() - field, err := document.NewDateTimeFieldWithIndexingOptions(fieldName, indexes, propertyValueTime, options) - if err == nil { - context.doc.AddField(field) - } else { - logger.Printf("could not build date %v", err) - } - - if !fm.IncludeInAll { - context.excludedFromAll = append(context.excludedFromAll, fieldName) - } - } -} - -func (fm *FieldMapping) processBoolean(propertyValueBool bool, pathString string, path []string, indexes []uint64, context *walkContext) { - fieldName := getFieldName(pathString, path, fm) - if fm.Type == "boolean" { - options := fm.Options() - field := document.NewBooleanFieldWithIndexingOptions(fieldName, indexes, propertyValueBool, options) - context.doc.AddField(field) - - if !fm.IncludeInAll { - context.excludedFromAll = append(context.excludedFromAll, fieldName) - } - } -} - -func (fm *FieldMapping) processGeoPoint(propertyMightBeGeoPoint interface{}, pathString string, path []string, indexes []uint64, context *walkContext) { - lon, lat, found := geo.ExtractGeoPoint(propertyMightBeGeoPoint) - if found { - fieldName := getFieldName(pathString, path, fm) - options := fm.Options() - field := document.NewGeoPointFieldWithIndexingOptions(fieldName, indexes, lon, lat, options) - context.doc.AddField(field) - - if !fm.IncludeInAll { - context.excludedFromAll = append(context.excludedFromAll, fieldName) - } - } -} - -func (fm *FieldMapping) analyzerForField(path []string, context *walkContext) *analysis.Analyzer { - analyzerName := fm.Analyzer - if analyzerName == "" { - analyzerName = context.dm.defaultAnalyzerName(path) - if analyzerName == "" { - analyzerName = context.im.DefaultAnalyzer - } - } - return context.im.AnalyzerNamed(analyzerName) 
-} - -func getFieldName(pathString string, path []string, fieldMapping *FieldMapping) string { - fieldName := pathString - if fieldMapping.Name != "" { - parentName := "" - if len(path) > 1 { - parentName = encodePath(path[:len(path)-1]) + pathSeparator - } - fieldName = parentName + fieldMapping.Name - } - return fieldName -} - -// UnmarshalJSON offers custom unmarshaling with optional strict validation -func (fm *FieldMapping) UnmarshalJSON(data []byte) error { - - var tmp map[string]json.RawMessage - err := json.Unmarshal(data, &tmp) - if err != nil { - return err - } - - var invalidKeys []string - for k, v := range tmp { - switch k { - case "name": - err := json.Unmarshal(v, &fm.Name) - if err != nil { - return err - } - case "type": - err := json.Unmarshal(v, &fm.Type) - if err != nil { - return err - } - case "analyzer": - err := json.Unmarshal(v, &fm.Analyzer) - if err != nil { - return err - } - case "store": - err := json.Unmarshal(v, &fm.Store) - if err != nil { - return err - } - case "index": - err := json.Unmarshal(v, &fm.Index) - if err != nil { - return err - } - case "include_term_vectors": - err := json.Unmarshal(v, &fm.IncludeTermVectors) - if err != nil { - return err - } - case "include_in_all": - err := json.Unmarshal(v, &fm.IncludeInAll) - if err != nil { - return err - } - case "date_format": - err := json.Unmarshal(v, &fm.DateFormat) - if err != nil { - return err - } - case "docvalues": - err := json.Unmarshal(v, &fm.DocValues) - if err != nil { - return err - } - case "skip_freq_norm": - err := json.Unmarshal(v, &fm.SkipFreqNorm) - if err != nil { - return err - } - default: - invalidKeys = append(invalidKeys, k) - } - } - - if MappingJSONStrict && len(invalidKeys) > 0 { - return fmt.Errorf("field mapping contains invalid keys: %v", invalidKeys) - } - - return nil -} diff --git a/vendor/github.com/blevesearch/bleve/v2/mapping/index.go b/vendor/github.com/blevesearch/bleve/v2/mapping/index.go deleted file mode 100644 index 
c34343385..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/mapping/index.go +++ /dev/null @@ -1,444 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package mapping - -import ( - "encoding/json" - "fmt" - index "github.com/blevesearch/bleve_index_api" - - "github.com/blevesearch/bleve/v2/analysis" - "github.com/blevesearch/bleve/v2/analysis/analyzer/standard" - "github.com/blevesearch/bleve/v2/analysis/datetime/optional" - "github.com/blevesearch/bleve/v2/document" - "github.com/blevesearch/bleve/v2/registry" -) - -var MappingJSONStrict = false - -const defaultTypeField = "_type" -const defaultType = "_default" -const defaultField = "_all" -const defaultAnalyzer = standard.Name -const defaultDateTimeParser = optional.Name - -// An IndexMappingImpl controls how objects are placed -// into an index. -// First the type of the object is determined. -// Once the type is know, the appropriate -// DocumentMapping is selected by the type. -// If no mapping was determined for that type, -// a DefaultMapping will be used. 
-type IndexMappingImpl struct { - TypeMapping map[string]*DocumentMapping `json:"types,omitempty"` - DefaultMapping *DocumentMapping `json:"default_mapping"` - TypeField string `json:"type_field"` - DefaultType string `json:"default_type"` - DefaultAnalyzer string `json:"default_analyzer"` - DefaultDateTimeParser string `json:"default_datetime_parser"` - DefaultField string `json:"default_field"` - StoreDynamic bool `json:"store_dynamic"` - IndexDynamic bool `json:"index_dynamic"` - DocValuesDynamic bool `json:"docvalues_dynamic"` - CustomAnalysis *customAnalysis `json:"analysis,omitempty"` - cache *registry.Cache -} - -// AddCustomCharFilter defines a custom char filter for use in this mapping -func (im *IndexMappingImpl) AddCustomCharFilter(name string, config map[string]interface{}) error { - _, err := im.cache.DefineCharFilter(name, config) - if err != nil { - return err - } - im.CustomAnalysis.CharFilters[name] = config - return nil -} - -// AddCustomTokenizer defines a custom tokenizer for use in this mapping -func (im *IndexMappingImpl) AddCustomTokenizer(name string, config map[string]interface{}) error { - _, err := im.cache.DefineTokenizer(name, config) - if err != nil { - return err - } - im.CustomAnalysis.Tokenizers[name] = config - return nil -} - -// AddCustomTokenMap defines a custom token map for use in this mapping -func (im *IndexMappingImpl) AddCustomTokenMap(name string, config map[string]interface{}) error { - _, err := im.cache.DefineTokenMap(name, config) - if err != nil { - return err - } - im.CustomAnalysis.TokenMaps[name] = config - return nil -} - -// AddCustomTokenFilter defines a custom token filter for use in this mapping -func (im *IndexMappingImpl) AddCustomTokenFilter(name string, config map[string]interface{}) error { - _, err := im.cache.DefineTokenFilter(name, config) - if err != nil { - return err - } - im.CustomAnalysis.TokenFilters[name] = config - return nil -} - -// AddCustomAnalyzer defines a custom analyzer for use in this 
mapping. The -// config map must have a "type" string entry to resolve the analyzer -// constructor. The constructor is invoked with the remaining entries and -// returned analyzer is registered in the IndexMapping. -// -// bleve comes with predefined analyzers, like -// github.com/blevesearch/bleve/analysis/analyzer/custom. They are -// available only if their package is imported by client code. To achieve this, -// use their metadata to fill configuration entries: -// -// import ( -// "github.com/blevesearch/bleve/v2/analysis/analyzer/custom" -// "github.com/blevesearch/bleve/v2/analysis/char/html" -// "github.com/blevesearch/bleve/v2/analysis/token/lowercase" -// "github.com/blevesearch/bleve/v2/analysis/tokenizer/unicode" -// ) -// -// m := bleve.NewIndexMapping() -// err := m.AddCustomAnalyzer("html", map[string]interface{}{ -// "type": custom.Name, -// "char_filters": []string{ -// html.Name, -// }, -// "tokenizer": unicode.Name, -// "token_filters": []string{ -// lowercase.Name, -// ... 
-// }, -// }) -func (im *IndexMappingImpl) AddCustomAnalyzer(name string, config map[string]interface{}) error { - _, err := im.cache.DefineAnalyzer(name, config) - if err != nil { - return err - } - im.CustomAnalysis.Analyzers[name] = config - return nil -} - -// AddCustomDateTimeParser defines a custom date time parser for use in this mapping -func (im *IndexMappingImpl) AddCustomDateTimeParser(name string, config map[string]interface{}) error { - _, err := im.cache.DefineDateTimeParser(name, config) - if err != nil { - return err - } - im.CustomAnalysis.DateTimeParsers[name] = config - return nil -} - -// NewIndexMapping creates a new IndexMapping that will use all the default indexing rules -func NewIndexMapping() *IndexMappingImpl { - return &IndexMappingImpl{ - TypeMapping: make(map[string]*DocumentMapping), - DefaultMapping: NewDocumentMapping(), - TypeField: defaultTypeField, - DefaultType: defaultType, - DefaultAnalyzer: defaultAnalyzer, - DefaultDateTimeParser: defaultDateTimeParser, - DefaultField: defaultField, - IndexDynamic: IndexDynamic, - StoreDynamic: StoreDynamic, - DocValuesDynamic: DocValuesDynamic, - CustomAnalysis: newCustomAnalysis(), - cache: registry.NewCache(), - } -} - -// Validate will walk the entire structure ensuring the following -// explicitly named and default analyzers can be built -func (im *IndexMappingImpl) Validate() error { - _, err := im.cache.AnalyzerNamed(im.DefaultAnalyzer) - if err != nil { - return err - } - _, err = im.cache.DateTimeParserNamed(im.DefaultDateTimeParser) - if err != nil { - return err - } - err = im.DefaultMapping.Validate(im.cache) - if err != nil { - return err - } - for _, docMapping := range im.TypeMapping { - err = docMapping.Validate(im.cache) - if err != nil { - return err - } - } - return nil -} - -// AddDocumentMapping sets a custom document mapping for the specified type -func (im *IndexMappingImpl) AddDocumentMapping(doctype string, dm *DocumentMapping) { - im.TypeMapping[doctype] = dm -} - 
-func (im *IndexMappingImpl) mappingForType(docType string) *DocumentMapping { - docMapping := im.TypeMapping[docType] - if docMapping == nil { - docMapping = im.DefaultMapping - } - return docMapping -} - -// UnmarshalJSON offers custom unmarshaling with optional strict validation -func (im *IndexMappingImpl) UnmarshalJSON(data []byte) error { - - var tmp map[string]json.RawMessage - err := json.Unmarshal(data, &tmp) - if err != nil { - return err - } - - // set defaults for fields which might have been omitted - im.cache = registry.NewCache() - im.CustomAnalysis = newCustomAnalysis() - im.TypeField = defaultTypeField - im.DefaultType = defaultType - im.DefaultAnalyzer = defaultAnalyzer - im.DefaultDateTimeParser = defaultDateTimeParser - im.DefaultField = defaultField - im.DefaultMapping = NewDocumentMapping() - im.TypeMapping = make(map[string]*DocumentMapping) - im.StoreDynamic = StoreDynamic - im.IndexDynamic = IndexDynamic - im.DocValuesDynamic = DocValuesDynamic - - var invalidKeys []string - for k, v := range tmp { - switch k { - case "analysis": - err := json.Unmarshal(v, &im.CustomAnalysis) - if err != nil { - return err - } - case "type_field": - err := json.Unmarshal(v, &im.TypeField) - if err != nil { - return err - } - case "default_type": - err := json.Unmarshal(v, &im.DefaultType) - if err != nil { - return err - } - case "default_analyzer": - err := json.Unmarshal(v, &im.DefaultAnalyzer) - if err != nil { - return err - } - case "default_datetime_parser": - err := json.Unmarshal(v, &im.DefaultDateTimeParser) - if err != nil { - return err - } - case "default_field": - err := json.Unmarshal(v, &im.DefaultField) - if err != nil { - return err - } - case "default_mapping": - err := json.Unmarshal(v, &im.DefaultMapping) - if err != nil { - return err - } - case "types": - err := json.Unmarshal(v, &im.TypeMapping) - if err != nil { - return err - } - case "store_dynamic": - err := json.Unmarshal(v, &im.StoreDynamic) - if err != nil { - return err - } - 
case "index_dynamic": - err := json.Unmarshal(v, &im.IndexDynamic) - if err != nil { - return err - } - case "docvalues_dynamic": - err := json.Unmarshal(v, &im.DocValuesDynamic) - if err != nil { - return err - } - default: - invalidKeys = append(invalidKeys, k) - } - } - - if MappingJSONStrict && len(invalidKeys) > 0 { - return fmt.Errorf("index mapping contains invalid keys: %v", invalidKeys) - } - - err = im.CustomAnalysis.registerAll(im) - if err != nil { - return err - } - - return nil -} - -func (im *IndexMappingImpl) determineType(data interface{}) string { - // first see if the object implements bleveClassifier - bleveClassifier, ok := data.(bleveClassifier) - if ok { - return bleveClassifier.BleveType() - } - // next see if the object implements Classifier - classifier, ok := data.(Classifier) - if ok { - return classifier.Type() - } - - // now see if we can find a type using the mapping - typ, ok := mustString(lookupPropertyPath(data, im.TypeField)) - if ok { - return typ - } - - return im.DefaultType -} - -func (im *IndexMappingImpl) MapDocument(doc *document.Document, data interface{}) error { - docType := im.determineType(data) - docMapping := im.mappingForType(docType) - if docMapping.Enabled { - walkContext := im.newWalkContext(doc, docMapping) - docMapping.walkDocument(data, []string{}, []uint64{}, walkContext) - - // see if the _all field was disabled - allMapping := docMapping.documentMappingForPath("_all") - if allMapping == nil || allMapping.Enabled { - field := document.NewCompositeFieldWithIndexingOptions("_all", true, []string{}, walkContext.excludedFromAll, index.IndexField|index.IncludeTermVectors) - doc.AddField(field) - } - } - - return nil -} - -type walkContext struct { - doc *document.Document - im *IndexMappingImpl - dm *DocumentMapping - excludedFromAll []string -} - -func (im *IndexMappingImpl) newWalkContext(doc *document.Document, dm *DocumentMapping) *walkContext { - return &walkContext{ - doc: doc, - im: im, - dm: dm, - 
excludedFromAll: []string{"_id"}, - } -} - -// AnalyzerNameForPath attempts to find the best analyzer to use with only a -// field name will walk all the document types, look for field mappings at the -// provided path, if one exists and it has an explicit analyzer that is -// returned. -func (im *IndexMappingImpl) AnalyzerNameForPath(path string) string { - // first we look for explicit mapping on the field - for _, docMapping := range im.TypeMapping { - analyzerName := docMapping.analyzerNameForPath(path) - if analyzerName != "" { - return analyzerName - } - } - // now try the default mapping - pathMapping := im.DefaultMapping.documentMappingForPath(path) - if pathMapping != nil { - if len(pathMapping.Fields) > 0 { - if pathMapping.Fields[0].Analyzer != "" { - return pathMapping.Fields[0].Analyzer - } - } - } - - // next we will try default analyzers for the path - pathDecoded := decodePath(path) - for _, docMapping := range im.TypeMapping { - rv := docMapping.defaultAnalyzerName(pathDecoded) - if rv != "" { - return rv - } - } - - return im.DefaultAnalyzer -} - -func (im *IndexMappingImpl) AnalyzerNamed(name string) *analysis.Analyzer { - analyzer, err := im.cache.AnalyzerNamed(name) - if err != nil { - logger.Printf("error using analyzer named: %s", name) - return nil - } - return analyzer -} - -func (im *IndexMappingImpl) DateTimeParserNamed(name string) analysis.DateTimeParser { - if name == "" { - name = im.DefaultDateTimeParser - } - dateTimeParser, err := im.cache.DateTimeParserNamed(name) - if err != nil { - logger.Printf("error using datetime parser named: %s", name) - return nil - } - return dateTimeParser -} - -func (im *IndexMappingImpl) datetimeParserNameForPath(path string) string { - - // first we look for explicit mapping on the field - for _, docMapping := range im.TypeMapping { - pathMapping := docMapping.documentMappingForPath(path) - if pathMapping != nil { - if len(pathMapping.Fields) > 0 { - if pathMapping.Fields[0].Analyzer != "" { - return 
pathMapping.Fields[0].Analyzer - } - } - } - } - - return im.DefaultDateTimeParser -} - -func (im *IndexMappingImpl) AnalyzeText(analyzerName string, text []byte) (analysis.TokenStream, error) { - analyzer, err := im.cache.AnalyzerNamed(analyzerName) - if err != nil { - return nil, err - } - return analyzer.Analyze(text), nil -} - -// FieldAnalyzer returns the name of the analyzer used on a field. -func (im *IndexMappingImpl) FieldAnalyzer(field string) string { - return im.AnalyzerNameForPath(field) -} - -// wrapper to satisfy new interface - -func (im *IndexMappingImpl) DefaultSearchField() string { - return im.DefaultField -} diff --git a/vendor/github.com/blevesearch/bleve/v2/mapping/mapping.go b/vendor/github.com/blevesearch/bleve/v2/mapping/mapping.go deleted file mode 100644 index bcf6749a4..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/mapping/mapping.go +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package mapping - -import ( - "io/ioutil" - "log" - - "github.com/blevesearch/bleve/v2/analysis" - "github.com/blevesearch/bleve/v2/document" -) - -// A Classifier is an interface describing any object which knows how to -// identify its own type. Alternatively, if a struct already has a Type -// field or method in conflict, one can use BleveType instead. 
-type Classifier interface { - Type() string -} - -// A bleveClassifier is an interface describing any object which knows how -// to identify its own type. This is introduced as an alternative to the -// Classifier interface which often has naming conflicts with existing -// structures. -type bleveClassifier interface { - BleveType() string -} - -var logger = log.New(ioutil.Discard, "bleve mapping ", log.LstdFlags) - -// SetLog sets the logger used for logging -// by default log messages are sent to ioutil.Discard -func SetLog(l *log.Logger) { - logger = l -} - -type IndexMapping interface { - MapDocument(doc *document.Document, data interface{}) error - Validate() error - - DateTimeParserNamed(name string) analysis.DateTimeParser - - DefaultSearchField() string - - AnalyzerNameForPath(path string) string - AnalyzerNamed(name string) *analysis.Analyzer -} diff --git a/vendor/github.com/blevesearch/bleve/v2/mapping/reflect.go b/vendor/github.com/blevesearch/bleve/v2/mapping/reflect.go deleted file mode 100644 index 6500a7059..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/mapping/reflect.go +++ /dev/null @@ -1,92 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package mapping - -import ( - "reflect" - "strings" -) - -func lookupPropertyPath(data interface{}, path string) interface{} { - pathParts := decodePath(path) - - current := data - for _, part := range pathParts { - current = lookupPropertyPathPart(current, part) - if current == nil { - break - } - } - - return current -} - -func lookupPropertyPathPart(data interface{}, part string) interface{} { - val := reflect.ValueOf(data) - if !val.IsValid() { - return nil - } - typ := val.Type() - switch typ.Kind() { - case reflect.Map: - // FIXME can add support for other map keys in the future - if typ.Key().Kind() == reflect.String { - key := reflect.ValueOf(part) - entry := val.MapIndex(key) - if entry.IsValid() { - return entry.Interface() - } - } - case reflect.Struct: - field := val.FieldByName(part) - if field.IsValid() && field.CanInterface() { - return field.Interface() - } - case reflect.Ptr: - ptrElem := val.Elem() - if ptrElem.IsValid() && ptrElem.CanInterface() { - return lookupPropertyPathPart(ptrElem.Interface(), part) - } - } - return nil -} - -const pathSeparator = "." - -func decodePath(path string) []string { - return strings.Split(path, pathSeparator) -} - -func encodePath(pathElements []string) string { - return strings.Join(pathElements, pathSeparator) -} - -func mustString(data interface{}) (string, bool) { - if data != nil { - str, ok := data.(string) - if ok { - return str, true - } - } - return "", false -} - -// parseTagName extracts the field name from a struct tag -func parseTagName(tag string) string { - if idx := strings.Index(tag, ","); idx != -1 { - return tag[:idx] - } - return tag -} diff --git a/vendor/github.com/blevesearch/bleve/v2/query.go b/vendor/github.com/blevesearch/bleve/v2/query.go deleted file mode 100644 index 91d2d5f42..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/query.go +++ /dev/null @@ -1,218 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package bleve - -import ( - "time" - - "github.com/blevesearch/bleve/v2/search/query" -) - -// NewBoolFieldQuery creates a new Query for boolean fields -func NewBoolFieldQuery(val bool) *query.BoolFieldQuery { - return query.NewBoolFieldQuery(val) -} - -// NewBooleanQuery creates a compound Query composed -// of several other Query objects. -// These other query objects are added using the -// AddMust() AddShould() and AddMustNot() methods. -// Result documents must satisfy ALL of the -// must Queries. -// Result documents must satisfy NONE of the must not -// Queries. -// Result documents that ALSO satisfy any of the should -// Queries will score higher. -func NewBooleanQuery() *query.BooleanQuery { - return query.NewBooleanQuery(nil, nil, nil) -} - -// NewConjunctionQuery creates a new compound Query. -// Result documents must satisfy all of the queries. -func NewConjunctionQuery(conjuncts ...query.Query) *query.ConjunctionQuery { - return query.NewConjunctionQuery(conjuncts) -} - -// NewDateRangeQuery creates a new Query for ranges -// of date values. -// Date strings are parsed using the DateTimeParser configured in the -// top-level config.QueryDateTimeParser -// Either, but not both endpoints can be nil. 
-func NewDateRangeQuery(start, end time.Time) *query.DateRangeQuery { - return query.NewDateRangeQuery(start, end) -} - -// NewDateRangeInclusiveQuery creates a new Query for ranges -// of date values. -// Date strings are parsed using the DateTimeParser configured in the -// top-level config.QueryDateTimeParser -// Either, but not both endpoints can be nil. -// startInclusive and endInclusive control inclusion of the endpoints. -func NewDateRangeInclusiveQuery(start, end time.Time, startInclusive, endInclusive *bool) *query.DateRangeQuery { - return query.NewDateRangeInclusiveQuery(start, end, startInclusive, endInclusive) -} - -// NewDisjunctionQuery creates a new compound Query. -// Result documents satisfy at least one Query. -func NewDisjunctionQuery(disjuncts ...query.Query) *query.DisjunctionQuery { - return query.NewDisjunctionQuery(disjuncts) -} - -// NewDocIDQuery creates a new Query object returning indexed documents among -// the specified set. Combine it with ConjunctionQuery to restrict the scope of -// other queries output. -func NewDocIDQuery(ids []string) *query.DocIDQuery { - return query.NewDocIDQuery(ids) -} - -// NewFuzzyQuery creates a new Query which finds -// documents containing terms within a specific -// fuzziness of the specified term. -// The default fuzziness is 1. -// -// The current implementation uses Levenshtein edit -// distance as the fuzziness metric. -func NewFuzzyQuery(term string) *query.FuzzyQuery { - return query.NewFuzzyQuery(term) -} - -// NewMatchAllQuery creates a Query which will -// match all documents in the index. -func NewMatchAllQuery() *query.MatchAllQuery { - return query.NewMatchAllQuery() -} - -// NewMatchNoneQuery creates a Query which will not -// match any documents in the index. -func NewMatchNoneQuery() *query.MatchNoneQuery { - return query.NewMatchNoneQuery() -} - -// NewMatchPhraseQuery creates a new Query object -// for matching phrases in the index. -// An Analyzer is chosen based on the field. 
-// Input text is analyzed using this analyzer. -// Token terms resulting from this analysis are -// used to build a search phrase. Result documents -// must match this phrase. Queried field must have been indexed with -// IncludeTermVectors set to true. -func NewMatchPhraseQuery(matchPhrase string) *query.MatchPhraseQuery { - return query.NewMatchPhraseQuery(matchPhrase) -} - -// NewMatchQuery creates a Query for matching text. -// An Analyzer is chosen based on the field. -// Input text is analyzed using this analyzer. -// Token terms resulting from this analysis are -// used to perform term searches. Result documents -// must satisfy at least one of these term searches. -func NewMatchQuery(match string) *query.MatchQuery { - return query.NewMatchQuery(match) -} - -// NewNumericRangeQuery creates a new Query for ranges -// of numeric values. -// Either, but not both endpoints can be nil. -// The minimum value is inclusive. -// The maximum value is exclusive. -func NewNumericRangeQuery(min, max *float64) *query.NumericRangeQuery { - return query.NewNumericRangeQuery(min, max) -} - -// NewNumericRangeInclusiveQuery creates a new Query for ranges -// of numeric values. -// Either, but not both endpoints can be nil. -// Control endpoint inclusion with inclusiveMin, inclusiveMax. -func NewNumericRangeInclusiveQuery(min, max *float64, minInclusive, maxInclusive *bool) *query.NumericRangeQuery { - return query.NewNumericRangeInclusiveQuery(min, max, minInclusive, maxInclusive) -} - -// NewTermRangeQuery creates a new Query for ranges -// of text terms. -// Either, but not both endpoints can be "". -// The minimum value is inclusive. -// The maximum value is exclusive. -func NewTermRangeQuery(min, max string) *query.TermRangeQuery { - return query.NewTermRangeQuery(min, max) -} - -// NewTermRangeInclusiveQuery creates a new Query for ranges -// of text terms. -// Either, but not both endpoints can be "". -// Control endpoint inclusion with inclusiveMin, inclusiveMax. 
-func NewTermRangeInclusiveQuery(min, max string, minInclusive, maxInclusive *bool) *query.TermRangeQuery { - return query.NewTermRangeInclusiveQuery(min, max, minInclusive, maxInclusive) -} - -// NewPhraseQuery creates a new Query for finding -// exact term phrases in the index. -// The provided terms must exist in the correct -// order, at the correct index offsets, in the -// specified field. Queried field must have been indexed with -// IncludeTermVectors set to true. -func NewPhraseQuery(terms []string, field string) *query.PhraseQuery { - return query.NewPhraseQuery(terms, field) -} - -// NewPrefixQuery creates a new Query which finds -// documents containing terms that start with the -// specified prefix. -func NewPrefixQuery(prefix string) *query.PrefixQuery { - return query.NewPrefixQuery(prefix) -} - -// NewRegexpQuery creates a new Query which finds -// documents containing terms that match the -// specified regular expression. -func NewRegexpQuery(regexp string) *query.RegexpQuery { - return query.NewRegexpQuery(regexp) -} - -// NewQueryStringQuery creates a new Query used for -// finding documents that satisfy a query string. The -// query string is a small query language for humans. -func NewQueryStringQuery(q string) *query.QueryStringQuery { - return query.NewQueryStringQuery(q) -} - -// NewTermQuery creates a new Query for finding an -// exact term match in the index. -func NewTermQuery(term string) *query.TermQuery { - return query.NewTermQuery(term) -} - -// NewWildcardQuery creates a new Query which finds -// documents containing terms that match the -// specified wildcard. In the wildcard pattern '*' -// will match any sequence of 0 or more characters, -// and '?' will match any single character. -func NewWildcardQuery(wildcard string) *query.WildcardQuery { - return query.NewWildcardQuery(wildcard) -} - -// NewGeoBoundingBoxQuery creates a new Query for performing geo bounding -// box searches. 
The arguments describe the position of the box and documents -// which have an indexed geo point inside the box will be returned. -func NewGeoBoundingBoxQuery(topLeftLon, topLeftLat, bottomRightLon, bottomRightLat float64) *query.GeoBoundingBoxQuery { - return query.NewGeoBoundingBoxQuery(topLeftLon, topLeftLat, bottomRightLon, bottomRightLat) -} - -// NewGeoDistanceQuery creates a new Query for performing geo distance -// searches. The arguments describe a position and a distance. Documents -// which have an indexed geo point which is less than or equal to the provided -// distance from the given position will be returned. -func NewGeoDistanceQuery(lon, lat float64, distance string) *query.GeoDistanceQuery { - return query.NewGeoDistanceQuery(lon, lat, distance) -} diff --git a/vendor/github.com/blevesearch/bleve/v2/registry/analyzer.go b/vendor/github.com/blevesearch/bleve/v2/registry/analyzer.go deleted file mode 100644 index 6c0d73b31..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/registry/analyzer.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package registry - -import ( - "fmt" - - "github.com/blevesearch/bleve/v2/analysis" -) - -func RegisterAnalyzer(name string, constructor AnalyzerConstructor) { - _, exists := analyzers[name] - if exists { - panic(fmt.Errorf("attempted to register duplicate analyzer named '%s'", name)) - } - analyzers[name] = constructor -} - -type AnalyzerConstructor func(config map[string]interface{}, cache *Cache) (*analysis.Analyzer, error) -type AnalyzerRegistry map[string]AnalyzerConstructor - -type AnalyzerCache struct { - *ConcurrentCache -} - -func NewAnalyzerCache() *AnalyzerCache { - return &AnalyzerCache{ - NewConcurrentCache(), - } -} - -func AnalyzerBuild(name string, config map[string]interface{}, cache *Cache) (interface{}, error) { - cons, registered := analyzers[name] - if !registered { - return nil, fmt.Errorf("no analyzer with name or type '%s' registered", name) - } - analyzer, err := cons(config, cache) - if err != nil { - return nil, fmt.Errorf("error building analyzer: %v", err) - } - return analyzer, nil -} - -func (c *AnalyzerCache) AnalyzerNamed(name string, cache *Cache) (*analysis.Analyzer, error) { - item, err := c.ItemNamed(name, cache, AnalyzerBuild) - if err != nil { - return nil, err - } - return item.(*analysis.Analyzer), nil -} - -func (c *AnalyzerCache) DefineAnalyzer(name string, typ string, config map[string]interface{}, cache *Cache) (*analysis.Analyzer, error) { - item, err := c.DefineItem(name, typ, config, cache, AnalyzerBuild) - if err != nil { - if err == ErrAlreadyDefined { - return nil, fmt.Errorf("analyzer named '%s' already defined", name) - } - return nil, err - } - return item.(*analysis.Analyzer), nil -} - -func AnalyzerTypesAndInstances() ([]string, []string) { - emptyConfig := map[string]interface{}{} - emptyCache := NewCache() - var types []string - var instances []string - for name, cons := range analyzers { - _, err := cons(emptyConfig, emptyCache) - if err == nil { - instances = append(instances, name) - } else { - types = 
append(types, name) - } - } - return types, instances -} diff --git a/vendor/github.com/blevesearch/bleve/v2/registry/cache.go b/vendor/github.com/blevesearch/bleve/v2/registry/cache.go deleted file mode 100644 index b0ce852a2..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/registry/cache.go +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright (c) 2016 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package registry - -import ( - "fmt" - "sync" -) - -var ErrAlreadyDefined = fmt.Errorf("item already defined") - -type CacheBuild func(name string, config map[string]interface{}, cache *Cache) (interface{}, error) - -type ConcurrentCache struct { - mutex sync.RWMutex - data map[string]interface{} -} - -func NewConcurrentCache() *ConcurrentCache { - return &ConcurrentCache{ - data: make(map[string]interface{}), - } -} - -func (c *ConcurrentCache) ItemNamed(name string, cache *Cache, build CacheBuild) (interface{}, error) { - c.mutex.RLock() - item, cached := c.data[name] - if cached { - c.mutex.RUnlock() - return item, nil - } - // give up read lock - c.mutex.RUnlock() - // try to build it - newItem, err := build(name, nil, cache) - if err != nil { - return nil, err - } - // acquire write lock - c.mutex.Lock() - defer c.mutex.Unlock() - // check again because it could have been created while trading locks - item, cached = c.data[name] - if cached { - return item, nil - } - c.data[name] = newItem - return newItem, nil -} - -func (c 
*ConcurrentCache) DefineItem(name string, typ string, config map[string]interface{}, cache *Cache, build CacheBuild) (interface{}, error) { - c.mutex.RLock() - _, cached := c.data[name] - if cached { - c.mutex.RUnlock() - return nil, ErrAlreadyDefined - } - // give up read lock so others lookups can proceed - c.mutex.RUnlock() - // really not there, try to build it - newItem, err := build(typ, config, cache) - if err != nil { - return nil, err - } - // now we've built it, acquire lock - c.mutex.Lock() - defer c.mutex.Unlock() - // check again because it could have been created while trading locks - _, cached = c.data[name] - if cached { - return nil, ErrAlreadyDefined - } - c.data[name] = newItem - return newItem, nil -} diff --git a/vendor/github.com/blevesearch/bleve/v2/registry/char_filter.go b/vendor/github.com/blevesearch/bleve/v2/registry/char_filter.go deleted file mode 100644 index aa400be68..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/registry/char_filter.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package registry - -import ( - "fmt" - - "github.com/blevesearch/bleve/v2/analysis" -) - -func RegisterCharFilter(name string, constructor CharFilterConstructor) { - _, exists := charFilters[name] - if exists { - panic(fmt.Errorf("attempted to register duplicate char filter named '%s'", name)) - } - charFilters[name] = constructor -} - -type CharFilterConstructor func(config map[string]interface{}, cache *Cache) (analysis.CharFilter, error) -type CharFilterRegistry map[string]CharFilterConstructor - -type CharFilterCache struct { - *ConcurrentCache -} - -func NewCharFilterCache() *CharFilterCache { - return &CharFilterCache{ - NewConcurrentCache(), - } -} - -func CharFilterBuild(name string, config map[string]interface{}, cache *Cache) (interface{}, error) { - cons, registered := charFilters[name] - if !registered { - return nil, fmt.Errorf("no char filter with name or type '%s' registered", name) - } - charFilter, err := cons(config, cache) - if err != nil { - return nil, fmt.Errorf("error building char filter: %v", err) - } - return charFilter, nil -} - -func (c *CharFilterCache) CharFilterNamed(name string, cache *Cache) (analysis.CharFilter, error) { - item, err := c.ItemNamed(name, cache, CharFilterBuild) - if err != nil { - return nil, err - } - return item.(analysis.CharFilter), nil -} - -func (c *CharFilterCache) DefineCharFilter(name string, typ string, config map[string]interface{}, cache *Cache) (analysis.CharFilter, error) { - item, err := c.DefineItem(name, typ, config, cache, CharFilterBuild) - if err != nil { - if err == ErrAlreadyDefined { - return nil, fmt.Errorf("char filter named '%s' already defined", name) - } - return nil, err - } - return item.(analysis.CharFilter), nil -} - -func CharFilterTypesAndInstances() ([]string, []string) { - emptyConfig := map[string]interface{}{} - emptyCache := NewCache() - var types []string - var instances []string - for name, cons := range charFilters { - _, err := cons(emptyConfig, emptyCache) - if err == 
nil { - instances = append(instances, name) - } else { - types = append(types, name) - } - } - return types, instances -} diff --git a/vendor/github.com/blevesearch/bleve/v2/registry/datetime_parser.go b/vendor/github.com/blevesearch/bleve/v2/registry/datetime_parser.go deleted file mode 100644 index a2d8ac24a..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/registry/datetime_parser.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package registry - -import ( - "fmt" - - "github.com/blevesearch/bleve/v2/analysis" -) - -func RegisterDateTimeParser(name string, constructor DateTimeParserConstructor) { - _, exists := dateTimeParsers[name] - if exists { - panic(fmt.Errorf("attempted to register duplicate date time parser named '%s'", name)) - } - dateTimeParsers[name] = constructor -} - -type DateTimeParserConstructor func(config map[string]interface{}, cache *Cache) (analysis.DateTimeParser, error) -type DateTimeParserRegistry map[string]DateTimeParserConstructor - -type DateTimeParserCache struct { - *ConcurrentCache -} - -func NewDateTimeParserCache() *DateTimeParserCache { - return &DateTimeParserCache{ - NewConcurrentCache(), - } -} - -func DateTimeParserBuild(name string, config map[string]interface{}, cache *Cache) (interface{}, error) { - cons, registered := dateTimeParsers[name] - if !registered { - return nil, fmt.Errorf("no date time parser with name or type '%s' registered", name) - } - dateTimeParser, err := cons(config, cache) - if err != nil { - return nil, fmt.Errorf("error building date time parser: %v", err) - } - return dateTimeParser, nil -} - -func (c *DateTimeParserCache) DateTimeParserNamed(name string, cache *Cache) (analysis.DateTimeParser, error) { - item, err := c.ItemNamed(name, cache, DateTimeParserBuild) - if err != nil { - return nil, err - } - return item.(analysis.DateTimeParser), nil -} - -func (c *DateTimeParserCache) DefineDateTimeParser(name string, typ string, config map[string]interface{}, cache *Cache) (analysis.DateTimeParser, error) { - item, err := c.DefineItem(name, typ, config, cache, DateTimeParserBuild) - if err != nil { - if err == ErrAlreadyDefined { - return nil, fmt.Errorf("date time parser named '%s' already defined", name) - } - return nil, err - } - return item.(analysis.DateTimeParser), nil -} - -func DateTimeParserTypesAndInstances() ([]string, []string) { - emptyConfig := map[string]interface{}{} - emptyCache := NewCache() - var types 
[]string - var instances []string - for name, cons := range dateTimeParsers { - _, err := cons(emptyConfig, emptyCache) - if err == nil { - instances = append(instances, name) - } else { - types = append(types, name) - } - } - return types, instances -} diff --git a/vendor/github.com/blevesearch/bleve/v2/registry/fragment_formatter.go b/vendor/github.com/blevesearch/bleve/v2/registry/fragment_formatter.go deleted file mode 100644 index 6699f53ba..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/registry/fragment_formatter.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package registry - -import ( - "fmt" - - "github.com/blevesearch/bleve/v2/search/highlight" -) - -func RegisterFragmentFormatter(name string, constructor FragmentFormatterConstructor) { - _, exists := fragmentFormatters[name] - if exists { - panic(fmt.Errorf("attempted to register duplicate fragment formatter named '%s'", name)) - } - fragmentFormatters[name] = constructor -} - -type FragmentFormatterConstructor func(config map[string]interface{}, cache *Cache) (highlight.FragmentFormatter, error) -type FragmentFormatterRegistry map[string]FragmentFormatterConstructor - -type FragmentFormatterCache struct { - *ConcurrentCache -} - -func NewFragmentFormatterCache() *FragmentFormatterCache { - return &FragmentFormatterCache{ - NewConcurrentCache(), - } -} - -func FragmentFormatterBuild(name string, config map[string]interface{}, cache *Cache) (interface{}, error) { - cons, registered := fragmentFormatters[name] - if !registered { - return nil, fmt.Errorf("no fragment formatter with name or type '%s' registered", name) - } - fragmentFormatter, err := cons(config, cache) - if err != nil { - return nil, fmt.Errorf("error building fragment formatter: %v", err) - } - return fragmentFormatter, nil -} - -func (c *FragmentFormatterCache) FragmentFormatterNamed(name string, cache *Cache) (highlight.FragmentFormatter, error) { - item, err := c.ItemNamed(name, cache, FragmentFormatterBuild) - if err != nil { - return nil, err - } - return item.(highlight.FragmentFormatter), nil -} - -func (c *FragmentFormatterCache) DefineFragmentFormatter(name string, typ string, config map[string]interface{}, cache *Cache) (highlight.FragmentFormatter, error) { - item, err := c.DefineItem(name, typ, config, cache, FragmentFormatterBuild) - if err != nil { - if err == ErrAlreadyDefined { - return nil, fmt.Errorf("fragment formatter named '%s' already defined", name) - } - return nil, err - } - return item.(highlight.FragmentFormatter), nil -} - -func FragmentFormatterTypesAndInstances() 
([]string, []string) { - emptyConfig := map[string]interface{}{} - emptyCache := NewCache() - var types []string - var instances []string - for name, cons := range fragmentFormatters { - _, err := cons(emptyConfig, emptyCache) - if err == nil { - instances = append(instances, name) - } else { - types = append(types, name) - } - } - return types, instances -} diff --git a/vendor/github.com/blevesearch/bleve/v2/registry/fragmenter.go b/vendor/github.com/blevesearch/bleve/v2/registry/fragmenter.go deleted file mode 100644 index cd1e32d28..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/registry/fragmenter.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package registry - -import ( - "fmt" - - "github.com/blevesearch/bleve/v2/search/highlight" -) - -func RegisterFragmenter(name string, constructor FragmenterConstructor) { - _, exists := fragmenters[name] - if exists { - panic(fmt.Errorf("attempted to register duplicate fragmenter named '%s'", name)) - } - fragmenters[name] = constructor -} - -type FragmenterConstructor func(config map[string]interface{}, cache *Cache) (highlight.Fragmenter, error) -type FragmenterRegistry map[string]FragmenterConstructor - -type FragmenterCache struct { - *ConcurrentCache -} - -func NewFragmenterCache() *FragmenterCache { - return &FragmenterCache{ - NewConcurrentCache(), - } -} - -func FragmenterBuild(name string, config map[string]interface{}, cache *Cache) (interface{}, error) { - cons, registered := fragmenters[name] - if !registered { - return nil, fmt.Errorf("no fragmenter with name or type '%s' registered", name) - } - fragmenter, err := cons(config, cache) - if err != nil { - return nil, fmt.Errorf("error building fragmenter: %v", err) - } - return fragmenter, nil -} - -func (c *FragmenterCache) FragmenterNamed(name string, cache *Cache) (highlight.Fragmenter, error) { - item, err := c.ItemNamed(name, cache, FragmenterBuild) - if err != nil { - return nil, err - } - return item.(highlight.Fragmenter), nil -} - -func (c *FragmenterCache) DefineFragmenter(name string, typ string, config map[string]interface{}, cache *Cache) (highlight.Fragmenter, error) { - item, err := c.DefineItem(name, typ, config, cache, FragmenterBuild) - if err != nil { - if err == ErrAlreadyDefined { - return nil, fmt.Errorf("fragmenter named '%s' already defined", name) - } - return nil, err - } - return item.(highlight.Fragmenter), nil -} - -func FragmenterTypesAndInstances() ([]string, []string) { - emptyConfig := map[string]interface{}{} - emptyCache := NewCache() - var types []string - var instances []string - for name, cons := range fragmenters { - _, err := cons(emptyConfig, emptyCache) - if 
err == nil { - instances = append(instances, name) - } else { - types = append(types, name) - } - } - return types, instances -} diff --git a/vendor/github.com/blevesearch/bleve/v2/registry/highlighter.go b/vendor/github.com/blevesearch/bleve/v2/registry/highlighter.go deleted file mode 100644 index 8eb210fb3..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/registry/highlighter.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package registry - -import ( - "fmt" - - "github.com/blevesearch/bleve/v2/search/highlight" -) - -func RegisterHighlighter(name string, constructor HighlighterConstructor) { - _, exists := highlighters[name] - if exists { - panic(fmt.Errorf("attempted to register duplicate highlighter named '%s'", name)) - } - highlighters[name] = constructor -} - -type HighlighterConstructor func(config map[string]interface{}, cache *Cache) (highlight.Highlighter, error) -type HighlighterRegistry map[string]HighlighterConstructor - -type HighlighterCache struct { - *ConcurrentCache -} - -func NewHighlighterCache() *HighlighterCache { - return &HighlighterCache{ - NewConcurrentCache(), - } -} - -func HighlighterBuild(name string, config map[string]interface{}, cache *Cache) (interface{}, error) { - cons, registered := highlighters[name] - if !registered { - return nil, fmt.Errorf("no highlighter with name or type '%s' registered", name) - } - highlighter, err := cons(config, cache) - if err != nil { - return nil, fmt.Errorf("error building highlighter: %v", err) - } - return highlighter, nil -} - -func (c *HighlighterCache) HighlighterNamed(name string, cache *Cache) (highlight.Highlighter, error) { - item, err := c.ItemNamed(name, cache, HighlighterBuild) - if err != nil { - return nil, err - } - return item.(highlight.Highlighter), nil -} - -func (c *HighlighterCache) DefineHighlighter(name string, typ string, config map[string]interface{}, cache *Cache) (highlight.Highlighter, error) { - item, err := c.DefineItem(name, typ, config, cache, HighlighterBuild) - if err != nil { - if err == ErrAlreadyDefined { - return nil, fmt.Errorf("highlighter named '%s' already defined", name) - } - return nil, err - } - return item.(highlight.Highlighter), nil -} - -func HighlighterTypesAndInstances() ([]string, []string) { - emptyConfig := map[string]interface{}{} - emptyCache := NewCache() - var types []string - var instances []string - for name, cons := range highlighters { - _, err := 
cons(emptyConfig, emptyCache) - if err == nil { - instances = append(instances, name) - } else { - types = append(types, name) - } - } - return types, instances -} diff --git a/vendor/github.com/blevesearch/bleve/v2/registry/index_type.go b/vendor/github.com/blevesearch/bleve/v2/registry/index_type.go deleted file mode 100644 index 67938c4af..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/registry/index_type.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright (c) 2015 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package registry - -import ( - "fmt" - - index "github.com/blevesearch/bleve_index_api" -) - -func RegisterIndexType(name string, constructor IndexTypeConstructor) { - _, exists := indexTypes[name] - if exists { - panic(fmt.Errorf("attempted to register duplicate index encoding named '%s'", name)) - } - indexTypes[name] = constructor -} - -type IndexTypeConstructor func(storeName string, storeConfig map[string]interface{}, analysisQueue *index.AnalysisQueue) (index.Index, error) -type IndexTypeRegistry map[string]IndexTypeConstructor - -func IndexTypeConstructorByName(name string) IndexTypeConstructor { - return indexTypes[name] -} - -func IndexTypesAndInstances() ([]string, []string) { - var types []string - var instances []string - for name := range stores { - types = append(types, name) - } - return types, instances -} diff --git a/vendor/github.com/blevesearch/bleve/v2/registry/registry.go b/vendor/github.com/blevesearch/bleve/v2/registry/registry.go deleted file mode 100644 index 02125e64f..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/registry/registry.go +++ /dev/null @@ -1,184 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package registry - -import ( - "fmt" - - "github.com/blevesearch/bleve/v2/analysis" - "github.com/blevesearch/bleve/v2/search/highlight" -) - -var stores = make(KVStoreRegistry, 0) -var indexTypes = make(IndexTypeRegistry, 0) - -// highlight -var fragmentFormatters = make(FragmentFormatterRegistry, 0) -var fragmenters = make(FragmenterRegistry, 0) -var highlighters = make(HighlighterRegistry, 0) - -// analysis -var charFilters = make(CharFilterRegistry, 0) -var tokenizers = make(TokenizerRegistry, 0) -var tokenMaps = make(TokenMapRegistry, 0) -var tokenFilters = make(TokenFilterRegistry, 0) -var analyzers = make(AnalyzerRegistry, 0) -var dateTimeParsers = make(DateTimeParserRegistry, 0) - -type Cache struct { - CharFilters *CharFilterCache - Tokenizers *TokenizerCache - TokenMaps *TokenMapCache - TokenFilters *TokenFilterCache - Analyzers *AnalyzerCache - DateTimeParsers *DateTimeParserCache - FragmentFormatters *FragmentFormatterCache - Fragmenters *FragmenterCache - Highlighters *HighlighterCache -} - -func NewCache() *Cache { - return &Cache{ - CharFilters: NewCharFilterCache(), - Tokenizers: NewTokenizerCache(), - TokenMaps: NewTokenMapCache(), - TokenFilters: NewTokenFilterCache(), - Analyzers: NewAnalyzerCache(), - DateTimeParsers: NewDateTimeParserCache(), - FragmentFormatters: NewFragmentFormatterCache(), - Fragmenters: NewFragmenterCache(), - Highlighters: NewHighlighterCache(), - } -} - -func typeFromConfig(config map[string]interface{}) (string, error) { - prop, ok := config["type"] - if !ok { - return "", fmt.Errorf("'type' property is not defined") - } - typ, ok := prop.(string) - if !ok { - return "", fmt.Errorf("'type' property must be a string, not %T", prop) - } - return typ, nil -} - -func (c *Cache) CharFilterNamed(name string) (analysis.CharFilter, error) { - return c.CharFilters.CharFilterNamed(name, c) -} - -func (c *Cache) DefineCharFilter(name string, config map[string]interface{}) (analysis.CharFilter, error) { - typ, err := 
typeFromConfig(config) - if err != nil { - return nil, err - } - return c.CharFilters.DefineCharFilter(name, typ, config, c) -} - -func (c *Cache) TokenizerNamed(name string) (analysis.Tokenizer, error) { - return c.Tokenizers.TokenizerNamed(name, c) -} - -func (c *Cache) DefineTokenizer(name string, config map[string]interface{}) (analysis.Tokenizer, error) { - typ, err := typeFromConfig(config) - if err != nil { - return nil, fmt.Errorf("cannot resolve '%s' tokenizer type: %s", name, err) - } - return c.Tokenizers.DefineTokenizer(name, typ, config, c) -} - -func (c *Cache) TokenMapNamed(name string) (analysis.TokenMap, error) { - return c.TokenMaps.TokenMapNamed(name, c) -} - -func (c *Cache) DefineTokenMap(name string, config map[string]interface{}) (analysis.TokenMap, error) { - typ, err := typeFromConfig(config) - if err != nil { - return nil, err - } - return c.TokenMaps.DefineTokenMap(name, typ, config, c) -} - -func (c *Cache) TokenFilterNamed(name string) (analysis.TokenFilter, error) { - return c.TokenFilters.TokenFilterNamed(name, c) -} - -func (c *Cache) DefineTokenFilter(name string, config map[string]interface{}) (analysis.TokenFilter, error) { - typ, err := typeFromConfig(config) - if err != nil { - return nil, err - } - return c.TokenFilters.DefineTokenFilter(name, typ, config, c) -} - -func (c *Cache) AnalyzerNamed(name string) (*analysis.Analyzer, error) { - return c.Analyzers.AnalyzerNamed(name, c) -} - -func (c *Cache) DefineAnalyzer(name string, config map[string]interface{}) (*analysis.Analyzer, error) { - typ, err := typeFromConfig(config) - if err != nil { - return nil, err - } - return c.Analyzers.DefineAnalyzer(name, typ, config, c) -} - -func (c *Cache) DateTimeParserNamed(name string) (analysis.DateTimeParser, error) { - return c.DateTimeParsers.DateTimeParserNamed(name, c) -} - -func (c *Cache) DefineDateTimeParser(name string, config map[string]interface{}) (analysis.DateTimeParser, error) { - typ, err := typeFromConfig(config) - if 
err != nil { - return nil, err - } - return c.DateTimeParsers.DefineDateTimeParser(name, typ, config, c) -} - -func (c *Cache) FragmentFormatterNamed(name string) (highlight.FragmentFormatter, error) { - return c.FragmentFormatters.FragmentFormatterNamed(name, c) -} - -func (c *Cache) DefineFragmentFormatter(name string, config map[string]interface{}) (highlight.FragmentFormatter, error) { - typ, err := typeFromConfig(config) - if err != nil { - return nil, err - } - return c.FragmentFormatters.DefineFragmentFormatter(name, typ, config, c) -} - -func (c *Cache) FragmenterNamed(name string) (highlight.Fragmenter, error) { - return c.Fragmenters.FragmenterNamed(name, c) -} - -func (c *Cache) DefineFragmenter(name string, config map[string]interface{}) (highlight.Fragmenter, error) { - typ, err := typeFromConfig(config) - if err != nil { - return nil, err - } - return c.Fragmenters.DefineFragmenter(name, typ, config, c) -} - -func (c *Cache) HighlighterNamed(name string) (highlight.Highlighter, error) { - return c.Highlighters.HighlighterNamed(name, c) -} - -func (c *Cache) DefineHighlighter(name string, config map[string]interface{}) (highlight.Highlighter, error) { - typ, err := typeFromConfig(config) - if err != nil { - return nil, err - } - return c.Highlighters.DefineHighlighter(name, typ, config, c) -} diff --git a/vendor/github.com/blevesearch/bleve/v2/registry/store.go b/vendor/github.com/blevesearch/bleve/v2/registry/store.go deleted file mode 100644 index 02ebd888c..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/registry/store.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package registry - -import ( - "fmt" - - "github.com/blevesearch/upsidedown_store_api" -) - -func RegisterKVStore(name string, constructor KVStoreConstructor) { - _, exists := stores[name] - if exists { - panic(fmt.Errorf("attempted to register duplicate store named '%s'", name)) - } - stores[name] = constructor -} - -// KVStoreConstructor is used to build a KVStore of a specific type when -// specificied by the index configuration. In addition to meeting the -// store.KVStore interface, KVStores must also support this constructor. -// Note that currently the values of config must -// be able to be marshaled and unmarshaled using the encoding/json library (used -// when reading/writing the index metadata file). -type KVStoreConstructor func(mo store.MergeOperator, config map[string]interface{}) (store.KVStore, error) -type KVStoreRegistry map[string]KVStoreConstructor - -func KVStoreConstructorByName(name string) KVStoreConstructor { - return stores[name] -} - -func KVStoreTypesAndInstances() ([]string, []string) { - var types []string - var instances []string - for name := range stores { - types = append(types, name) - } - return types, instances -} diff --git a/vendor/github.com/blevesearch/bleve/v2/registry/token_filter.go b/vendor/github.com/blevesearch/bleve/v2/registry/token_filter.go deleted file mode 100644 index df39411ae..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/registry/token_filter.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package registry - -import ( - "fmt" - - "github.com/blevesearch/bleve/v2/analysis" -) - -func RegisterTokenFilter(name string, constructor TokenFilterConstructor) { - _, exists := tokenFilters[name] - if exists { - panic(fmt.Errorf("attempted to register duplicate token filter named '%s'", name)) - } - tokenFilters[name] = constructor -} - -type TokenFilterConstructor func(config map[string]interface{}, cache *Cache) (analysis.TokenFilter, error) -type TokenFilterRegistry map[string]TokenFilterConstructor - -type TokenFilterCache struct { - *ConcurrentCache -} - -func NewTokenFilterCache() *TokenFilterCache { - return &TokenFilterCache{ - NewConcurrentCache(), - } -} - -func TokenFilterBuild(name string, config map[string]interface{}, cache *Cache) (interface{}, error) { - cons, registered := tokenFilters[name] - if !registered { - return nil, fmt.Errorf("no token filter with name or type '%s' registered", name) - } - tokenFilter, err := cons(config, cache) - if err != nil { - return nil, fmt.Errorf("error building token filter: %v", err) - } - return tokenFilter, nil -} - -func (c *TokenFilterCache) TokenFilterNamed(name string, cache *Cache) (analysis.TokenFilter, error) { - item, err := c.ItemNamed(name, cache, TokenFilterBuild) - if err != nil { - return nil, err - } - return item.(analysis.TokenFilter), nil -} - -func (c *TokenFilterCache) DefineTokenFilter(name string, typ string, config 
map[string]interface{}, cache *Cache) (analysis.TokenFilter, error) { - item, err := c.DefineItem(name, typ, config, cache, TokenFilterBuild) - if err != nil { - if err == ErrAlreadyDefined { - return nil, fmt.Errorf("token filter named '%s' already defined", name) - } - return nil, err - } - return item.(analysis.TokenFilter), nil -} - -func TokenFilterTypesAndInstances() ([]string, []string) { - emptyConfig := map[string]interface{}{} - emptyCache := NewCache() - var types []string - var instances []string - for name, cons := range tokenFilters { - _, err := cons(emptyConfig, emptyCache) - if err == nil { - instances = append(instances, name) - } else { - types = append(types, name) - } - } - return types, instances -} diff --git a/vendor/github.com/blevesearch/bleve/v2/registry/token_maps.go b/vendor/github.com/blevesearch/bleve/v2/registry/token_maps.go deleted file mode 100644 index 08c9956eb..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/registry/token_maps.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package registry - -import ( - "fmt" - - "github.com/blevesearch/bleve/v2/analysis" -) - -func RegisterTokenMap(name string, constructor TokenMapConstructor) { - _, exists := tokenMaps[name] - if exists { - panic(fmt.Errorf("attempted to register duplicate token map named '%s'", name)) - } - tokenMaps[name] = constructor -} - -type TokenMapConstructor func(config map[string]interface{}, cache *Cache) (analysis.TokenMap, error) -type TokenMapRegistry map[string]TokenMapConstructor - -type TokenMapCache struct { - *ConcurrentCache -} - -func NewTokenMapCache() *TokenMapCache { - return &TokenMapCache{ - NewConcurrentCache(), - } -} - -func TokenMapBuild(name string, config map[string]interface{}, cache *Cache) (interface{}, error) { - cons, registered := tokenMaps[name] - if !registered { - return nil, fmt.Errorf("no token map with name or type '%s' registered", name) - } - tokenMap, err := cons(config, cache) - if err != nil { - return nil, fmt.Errorf("error building token map: %v", err) - } - return tokenMap, nil -} - -func (c *TokenMapCache) TokenMapNamed(name string, cache *Cache) (analysis.TokenMap, error) { - item, err := c.ItemNamed(name, cache, TokenMapBuild) - if err != nil { - return nil, err - } - return item.(analysis.TokenMap), nil -} - -func (c *TokenMapCache) DefineTokenMap(name string, typ string, config map[string]interface{}, cache *Cache) (analysis.TokenMap, error) { - item, err := c.DefineItem(name, typ, config, cache, TokenMapBuild) - if err != nil { - if err == ErrAlreadyDefined { - return nil, fmt.Errorf("token map named '%s' already defined", name) - } - return nil, err - } - return item.(analysis.TokenMap), nil -} - -func TokenMapTypesAndInstances() ([]string, []string) { - emptyConfig := map[string]interface{}{} - emptyCache := NewCache() - var types []string - var instances []string - for name, cons := range tokenMaps { - _, err := cons(emptyConfig, emptyCache) - if err == nil { - instances = append(instances, name) - } else { - types = 
append(types, name) - } - } - return types, instances -} diff --git a/vendor/github.com/blevesearch/bleve/v2/registry/tokenizer.go b/vendor/github.com/blevesearch/bleve/v2/registry/tokenizer.go deleted file mode 100644 index eb954287c..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/registry/tokenizer.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package registry - -import ( - "fmt" - - "github.com/blevesearch/bleve/v2/analysis" -) - -func RegisterTokenizer(name string, constructor TokenizerConstructor) { - _, exists := tokenizers[name] - if exists { - panic(fmt.Errorf("attempted to register duplicate tokenizer named '%s'", name)) - } - tokenizers[name] = constructor -} - -type TokenizerConstructor func(config map[string]interface{}, cache *Cache) (analysis.Tokenizer, error) -type TokenizerRegistry map[string]TokenizerConstructor - -type TokenizerCache struct { - *ConcurrentCache -} - -func NewTokenizerCache() *TokenizerCache { - return &TokenizerCache{ - NewConcurrentCache(), - } -} - -func TokenizerBuild(name string, config map[string]interface{}, cache *Cache) (interface{}, error) { - cons, registered := tokenizers[name] - if !registered { - return nil, fmt.Errorf("no tokenizer with name or type '%s' registered", name) - } - tokenizer, err := cons(config, cache) - if err != nil { - return nil, fmt.Errorf("error building tokenizer: %v", err) - } - return tokenizer, nil -} 
- -func (c *TokenizerCache) TokenizerNamed(name string, cache *Cache) (analysis.Tokenizer, error) { - item, err := c.ItemNamed(name, cache, TokenizerBuild) - if err != nil { - return nil, err - } - return item.(analysis.Tokenizer), nil -} - -func (c *TokenizerCache) DefineTokenizer(name string, typ string, config map[string]interface{}, cache *Cache) (analysis.Tokenizer, error) { - item, err := c.DefineItem(name, typ, config, cache, TokenizerBuild) - if err != nil { - if err == ErrAlreadyDefined { - return nil, fmt.Errorf("tokenizer named '%s' already defined", name) - } - return nil, err - } - return item.(analysis.Tokenizer), nil -} - -func TokenizerTypesAndInstances() ([]string, []string) { - emptyConfig := map[string]interface{}{} - emptyCache := NewCache() - var types []string - var instances []string - for name, cons := range tokenizers { - _, err := cons(emptyConfig, emptyCache) - if err == nil { - instances = append(instances, name) - } else { - types = append(types, name) - } - } - return types, instances -} diff --git a/vendor/github.com/blevesearch/bleve/v2/search.go b/vendor/github.com/blevesearch/bleve/v2/search.go deleted file mode 100644 index 7397f566e..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/search.go +++ /dev/null @@ -1,633 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package bleve - -import ( - "encoding/json" - "fmt" - "reflect" - "sort" - "time" - - "github.com/blevesearch/bleve/v2/analysis" - "github.com/blevesearch/bleve/v2/analysis/datetime/optional" - "github.com/blevesearch/bleve/v2/document" - "github.com/blevesearch/bleve/v2/registry" - "github.com/blevesearch/bleve/v2/search" - "github.com/blevesearch/bleve/v2/search/collector" - "github.com/blevesearch/bleve/v2/search/query" - "github.com/blevesearch/bleve/v2/size" -) - -var reflectStaticSizeSearchResult int -var reflectStaticSizeSearchStatus int - -func init() { - var sr SearchResult - reflectStaticSizeSearchResult = int(reflect.TypeOf(sr).Size()) - var ss SearchStatus - reflectStaticSizeSearchStatus = int(reflect.TypeOf(ss).Size()) -} - -var cache = registry.NewCache() - -const defaultDateTimeParser = optional.Name - -type numericRange struct { - Name string `json:"name,omitempty"` - Min *float64 `json:"min,omitempty"` - Max *float64 `json:"max,omitempty"` -} - -type dateTimeRange struct { - Name string `json:"name,omitempty"` - Start time.Time `json:"start,omitempty"` - End time.Time `json:"end,omitempty"` - startString *string - endString *string -} - -func (dr *dateTimeRange) ParseDates(dateTimeParser analysis.DateTimeParser) (start, end time.Time) { - start = dr.Start - if dr.Start.IsZero() && dr.startString != nil { - s, err := dateTimeParser.ParseDateTime(*dr.startString) - if err == nil { - start = s - } - } - end = dr.End - if dr.End.IsZero() && dr.endString != nil { - e, err := dateTimeParser.ParseDateTime(*dr.endString) - if err == nil { - end = e - } - } - return start, end -} - -func (dr *dateTimeRange) UnmarshalJSON(input []byte) error { - var temp struct { - Name string `json:"name,omitempty"` - Start *string `json:"start,omitempty"` - End *string `json:"end,omitempty"` - } - - err := json.Unmarshal(input, &temp) - if err != nil { - return err - } - - dr.Name = temp.Name - if temp.Start != nil { - dr.startString = temp.Start - } - if temp.End != 
nil { - dr.endString = temp.End - } - - return nil -} - -func (dr *dateTimeRange) MarshalJSON() ([]byte, error) { - rv := map[string]interface{}{ - "name": dr.Name, - "start": dr.Start, - "end": dr.End, - } - if dr.Start.IsZero() && dr.startString != nil { - rv["start"] = dr.startString - } - if dr.End.IsZero() && dr.endString != nil { - rv["end"] = dr.endString - } - return json.Marshal(rv) -} - -// A FacetRequest describes a facet or aggregation -// of the result document set you would like to be -// built. -type FacetRequest struct { - Size int `json:"size"` - Field string `json:"field"` - NumericRanges []*numericRange `json:"numeric_ranges,omitempty"` - DateTimeRanges []*dateTimeRange `json:"date_ranges,omitempty"` -} - -func (fr *FacetRequest) Validate() error { - nrCount := len(fr.NumericRanges) - drCount := len(fr.DateTimeRanges) - if nrCount > 0 && drCount > 0 { - return fmt.Errorf("facet can only conain numeric ranges or date ranges, not both") - } - - if nrCount > 0 { - nrNames := map[string]interface{}{} - for _, nr := range fr.NumericRanges { - if _, ok := nrNames[nr.Name]; ok { - return fmt.Errorf("numeric ranges contains duplicate name '%s'", nr.Name) - } - nrNames[nr.Name] = struct{}{} - if nr.Min == nil && nr.Max == nil { - return fmt.Errorf("numeric range query must specify either min, max or both for range name '%s'", nr.Name) - } - } - - } else { - dateTimeParser, err := cache.DateTimeParserNamed(defaultDateTimeParser) - if err != nil { - return err - } - drNames := map[string]interface{}{} - for _, dr := range fr.DateTimeRanges { - if _, ok := drNames[dr.Name]; ok { - return fmt.Errorf("date ranges contains duplicate name '%s'", dr.Name) - } - drNames[dr.Name] = struct{}{} - start, end := dr.ParseDates(dateTimeParser) - if start.IsZero() && end.IsZero() { - return fmt.Errorf("date range query must specify either start, end or both for range name '%s'", dr.Name) - } - } - } - return nil -} - -// NewFacetRequest creates a facet on the specified 
-// field that limits the number of entries to the -// specified size. -func NewFacetRequest(field string, size int) *FacetRequest { - return &FacetRequest{ - Field: field, - Size: size, - } -} - -// AddDateTimeRange adds a bucket to a field -// containing date values. Documents with a -// date value falling into this range are tabulated -// as part of this bucket/range. -func (fr *FacetRequest) AddDateTimeRange(name string, start, end time.Time) { - if fr.DateTimeRanges == nil { - fr.DateTimeRanges = make([]*dateTimeRange, 0, 1) - } - fr.DateTimeRanges = append(fr.DateTimeRanges, &dateTimeRange{Name: name, Start: start, End: end}) -} - -// AddDateTimeRangeString adds a bucket to a field -// containing date values. -func (fr *FacetRequest) AddDateTimeRangeString(name string, start, end *string) { - if fr.DateTimeRanges == nil { - fr.DateTimeRanges = make([]*dateTimeRange, 0, 1) - } - fr.DateTimeRanges = append(fr.DateTimeRanges, - &dateTimeRange{Name: name, startString: start, endString: end}) -} - -// AddNumericRange adds a bucket to a field -// containing numeric values. Documents with a -// numeric value falling into this range are -// tabulated as part of this bucket/range. -func (fr *FacetRequest) AddNumericRange(name string, min, max *float64) { - if fr.NumericRanges == nil { - fr.NumericRanges = make([]*numericRange, 0, 1) - } - fr.NumericRanges = append(fr.NumericRanges, &numericRange{Name: name, Min: min, Max: max}) -} - -// FacetsRequest groups together all the -// FacetRequest objects for a single query. -type FacetsRequest map[string]*FacetRequest - -func (fr FacetsRequest) Validate() error { - for _, v := range fr { - err := v.Validate() - if err != nil { - return err - } - } - return nil -} - -// HighlightRequest describes how field matches -// should be highlighted. -type HighlightRequest struct { - Style *string `json:"style"` - Fields []string `json:"fields"` -} - -// NewHighlight creates a default -// HighlightRequest. 
-func NewHighlight() *HighlightRequest { - return &HighlightRequest{} -} - -// NewHighlightWithStyle creates a HighlightRequest -// with an alternate style. -func NewHighlightWithStyle(style string) *HighlightRequest { - return &HighlightRequest{ - Style: &style, - } -} - -func (h *HighlightRequest) AddField(field string) { - if h.Fields == nil { - h.Fields = make([]string, 0, 1) - } - h.Fields = append(h.Fields, field) -} - -// A SearchRequest describes all the parameters -// needed to search the index. -// Query is required. -// Size/From describe how much and which part of the -// result set to return. -// Highlight describes optional search result -// highlighting. -// Fields describes a list of field values which -// should be retrieved for result documents, provided they -// were stored while indexing. -// Facets describe the set of facets to be computed. -// Explain triggers inclusion of additional search -// result score explanations. -// Sort describes the desired order for the results to be returned. -// Score controls the kind of scoring performed -// SearchAfter supports deep paging by providing a minimum sort key -// SearchBefore supports deep paging by providing a maximum sort key -// sortFunc specifies the sort implementation to use for sorting results. -// -// A special field named "*" can be used to return all fields. 
-type SearchRequest struct { - Query query.Query `json:"query"` - Size int `json:"size"` - From int `json:"from"` - Highlight *HighlightRequest `json:"highlight"` - Fields []string `json:"fields"` - Facets FacetsRequest `json:"facets"` - Explain bool `json:"explain"` - Sort search.SortOrder `json:"sort"` - IncludeLocations bool `json:"includeLocations"` - Score string `json:"score,omitempty"` - SearchAfter []string `json:"search_after"` - SearchBefore []string `json:"search_before"` - - sortFunc func(sort.Interface) -} - -func (r *SearchRequest) Validate() error { - if srq, ok := r.Query.(query.ValidatableQuery); ok { - err := srq.Validate() - if err != nil { - return err - } - } - - if r.SearchAfter != nil && r.SearchBefore != nil { - return fmt.Errorf("cannot use search after and search before together") - } - - if r.SearchAfter != nil { - if r.From != 0 { - return fmt.Errorf("cannot use search after with from !=0") - } - if len(r.SearchAfter) != len(r.Sort) { - return fmt.Errorf("search after must have same size as sort order") - } - } - if r.SearchBefore != nil { - if r.From != 0 { - return fmt.Errorf("cannot use search before with from !=0") - } - if len(r.SearchBefore) != len(r.Sort) { - return fmt.Errorf("search before must have same size as sort order") - } - } - - return r.Facets.Validate() -} - -// AddFacet adds a FacetRequest to this SearchRequest -func (r *SearchRequest) AddFacet(facetName string, f *FacetRequest) { - if r.Facets == nil { - r.Facets = make(FacetsRequest, 1) - } - r.Facets[facetName] = f -} - -// SortBy changes the request to use the requested sort order -// this form uses the simplified syntax with an array of strings -// each string can either be a field name -// or the magic value _id and _score which refer to the doc id and search score -// any of these values can optionally be prefixed with - to reverse the order -func (r *SearchRequest) SortBy(order []string) { - so := search.ParseSortOrderStrings(order) - r.Sort = so -} - -// 
SortByCustom changes the request to use the requested sort order -func (r *SearchRequest) SortByCustom(order search.SortOrder) { - r.Sort = order -} - -// SetSearchAfter sets the request to skip over hits with a sort -// value less than the provided sort after key -func (r *SearchRequest) SetSearchAfter(after []string) { - r.SearchAfter = after -} - -// SetSearchBefore sets the request to skip over hits with a sort -// value greater than the provided sort before key -func (r *SearchRequest) SetSearchBefore(before []string) { - r.SearchBefore = before -} - -// UnmarshalJSON deserializes a JSON representation of -// a SearchRequest -func (r *SearchRequest) UnmarshalJSON(input []byte) error { - var temp struct { - Q json.RawMessage `json:"query"` - Size *int `json:"size"` - From int `json:"from"` - Highlight *HighlightRequest `json:"highlight"` - Fields []string `json:"fields"` - Facets FacetsRequest `json:"facets"` - Explain bool `json:"explain"` - Sort []json.RawMessage `json:"sort"` - IncludeLocations bool `json:"includeLocations"` - Score string `json:"score"` - SearchAfter []string `json:"search_after"` - SearchBefore []string `json:"search_before"` - } - - err := json.Unmarshal(input, &temp) - if err != nil { - return err - } - - if temp.Size == nil { - r.Size = 10 - } else { - r.Size = *temp.Size - } - if temp.Sort == nil { - r.Sort = search.SortOrder{&search.SortScore{Desc: true}} - } else { - r.Sort, err = search.ParseSortOrderJSON(temp.Sort) - if err != nil { - return err - } - } - r.From = temp.From - r.Explain = temp.Explain - r.Highlight = temp.Highlight - r.Fields = temp.Fields - r.Facets = temp.Facets - r.IncludeLocations = temp.IncludeLocations - r.Score = temp.Score - r.SearchAfter = temp.SearchAfter - r.SearchBefore = temp.SearchBefore - r.Query, err = query.ParseQuery(temp.Q) - if err != nil { - return err - } - - if r.Size < 0 { - r.Size = 10 - } - if r.From < 0 { - r.From = 0 - } - - return nil - -} - -// NewSearchRequest creates a new 
SearchRequest -// for the Query, using default values for all -// other search parameters. -func NewSearchRequest(q query.Query) *SearchRequest { - return NewSearchRequestOptions(q, 10, 0, false) -} - -// NewSearchRequestOptions creates a new SearchRequest -// for the Query, with the requested size, from -// and explanation search parameters. -// By default results are ordered by score, descending. -func NewSearchRequestOptions(q query.Query, size, from int, explain bool) *SearchRequest { - return &SearchRequest{ - Query: q, - Size: size, - From: from, - Explain: explain, - Sort: search.SortOrder{&search.SortScore{Desc: true}}, - } -} - -// IndexErrMap tracks errors with the name of the index where it occurred -type IndexErrMap map[string]error - -// MarshalJSON seralizes the error into a string for JSON consumption -func (iem IndexErrMap) MarshalJSON() ([]byte, error) { - tmp := make(map[string]string, len(iem)) - for k, v := range iem { - tmp[k] = v.Error() - } - return json.Marshal(tmp) -} - -func (iem IndexErrMap) UnmarshalJSON(data []byte) error { - var tmp map[string]string - err := json.Unmarshal(data, &tmp) - if err != nil { - return err - } - for k, v := range tmp { - iem[k] = fmt.Errorf("%s", v) - } - return nil -} - -// SearchStatus is a secion in the SearchResult reporting how many -// underlying indexes were queried, how many were successful/failed -// and a map of any errors that were encountered -type SearchStatus struct { - Total int `json:"total"` - Failed int `json:"failed"` - Successful int `json:"successful"` - Errors IndexErrMap `json:"errors,omitempty"` -} - -// Merge will merge together multiple SearchStatuses during a MultiSearch -func (ss *SearchStatus) Merge(other *SearchStatus) { - ss.Total += other.Total - ss.Failed += other.Failed - ss.Successful += other.Successful - if len(other.Errors) > 0 { - if ss.Errors == nil { - ss.Errors = make(map[string]error) - } - for otherIndex, otherError := range other.Errors { - ss.Errors[otherIndex] = 
otherError - } - } -} - -// A SearchResult describes the results of executing -// a SearchRequest. -type SearchResult struct { - Status *SearchStatus `json:"status"` - Request *SearchRequest `json:"request"` - Hits search.DocumentMatchCollection `json:"hits"` - Total uint64 `json:"total_hits"` - MaxScore float64 `json:"max_score"` - Took time.Duration `json:"took"` - Facets search.FacetResults `json:"facets"` -} - -func (sr *SearchResult) Size() int { - sizeInBytes := reflectStaticSizeSearchResult + size.SizeOfPtr + - reflectStaticSizeSearchStatus - - for _, entry := range sr.Hits { - if entry != nil { - sizeInBytes += entry.Size() - } - } - - for k, v := range sr.Facets { - sizeInBytes += size.SizeOfString + len(k) + - v.Size() - } - - return sizeInBytes -} - -func (sr *SearchResult) String() string { - rv := "" - if sr.Total > 0 { - if sr.Request.Size > 0 { - rv = fmt.Sprintf("%d matches, showing %d through %d, took %s\n", sr.Total, sr.Request.From+1, sr.Request.From+len(sr.Hits), sr.Took) - for i, hit := range sr.Hits { - rv += fmt.Sprintf("%5d. 
%s (%f)\n", i+sr.Request.From+1, hit.ID, hit.Score) - for fragmentField, fragments := range hit.Fragments { - rv += fmt.Sprintf("\t%s\n", fragmentField) - for _, fragment := range fragments { - rv += fmt.Sprintf("\t\t%s\n", fragment) - } - } - for otherFieldName, otherFieldValue := range hit.Fields { - if _, ok := hit.Fragments[otherFieldName]; !ok { - rv += fmt.Sprintf("\t%s\n", otherFieldName) - rv += fmt.Sprintf("\t\t%v\n", otherFieldValue) - } - } - } - } else { - rv = fmt.Sprintf("%d matches, took %s\n", sr.Total, sr.Took) - } - } else { - rv = "No matches" - } - if len(sr.Facets) > 0 { - rv += fmt.Sprintf("Facets:\n") - for fn, f := range sr.Facets { - rv += fmt.Sprintf("%s(%d)\n", fn, f.Total) - for _, t := range f.Terms { - rv += fmt.Sprintf("\t%s(%d)\n", t.Term, t.Count) - } - if f.Other != 0 { - rv += fmt.Sprintf("\tOther(%d)\n", f.Other) - } - } - } - return rv -} - -// Merge will merge together multiple SearchResults during a MultiSearch -func (sr *SearchResult) Merge(other *SearchResult) { - sr.Status.Merge(other.Status) - sr.Hits = append(sr.Hits, other.Hits...) - sr.Total += other.Total - if other.MaxScore > sr.MaxScore { - sr.MaxScore = other.MaxScore - } - if sr.Facets == nil && len(other.Facets) != 0 { - sr.Facets = other.Facets - return - } - - sr.Facets.Merge(other.Facets) -} - -// MemoryNeededForSearchResult is an exported helper function to determine the RAM -// needed to accommodate the results for a given search request. 
-func MemoryNeededForSearchResult(req *SearchRequest) uint64 { - if req == nil { - return 0 - } - - numDocMatches := req.Size + req.From - if req.Size+req.From > collector.PreAllocSizeSkipCap { - numDocMatches = collector.PreAllocSizeSkipCap - } - - estimate := 0 - - // overhead from the SearchResult structure - var sr SearchResult - estimate += sr.Size() - - var dm search.DocumentMatch - sizeOfDocumentMatch := dm.Size() - - // overhead from results - estimate += numDocMatches * sizeOfDocumentMatch - - // overhead from facet results - if req.Facets != nil { - var fr search.FacetResult - estimate += len(req.Facets) * fr.Size() - } - - // overhead from fields, highlighting - var d document.Document - if len(req.Fields) > 0 || req.Highlight != nil { - numDocsApplicable := req.Size - if numDocsApplicable > collector.PreAllocSizeSkipCap { - numDocsApplicable = collector.PreAllocSizeSkipCap - } - estimate += numDocsApplicable * d.Size() - } - - return uint64(estimate) -} - -// SetSortFunc sets the sort implementation to use when sorting hits. -// -// SearchRequests can specify a custom sort implementation to meet -// their needs. For instance, by specifying a parallel sort -// that uses all available cores. -func (r *SearchRequest) SetSortFunc(s func(sort.Interface)) { - r.sortFunc = s -} - -// SortFunc returns the sort implementation to use when sorting hits. -// Defaults to sort.Sort. -func (r *SearchRequest) SortFunc() func(data sort.Interface) { - if r.sortFunc != nil { - return r.sortFunc - } - - return sort.Sort -} diff --git a/vendor/github.com/blevesearch/bleve/v2/search/collector.go b/vendor/github.com/blevesearch/bleve/v2/search/collector.go deleted file mode 100644 index 38e34fe7c..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/search/collector.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package search - -import ( - "context" - "time" - - index "github.com/blevesearch/bleve_index_api" -) - -type Collector interface { - Collect(ctx context.Context, searcher Searcher, reader index.IndexReader) error - Results() DocumentMatchCollection - Total() uint64 - MaxScore() float64 - Took() time.Duration - SetFacetsBuilder(facetsBuilder *FacetsBuilder) - FacetResults() FacetResults -} - -// DocumentMatchHandler is the type of document match callback -// bleve will invoke during the search. -// Eventually, bleve will indicate the completion of an ongoing search, -// by passing a nil value for the document match callback. -// The application should take a copy of the hit/documentMatch -// if it wish to own it or need prolonged access to it. -type DocumentMatchHandler func(hit *DocumentMatch) error - -type MakeDocumentMatchHandlerKeyType string - -var MakeDocumentMatchHandlerKey = MakeDocumentMatchHandlerKeyType( - "MakeDocumentMatchHandlerKey") - -// MakeDocumentMatchHandler is an optional DocumentMatchHandler -// builder function which the applications can pass to bleve. -// These builder methods gives a DocumentMatchHandler function -// to bleve, which it will invoke on every document matches. 
-type MakeDocumentMatchHandler func(ctx *SearchContext) ( - callback DocumentMatchHandler, loadID bool, err error) diff --git a/vendor/github.com/blevesearch/bleve/v2/search/collector/list.go b/vendor/github.com/blevesearch/bleve/v2/search/collector/list.go deleted file mode 100644 index 20d4c9d01..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/search/collector/list.go +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package collector - -import ( - "container/list" - - "github.com/blevesearch/bleve/v2/search" -) - -type collectStoreList struct { - results *list.List - compare collectorCompare -} - -func newStoreList(capacity int, compare collectorCompare) *collectStoreList { - rv := &collectStoreList{ - results: list.New(), - compare: compare, - } - - return rv -} - -func (c *collectStoreList) AddNotExceedingSize(doc *search.DocumentMatch, size int) *search.DocumentMatch { - c.add(doc) - if c.len() > size { - return c.removeLast() - } - return nil -} - -func (c *collectStoreList) add(doc *search.DocumentMatch) { - for e := c.results.Front(); e != nil; e = e.Next() { - curr := e.Value.(*search.DocumentMatch) - if c.compare(doc, curr) >= 0 { - c.results.InsertBefore(doc, e) - return - } - } - // if we got to the end, we still have to add it - c.results.PushBack(doc) -} - -func (c *collectStoreList) removeLast() *search.DocumentMatch { - return c.results.Remove(c.results.Front()).(*search.DocumentMatch) -} - -func (c *collectStoreList) Final(skip int, fixup collectorFixup) (search.DocumentMatchCollection, error) { - if c.results.Len()-skip > 0 { - rv := make(search.DocumentMatchCollection, c.results.Len()-skip) - i := 0 - skipped := 0 - for e := c.results.Back(); e != nil; e = e.Prev() { - if skipped < skip { - skipped++ - continue - } - - rv[i] = e.Value.(*search.DocumentMatch) - err := fixup(rv[i]) - if err != nil { - return nil, err - } - i++ - } - return rv, nil - } - return search.DocumentMatchCollection{}, nil -} - -func (c *collectStoreList) len() int { - return c.results.Len() -} diff --git a/vendor/github.com/blevesearch/bleve/v2/search/collector/topn.go b/vendor/github.com/blevesearch/bleve/v2/search/collector/topn.go deleted file mode 100644 index aa1d65bd1..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/search/collector/topn.go +++ /dev/null @@ -1,412 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package collector - -import ( - "context" - "reflect" - "strconv" - "time" - - "github.com/blevesearch/bleve/v2/search" - "github.com/blevesearch/bleve/v2/size" - index "github.com/blevesearch/bleve_index_api" -) - -var reflectStaticSizeTopNCollector int - -func init() { - var coll TopNCollector - reflectStaticSizeTopNCollector = int(reflect.TypeOf(coll).Size()) -} - -type collectorStore interface { - // Add the document, and if the new store size exceeds the provided size - // the last element is removed and returned. If the size has not been - // exceeded, nil is returned. 
- AddNotExceedingSize(doc *search.DocumentMatch, size int) *search.DocumentMatch - - Final(skip int, fixup collectorFixup) (search.DocumentMatchCollection, error) -} - -// PreAllocSizeSkipCap will cap preallocation to this amount when -// size+skip exceeds this value -var PreAllocSizeSkipCap = 1000 - -type collectorCompare func(i, j *search.DocumentMatch) int - -type collectorFixup func(d *search.DocumentMatch) error - -// TopNCollector collects the top N hits, optionally skipping some results -type TopNCollector struct { - size int - skip int - total uint64 - maxScore float64 - took time.Duration - sort search.SortOrder - results search.DocumentMatchCollection - facetsBuilder *search.FacetsBuilder - - store collectorStore - - needDocIds bool - neededFields []string - cachedScoring []bool - cachedDesc []bool - - lowestMatchOutsideResults *search.DocumentMatch - updateFieldVisitor index.DocValueVisitor - dvReader index.DocValueReader - searchAfter *search.DocumentMatch -} - -// CheckDoneEvery controls how frequently we check the context deadline -const CheckDoneEvery = uint64(1024) - -// NewTopNCollector builds a collector to find the top 'size' hits -// skipping over the first 'skip' hits -// ordering hits by the provided sort order -func NewTopNCollector(size int, skip int, sort search.SortOrder) *TopNCollector { - return newTopNCollector(size, skip, sort) -} - -// NewTopNCollector builds a collector to find the top 'size' hits -// skipping over the first 'skip' hits -// ordering hits by the provided sort order -func NewTopNCollectorAfter(size int, sort search.SortOrder, after []string) *TopNCollector { - rv := newTopNCollector(size, 0, sort) - rv.searchAfter = &search.DocumentMatch{ - Sort: after, - } - - for pos, ss := range sort { - if ss.RequiresDocID() { - rv.searchAfter.ID = after[pos] - } - if ss.RequiresScoring() { - if score, err := strconv.ParseFloat(after[pos], 64); err == nil { - rv.searchAfter.Score = score - } - } - } - - return rv -} - -func 
newTopNCollector(size int, skip int, sort search.SortOrder) *TopNCollector { - hc := &TopNCollector{size: size, skip: skip, sort: sort} - - // pre-allocate space on the store to avoid reslicing - // unless the size + skip is too large, then cap it - // everything should still work, just reslices as necessary - backingSize := size + skip + 1 - if size+skip > PreAllocSizeSkipCap { - backingSize = PreAllocSizeSkipCap + 1 - } - - if size+skip > 10 { - hc.store = newStoreHeap(backingSize, func(i, j *search.DocumentMatch) int { - return hc.sort.Compare(hc.cachedScoring, hc.cachedDesc, i, j) - }) - } else { - hc.store = newStoreSlice(backingSize, func(i, j *search.DocumentMatch) int { - return hc.sort.Compare(hc.cachedScoring, hc.cachedDesc, i, j) - }) - } - - // these lookups traverse an interface, so do once up-front - if sort.RequiresDocID() { - hc.needDocIds = true - } - hc.neededFields = sort.RequiredFields() - hc.cachedScoring = sort.CacheIsScore() - hc.cachedDesc = sort.CacheDescending() - - return hc -} - -func (hc *TopNCollector) Size() int { - sizeInBytes := reflectStaticSizeTopNCollector + size.SizeOfPtr - - if hc.facetsBuilder != nil { - sizeInBytes += hc.facetsBuilder.Size() - } - - for _, entry := range hc.neededFields { - sizeInBytes += len(entry) + size.SizeOfString - } - - sizeInBytes += len(hc.cachedScoring) + len(hc.cachedDesc) - - return sizeInBytes -} - -// Collect goes to the index to find the matching documents -func (hc *TopNCollector) Collect(ctx context.Context, searcher search.Searcher, reader index.IndexReader) error { - startTime := time.Now() - var err error - var next *search.DocumentMatch - - // pre-allocate enough space in the DocumentMatchPool - // unless the size + skip is too large, then cap it - // everything should still work, just allocates DocumentMatches on demand - backingSize := hc.size + hc.skip + 1 - if hc.size+hc.skip > PreAllocSizeSkipCap { - backingSize = PreAllocSizeSkipCap + 1 - } - searchContext := &search.SearchContext{ 
- DocumentMatchPool: search.NewDocumentMatchPool(backingSize+searcher.DocumentMatchPoolSize(), len(hc.sort)), - Collector: hc, - IndexReader: reader, - } - - hc.dvReader, err = reader.DocValueReader(hc.neededFields) - if err != nil { - return err - } - - hc.updateFieldVisitor = func(field string, term []byte) { - if hc.facetsBuilder != nil { - hc.facetsBuilder.UpdateVisitor(field, term) - } - hc.sort.UpdateVisitor(field, term) - } - - dmHandlerMaker := MakeTopNDocumentMatchHandler - if cv := ctx.Value(search.MakeDocumentMatchHandlerKey); cv != nil { - dmHandlerMaker = cv.(search.MakeDocumentMatchHandler) - } - // use the application given builder for making the custom document match - // handler and perform callbacks/invocations on the newly made handler. - dmHandler, loadID, err := dmHandlerMaker(searchContext) - if err != nil { - return err - } - - hc.needDocIds = hc.needDocIds || loadID - - select { - case <-ctx.Done(): - return ctx.Err() - default: - next, err = searcher.Next(searchContext) - } - for err == nil && next != nil { - if hc.total%CheckDoneEvery == 0 { - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - } - - err = hc.prepareDocumentMatch(searchContext, reader, next) - if err != nil { - break - } - - err = dmHandler(next) - if err != nil { - break - } - - next, err = searcher.Next(searchContext) - } - - // help finalize/flush the results in case - // of custom document match handlers. 
- err = dmHandler(nil) - if err != nil { - return err - } - - // compute search duration - hc.took = time.Since(startTime) - if err != nil { - return err - } - // finalize actual results - err = hc.finalizeResults(reader) - if err != nil { - return err - } - return nil -} - -var sortByScoreOpt = []string{"_score"} - -func (hc *TopNCollector) prepareDocumentMatch(ctx *search.SearchContext, - reader index.IndexReader, d *search.DocumentMatch) (err error) { - - // visit field terms for features that require it (sort, facets) - if len(hc.neededFields) > 0 { - err = hc.visitFieldTerms(reader, d) - if err != nil { - return err - } - } - - // increment total hits - hc.total++ - d.HitNumber = hc.total - - // update max score - if d.Score > hc.maxScore { - hc.maxScore = d.Score - } - - // see if we need to load ID (at this early stage, for example to sort on it) - if hc.needDocIds { - d.ID, err = reader.ExternalID(d.IndexInternalID) - if err != nil { - return err - } - } - - // compute this hits sort value - if len(hc.sort) == 1 && hc.cachedScoring[0] { - d.Sort = sortByScoreOpt - } else { - hc.sort.Value(d) - } - - return nil -} - -func MakeTopNDocumentMatchHandler( - ctx *search.SearchContext) (search.DocumentMatchHandler, bool, error) { - var hc *TopNCollector - var ok bool - if hc, ok = ctx.Collector.(*TopNCollector); ok { - return func(d *search.DocumentMatch) error { - if d == nil { - return nil - } - - // support search after based pagination, - // if this hit is <= the search after sort key - // we should skip it - if hc.searchAfter != nil { - // exact sort order matches use hit number to break tie - // but we want to allow for exact match, so we pretend - hc.searchAfter.HitNumber = d.HitNumber - if hc.sort.Compare(hc.cachedScoring, hc.cachedDesc, d, hc.searchAfter) <= 0 { - return nil - } - } - - // optimization, we track lowest sorting hit already removed from heap - // with this one comparison, we can avoid all heap operations if - // this hit would have been 
added and then immediately removed - if hc.lowestMatchOutsideResults != nil { - cmp := hc.sort.Compare(hc.cachedScoring, hc.cachedDesc, d, - hc.lowestMatchOutsideResults) - if cmp >= 0 { - // this hit can't possibly be in the result set, so avoid heap ops - ctx.DocumentMatchPool.Put(d) - return nil - } - } - - removed := hc.store.AddNotExceedingSize(d, hc.size+hc.skip) - if removed != nil { - if hc.lowestMatchOutsideResults == nil { - hc.lowestMatchOutsideResults = removed - } else { - cmp := hc.sort.Compare(hc.cachedScoring, hc.cachedDesc, - removed, hc.lowestMatchOutsideResults) - if cmp < 0 { - tmp := hc.lowestMatchOutsideResults - hc.lowestMatchOutsideResults = removed - ctx.DocumentMatchPool.Put(tmp) - } - } - } - return nil - }, false, nil - } - return nil, false, nil -} - -// visitFieldTerms is responsible for visiting the field terms of the -// search hit, and passing visited terms to the sort and facet builder -func (hc *TopNCollector) visitFieldTerms(reader index.IndexReader, d *search.DocumentMatch) error { - if hc.facetsBuilder != nil { - hc.facetsBuilder.StartDoc() - } - - err := hc.dvReader.VisitDocValues(d.IndexInternalID, hc.updateFieldVisitor) - if hc.facetsBuilder != nil { - hc.facetsBuilder.EndDoc() - } - - return err -} - -// SetFacetsBuilder registers a facet builder for this collector -func (hc *TopNCollector) SetFacetsBuilder(facetsBuilder *search.FacetsBuilder) { - hc.facetsBuilder = facetsBuilder - hc.neededFields = append(hc.neededFields, hc.facetsBuilder.RequiredFields()...) 
-} - -// finalizeResults starts with the heap containing the final top size+skip -// it now throws away the results to be skipped -// and does final doc id lookup (if necessary) -func (hc *TopNCollector) finalizeResults(r index.IndexReader) error { - var err error - hc.results, err = hc.store.Final(hc.skip, func(doc *search.DocumentMatch) error { - if doc.ID == "" { - // look up the id since we need it for lookup - var err error - doc.ID, err = r.ExternalID(doc.IndexInternalID) - if err != nil { - return err - } - } - doc.Complete(nil) - return nil - }) - - return err -} - -// Results returns the collected hits -func (hc *TopNCollector) Results() search.DocumentMatchCollection { - return hc.results -} - -// Total returns the total number of hits -func (hc *TopNCollector) Total() uint64 { - return hc.total -} - -// MaxScore returns the maximum score seen across all the hits -func (hc *TopNCollector) MaxScore() float64 { - return hc.maxScore -} - -// Took returns the time spent collecting hits -func (hc *TopNCollector) Took() time.Duration { - return hc.took -} - -// FacetResults returns the computed facets results -func (hc *TopNCollector) FacetResults() search.FacetResults { - if hc.facetsBuilder != nil { - return hc.facetsBuilder.Results() - } - return nil -} diff --git a/vendor/github.com/blevesearch/bleve/v2/search/facet/benchmark_data.txt b/vendor/github.com/blevesearch/bleve/v2/search/facet/benchmark_data.txt deleted file mode 100644 index b012f78ce..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/search/facet/benchmark_data.txt +++ /dev/null @@ -1,2909 +0,0 @@ -Boiling liquid expanding vapor explosion -From Wikipedia, the free encyclopedia -See also: Boiler explosion and Steam explosion - -Flames subsequent to a flammable liquid BLEVE from a tanker. BLEVEs do not necessarily involve fire. - -This article's tone or style may not reflect the encyclopedic tone used on Wikipedia. See Wikipedia's guide to writing better articles for suggestions. 
(July 2013) -A boiling liquid expanding vapor explosion (BLEVE, /ˈblɛviː/ blev-ee) is an explosion caused by the rupture of a vessel containing a pressurized liquid above its boiling point.[1] -Contents [hide] -1 Mechanism -1.1 Water example -1.2 BLEVEs without chemical reactions -2 Fires -3 Incidents -4 Safety measures -5 See also -6 References -7 External links -Mechanism[edit] - -This section needs additional citations for verification. Please help improve this article by adding citations to reliable sources. Unsourced material may be challenged and removed. (July 2013) -There are three characteristics of liquids which are relevant to the discussion of a BLEVE: -If a liquid in a sealed container is boiled, the pressure inside the container increases. As the liquid changes to a gas it expands - this expansion in a vented container would cause the gas and liquid to take up more space. In a sealed container the gas and liquid are not able to take up more space and so the pressure rises. Pressurized vessels containing liquids can reach an equilibrium where the liquid stops boiling and the pressure stops rising. This occurs when no more heat is being added to the system (either because it has reached ambient temperature or has had a heat source removed). -The boiling temperature of a liquid is dependent on pressure - high pressures will yield high boiling temperatures, and low pressures will yield low boiling temperatures. A common simple experiment is to place a cup of water in a vacuum chamber, and then reduce the pressure in the chamber until the water boils. By reducing the pressure the water will boil even at room temperature. This works both ways - if the pressure is increased beyond normal atmospheric pressures, the boiling of hot water could be suppressed far beyond normal temperatures. The cooling system of a modern internal combustion engine is a real-world example. -When a liquid boils it turns into a gas. 
The resulting gas takes up far more space than the liquid did. -Typically, a BLEVE starts with a container of liquid which is held above its normal, atmospheric-pressure boiling temperature. Many substances normally stored as liquids, such as CO2, oxygen, and other similar industrial gases have boiling temperatures, at atmospheric pressure, far below room temperature. In the case of water, a BLEVE could occur if a pressurized chamber of water is heated far beyond the standard 100 °C (212 °F). That container, because the boiling water pressurizes it, is capable of holding liquid water at very high temperatures. -If the pressurized vessel, containing liquid at high temperature (which may be room temperature, depending on the substance) ruptures, the pressure which prevents the liquid from boiling is lost. If the rupture is catastrophic, where the vessel is immediately incapable of holding any pressure at all, then there suddenly exists a large mass of liquid which is at very high temperature and very low pressure. This causes the entire volume of liquid to instantaneously boil, which in turn causes an extremely rapid expansion. Depending on temperatures, pressures and the substance involved, that expansion may be so rapid that it can be classified as an explosion, fully capable of inflicting severe damage on its surroundings. -Water example[edit] -Imagine, for example, a tank of pressurized liquid water held at 204.4 °C (400 °F). This vessel would normally be pressurized to 1.7 MPa (250 psi) above atmospheric ("gauge") pressure. Were the tank containing the water to split open, there would momentarily exist a volume of liquid water which is -at atmospheric pressure, and -204.4 °C (400 °F). -At atmospheric pressure the boiling point of water is 100 °C (212 °F) - liquid water at atmospheric pressure cannot exist at temperatures higher than 100 °C (212 °F). 
It is obvious, then, that 204.4 °C (400 °F) liquid water at atmospheric pressure must immediately flash to gas causing an explosion. -BLEVEs without chemical reactions[edit] -It is important to note that a BLEVE need not be a chemical explosion - nor does there need to be a fire - however if a flammable substance is subject to a BLEVE it may also be subject to intense heating, either from an external source of heat which may have caused the vessel to rupture in the first place or from an internal source of localized heating such as skin friction. This heating can cause a flammable substance to ignite, adding a secondary explosion caused by the primary BLEVE. While blast effects of any BLEVE can be devastating, a flammable substance such as propane can add significantly to the danger. -Bleve explosion.svg -While the term BLEVE is most often used to describe the results of a container of flammable liquid rupturing due to fire, a BLEVE can occur even with a non-flammable substance such as water,[2] liquid nitrogen,[3] liquid helium or other refrigerants or cryogens, and therefore is not usually considered a type of chemical explosion. -Fires[edit] -BLEVEs can be caused by an external fire near the storage vessel causing heating of the contents and pressure build-up. While tanks are often designed to withstand great pressure, constant heating can cause the metal to weaken and eventually fail. If the tank is being heated in an area where there is no liquid, it may rupture faster without the liquid to absorb the heat. Gas containers are usually equipped with relief valves that vent off excess pressure, but the tank can still fail if the pressure is not released quickly enough.[1] Relief valves are sized to release pressure fast enough to prevent the pressure from increasing beyond the strength of the vessel, but not so fast as to be the cause of an explosion. 
An appropriately sized relief valve will allow the liquid inside to boil slowly, maintaining a constant pressure in the vessel until all the liquid has boiled and the vessel empties. -If the substance involved is flammable, it is likely that the resulting cloud of the substance will ignite after the BLEVE has occurred, forming a fireball and possibly a fuel-air explosion, also termed a vapor cloud explosion (VCE). If the materials are toxic, a large area will be contaminated.[4] -Incidents[edit] -The term "BLEVE" was coined by three researchers at Factory Mutual, in the analysis of an accident there in 1957 involving a chemical reactor vessel.[5] -In August 1959 the Kansas City Fire Department suffered its largest ever loss of life in the line of duty, when a 25,000 gallon (95,000 litre) gas tank exploded during a fire on Southwest Boulevard killing five firefighters. This was the first time BLEVE was used to describe a burning fuel tank.[citation needed] -Later incidents included the Cheapside Street Whisky Bond Fire in Glasgow, Scotland in 1960; Feyzin, France in 1966; Crescent City, Illinois in 1970; Kingman, Arizona in 1973; a liquid nitrogen tank rupture[6] at Air Products and Chemicals and Mobay Chemical Company at New Martinsville, West Virginia on January 31, 1978 [1];Texas City, Texas in 1978; Murdock, Illinois in 1983; San Juan Ixhuatepec, Mexico City in 1984; and Toronto, Ontario in 2008. -Safety measures[edit] -[icon] This section requires expansion. (July 2013) -Some fire mitigation measures are listed under liquefied petroleum gas. -See also[edit] -Boiler explosion -Expansion ratio -Explosive boiling or phase explosion -Rapid phase transition -Viareggio train derailment -2008 Toronto explosions -Gas carriers -Los Alfaques Disaster -Lac-Mégantic derailment -References[edit] -^ Jump up to: a b Kletz, Trevor (March 1990). Critical Aspects of Safety and Loss Prevention. London: Butterworth–Heinemann. pp. 43–45. ISBN 0-408-04429-2. 
-Jump up ^ "Temperature Pressure Relief Valves on Water Heaters: test, inspect, replace, repair guide". Inspect-ny.com. Retrieved 2011-07-12. -Jump up ^ Liquid nitrogen BLEVE demo -Jump up ^ "Chemical Process Safety" (PDF). Retrieved 2011-07-12. -Jump up ^ David F. Peterson, BLEVE: Facts, Risk Factors, and Fallacies, Fire Engineering magazine (2002). -Jump up ^ "STATE EX REL. VAPOR CORP. v. NARICK". Supreme Court of Appeals of West Virginia. 1984-07-12. Retrieved 2014-03-16. -External links[edit] - Look up boiling liquid expanding vapor explosion in Wiktionary, the free dictionary. - Wikimedia Commons has media related to BLEVE. -BLEVE Demo on YouTube — video of a controlled BLEVE demo -huge explosions on YouTube — video of propane and isobutane BLEVEs from a train derailment at Murdock, Illinois (3 September 1983) -Propane BLEVE on YouTube — video of BLEVE from the Toronto propane depot fire -Moscow Ring Road Accident on YouTube - Dozens of LPG tank BLEVEs after a road accident in Moscow -Kingman, AZ BLEVE — An account of the 5 July 1973 explosion in Kingman, with photographs -Propane Tank Explosions — Description of circumstances required to cause a propane tank BLEVE. -Analysis of BLEVE Events at DOE Sites - Details physics and mathematics of BLEVEs. -HID - SAFETY REPORT ASSESSMENT GUIDE: Whisky Maturation Warehouses - The liquor is aged in wooden barrels that can suffer BLEVE. 
-Categories: ExplosivesFirefightingFireTypes of fireGas technologiesIndustrial fires and explosions -Navigation menu -Create accountLog inArticleTalkReadEditView history - -Main page -Contents -Featured content -Current events -Random article -Donate to Wikipedia -Wikimedia Shop -Interaction -Help -About Wikipedia -Community portal -Recent changes -Contact page -Tools -What links here -Related changes -Upload file -Special pages -Permanent link -Page information -Wikidata item -Cite this page -Print/export -Create a book -Download as PDF -Printable version -Languages -Català -Deutsch -Español -Français -Italiano -עברית -Nederlands -日本語 -Norsk bokmål -Polski -Português -Русский -Suomi -Edit links -This page was last modified on 18 November 2014 at 01:35. -Text is available under the Creative Commons Attribution-ShareAlike License; additional terms may apply. By using this site, you agree to the Terms of Use and Privacy Policy. Wikipedia® is a registered trademark of the Wikimedia Foundation, Inc., a non-profit organization. -Privacy policyAbout WikipediaDisclaimersContact WikipediaDevelopersMobile viewWikimedia Foundation Powered by MediaWiki - - -Thermobaric weapon -From Wikipedia, the free encyclopedia - -Blast from a US Navy fuel air explosive used against a decommissioned ship, USS McNulty, 1972. -A thermobaric weapon is a type of explosive that utilizes oxygen from the surrounding air to generate an intense, high-temperature explosion, and in practice the blast wave such a weapon produces is typically significantly longer in duration than a conventional condensed explosive. The fuel-air bomb is one of the most well-known types of thermobaric weapons. -Most conventional explosives consist of a fuel-oxidizer premix (gunpowder, for example, contains 25% fuel and 75% oxidizer), whereas thermobaric weapons are almost 100% fuel, so thermobaric weapons are significantly more energetic than conventional condensed explosives of equal weight. 
Their reliance on atmospheric oxygen makes them unsuitable for use underwater, at high altitude, and in adverse weather. They do, however, cause considerably more destruction when used inside confined environments such as tunnels, caves, and bunkers - partly due to the sustained blast wave, and partly by consuming the available oxygen inside those confined spaces. -There are many different types of thermobaric weapons rounds that can be fitted to hand-held launchers.[1] -Contents [hide] -1 Terminology -2 Mechanism -2.1 Fuel-air explosive -2.1.1 Effect -3 Development history -3.1 Soviet and Russian developments -3.2 US developments -4 History -4.1 Military use -4.2 Non-military use -5 See also -6 References -7 External links -Terminology[edit] -The term thermobaric is derived from the Greek words for "heat" and "pressure": thermobarikos (θερμοβαρικός), from thermos (θερμός), hot + baros (βάρος), weight, pressure + suffix -ikos (-ικός), suffix -ic. -Other terms used for this family of weapons are high-impulse thermobaric weapons (HITs), heat and pressure weapons, vacuum bombs, or fuel-air explosives (FAE or FAX). 
-Mechanism[edit] -In contrast to condensed explosive, where oxidation in a confined region produces a blast front from essentially a point source, a flame front accelerates to a large volume producing pressure fronts both within the mixture of fuel and oxidant and then in the surrounding air.[2] -Thermobaric explosives apply the principles underlying accidental unconfined vapor cloud explosions, which include those from dispersions of flammable dusts and droplets.[3] Previously, such explosions were most often encountered in flour mills and their storage containers, and later in coal mines; but, now, most commonly in discharged oil tankers and refineries, including an incident at Buncefield in the UK in 2005 where the blast wave woke people 150 kilometres (93 mi) from its centre.[4] -A typical weapon consists of a container packed with a fuel substance, in the center of which is a small conventional-explosive "scatter charge". Fuels are chosen on the basis of the exothermicity of their oxidation, ranging from powdered metals, such as aluminium or magnesium, to organic materials, possibly with a self-contained partial oxidant. The most recent development involves the use of nanofuels.[5][6] -A thermobaric bomb's effective yield requires the most appropriate combination of a number of factors; among these are how well the fuel is dispersed, how rapidly it mixes with the surrounding atmosphere, and the initiation of the igniter and its position relative to the container of fuel. In some designs, strong munitions cases allow the blast pressure to be contained long enough for the fuel to be heated up well above its auto-ignition temperature, so that once the container bursts the super-heated fuel will auto-ignite progressively as it comes into contact with atmospheric oxygen.[7][8][9][10][11][12][13][14][15][16][17] -Conventional upper and lower limits of flammability apply to such weapons. 
Close in, blast from the dispersal charge, compressing and heating the surrounding atmosphere, will have some influence on the lower limit. The upper limit has been demonstrated strongly to influence the ignition of fogs above pools of oil.[18] This weakness may be eliminated by designs where the fuel is preheated well above its ignition temperature, so that its cooling during its dispersion still results in a minimal ignition delay on mixing. The continual combustion of the outer layer of fuel molecules as they come into contact with the air, generates additional heat which maintains the temperature of the interior of the fireball, and thus sustains the detonation.[19][20][21] -In confinement, a series of reflective shock waves are generated,[22][23] which maintain the fireball and can extend its duration to between 10 and 50 ms as exothermic recombination reactions occur.[24] Further damage can result as the gases cool and pressure drops sharply, leading to a partial vacuum. This effect has given rise to the misnomer "vacuum bomb". Piston-type afterburning is also believed to occur in such structures, as flame-fronts accelerate through it.[25][26] -Fuel-air explosive[edit] -A fuel-air explosive (FAE) device consists of a container of fuel and two separate explosive charges. After the munition is dropped or fired, the first explosive charge bursts open the container at a predetermined height and disperses the fuel in a cloud that mixes with atmospheric oxygen (the size of the cloud varies with the size of the munition). The cloud of fuel flows around objects and into structures. The second charge then detonates the cloud, creating a massive blast wave. The blast wave destroys unreinforced buildings and equipment and kills and injures people. The antipersonnel effect of the blast wave is more severe in foxholes, on people with body armor, and in enclosed spaces such as caves, buildings, and bunkers. 
-Fuel-air explosives were first developed, and used in Vietnam, by the United States. Soviet scientists, however, quickly developed their own FAE weapons, which were reportedly used against China in the Sino-Soviet border conflict and in Afghanistan. Since then, research and development has continued and currently Russian forces field a wide array of third-generation FAE warheads. -Effect[edit] -A Human Rights Watch report of 1 February 2000[27] quotes a study made by the US Defense Intelligence Agency: -The [blast] kill mechanism against living targets is unique–and unpleasant.... What kills is the pressure wave, and more importantly, the subsequent rarefaction [vacuum], which ruptures the lungs.... If the fuel deflagrates but does not detonate, victims will be severely burned and will probably also inhale the burning fuel. Since the most common FAE fuels, ethylene oxide and propylene oxide, are highly toxic, undetonated FAE should prove as lethal to personnel caught within the cloud as most chemical agents. -According to a U.S. Central Intelligence Agency study,[27] "the effect of an FAE explosion within confined spaces is immense. Those near the ignition point are obliterated. Those at the fringe are likely to suffer many internal, and thus invisible injuries, including burst eardrums and crushed inner ear organs, severe concussions, ruptured lungs and internal organs, and possibly blindness." Another Defense Intelligence Agency document speculates that because the "shock and pressure waves cause minimal damage to brain tissue…it is possible that victims of FAEs are not rendered unconscious by the blast, but instead suffer for several seconds or minutes while they suffocate."[28] -Development history[edit] -Soviet and Russian developments[edit] - -A RPO-A rocket and launcher. 
-The Soviet armed forces extensively developed FAE weapons,[29] such as the RPO-A, and used them in Chechnya.[30] -The Russian armed forces have developed thermobaric ammunition variants for several of their weapons, such as the TGB-7V thermobaric grenade with a lethality radius of 10 metres (33 ft), which can be launched from a RPG-7. The GM-94 is a 43 mm pump-action grenade launcher which is designed mainly to fire thermobaric grenades for close quarters combat. With the grenade weighing 250 grams (8.8 oz) and holding a 160 grams (5.6 oz) explosive mixture, its lethality radius is 3 metres (9.8 ft); however, due to the deliberate "fragmentation-free" design of the grenade, 4 metres (13 ft) is already considered a safe distance.[31] The RPO-A and upgraded RPO-M are infantry-portable RPGs designed to fire thermobaric rockets. The RPO-M, for instance, has a thermobaric warhead with a TNT equivalence of 5.5 kilograms (12 lb) of TNT and destructive capabilities similar to a 152 mm High explosive fragmentation artillery shell.[32][33] The RShG-1 and the RShG-2 are thermobaric variants of the RPG-27 and RPG-26 respectively. 
The RShG-1 is the more powerful variant, with its warhead having a 10 metres (33 ft) lethality radius and producing about the same effect as 6 kg (13 lb) of TNT.[34] The RMG is a further derivative of the RPG-26 that uses a tandem-charge warhead, whereby the precursor HEAT warhead blasts an opening for the main thermobaric charge to enter and detonate inside.[35] The RMG's precursor HEAT warhead can penetrate 300 mm of reinforced concrete or over 100 mm of Rolled homogeneous armour, thus allowing the 105 millimetres (4.1 in) diameter thermobaric warhead to detonate inside.[36] -The other examples include the SACLOS or millimeter wave radar-guided thermobaric variants of the 9M123 Khrizantema, the 9M133F-1 thermobaric warhead variant of the 9M133 Kornet, and the 9M131F thermobaric warhead variant of the 9K115-2 Metis-M, all of which are anti-tank missiles. The Kornet has since been upgraded to the Kornet-EM, and its thermobaric variant has a maximum range of 10 kilometres (6.2 mi) and has the TNT equivalent of 7 kilograms (15 lb) of TNT.[37] The 300 mm 9M55S thermobaric cluster warhead rocket was built to be fired from the BM-30 Smerch MLRS. A dedicated carrier of thermobaric weapons is the purpose-built TOS-1, a 24-tube MLRS designed to fire 220 mm caliber thermobaric rockets. A full salvo from the TOS-1 will cover a rectangle 200x400 metres.[38] The Iskander-M theatre ballistic missile can also carry a 700 kilograms (1,500 lb) thermobaric warhead.[39] - -The fireball blast from the Russian Air Force's FOAB, the largest Thermobaric device to be detonated. -Many Russian Air Force munitions also have thermobaric variants. The 80 mm S-8 rocket has the S-8DM and S-8DF thermobaric variants. The S-8's larger 122 mm brother, the S-13 rocket, has the S-13D and S-13DF thermobaric variants. The S-13DF's warhead weighs only 32 kg (71 lb) but its power is equivalent to 40 kg (88 lb) of TNT. The KAB-500-OD variant of the KAB-500KR has a 250 kg (550 lb) thermobaric warhead. 
The ODAB-500PM and ODAB-500PMV unguided bombs carry a 190 kg (420 lb) fuel-air explosive each. The KAB-1500S GLONASS/GPS guided 1,500 kg (3,300 lb) bomb also has a thermobaric variant. Its fireball will cover over a 150-metre (490 ft) radius and its lethality zone is a 500-metre (1,600 ft) radius.[40] The 9M120 Ataka-V and the 9K114 Shturm ATGMs both have thermobaric variants. -In September 2007 Russia exploded the largest thermobaric weapon ever made. The weapon's yield was reportedly greater than that of the smallest dial-a-yield nuclear weapons at their lowest settings.[41][42] Russia named this particular ordnance the "Father of All Bombs" in response to the United States developed "Massive Ordnance Air Blast" (MOAB) bomb whose backronym is the "Mother of All Bombs", and which previously held the accolade of the most powerful non-nuclear weapon in history.[43] The bomb contains an about 7 tons charge of a liquid fuel such as ethylene oxide, mixed with an energetic nanoparticle such as aluminium, surrounding a high explosive burster[44] that when detonated created an explosion equivalent to 44 metric tons of TNT. -US developments[edit] - -A BLU-72/B bomb on a USAF A-1E taking off from Nakhon Phanom, in September 1968. -Current US FAE munitions include: -BLU-73 FAE I -BLU-95 500-lb (FAE-II) -BLU-96 2,000-lb (FAE-II) -CBU-55 FAE I -CBU-72 FAE I -The XM1060 40-mm grenade is a small-arms thermobaric device, which was delivered to U.S. forces in April 2003.[45] Since the 2003 Invasion of Iraq, the US Marine Corps has introduced a thermobaric 'Novel Explosive' (SMAW-NE) round for the Mk 153 SMAW rocket launcher. One team of Marines reported that they had destroyed a large one-story masonry type building with one round from 100 yards (91 m).[46] -The AGM-114N Hellfire II, first used by U.S. 
forces in 2003 in Iraq, uses a Metal Augmented Charge (MAC) warhead that contains a thermobaric explosive fill using fluoridated aluminium layered between the charge casing and a PBXN-112 explosive mixture. When the PBXN-112 detonates, the aluminium mixture is dispersed and rapidly burns. The resultant sustained high pressure is extremely effective against people and structures.[47] -History[edit] -Military use[edit] - -US Navy BLU-118B being prepared for shipping for use in Afghanistan, 5 March 2002. -The first experiments with thermobaric weapon were conducted in Germany during World War II and were led by Mario Zippermayr. The German bombs used coal dust as fuel and were extensively tested in 1943 and 1944, but did not reach mass production before the war ended. -The TOS-1 system was test fired in Panjshir valley during Soviet war in Afghanistan in the early 1980s.[48] -Unconfirmed reports suggest that Russian military forces used ground delivered thermobaric weapons in the storming of the Russian parliament during the 1993 Russian constitutional crisis and also during the Battle for Grozny (first and second Chechen wars) to attack dug in Chechen fighters. The use of both TOS-1 heavy MLRS and "RPO-A Shmel" shoulder-fired rocket system in the Chechen wars is reported to have occurred.[48][49] -It is theorized that a multitude of hand-held thermobaric weapons were used by the Russian Armed Forces in their efforts to retake the school during the 2004 Beslan school hostage crisis. 
The RPO-A and either the TGB-7V thermobaric rocket from the RPG-7 or rockets from either the RShG-1 or the RShG-2 is claimed to have been used by the Spetsnaz during the initial storming of the school.[50][51][52] At least 3 and as many as 9 RPO-A casings were later found at the positions of the Spetsnaz.[53][54] The Russian Government later admitted to the use of the RPO-A during the crisis.[55] -According to UK Ministry of Defence, British military forces have also used thermobaric weapons in their AGM-114N Hellfire missiles (carried by Apache helicopters and UAVs) against the Taliban in the War in Afghanistan.[56] -The US military also used thermobaric weapons in Afghanistan. On 3 March 2002, a single 2,000 lb (910 kg) laser guided thermobaric bomb was used by the United States Army against cave complexes in which Al-Qaeda and Taliban fighters had taken refuge in the Gardez region of Afghanistan.[57][58] The SMAW-NE was used by the US Marines during the First Battle of Fallujah and Second Battle of Fallujah. 
-Reports by the rebel fighters of the Free Syrian Army claim the Syrian Air Force used such weapons against residential area targets occupied by the rebel fighters, as for instance in the Battle for Aleppo[59] and also in Kafar Batna.[60] A United Nations panel of human rights investigators reported that the Syrian government used thermobaric bombs against the rebellious town of Qusayr in March 2013.[61] -Non-military use[edit] -Thermobaric and fuel-air explosives have been used in guerrilla warfare since the 1983 Beirut barracks bombing in Lebanon, which used a gas-enhanced explosive mechanism, probably propane, butane or acetylene.[62] The explosive used by the bombers in the 1993 World Trade Center bombing incorporated the FAE principle, using three tanks of bottled hydrogen gas to enhance the blast.[63][64] Jemaah Islamiyah bombers used a shock-dispersed solid fuel charge,[65] based on the thermobaric principle,[66] to attack the Sari nightclub in the 2002 Bali bombings.[67] -See also[edit] -Bunker buster -Dust explosion -FOAB -Flame fougasse -MOAB -RPO-A -SMAW -References[edit] -Jump up ^ Algeria Isp (2011-10-18). "Libye – l'Otan utilise une bombe FAE | Politique, Algérie". Algeria ISP. Retrieved 2013-04-23. -Jump up ^ Nettleton, J. Occ. Accidents, 1, 149 (1976). -Jump up ^ Strehlow, 14th. Symp. (Int.) Comb. 1189, Comb. Inst. (1973). -Jump up ^ Health and Safety Environmental Agency, 5th. and final report, 2008. -Jump up ^ See Nanofuel/Oxidizers For Energetic Compositions – John D. Sullivan and Charles N. Kingery (1994) High explosive disseminator for a high explosive air bomb. -Jump up ^ Slavica Terzić, Mirjana Dakić Kolundžija, Milovan Azdejković and Gorgi Minov (2004) Compatibility Of Thermobaric Mixtures Based On Isopropyl Nitrate And Metal Powders. -Jump up ^ Meyer, Rudolf; Josef Köhler and Axel Homburg (2007). Explosives. Weinheim: Wiley-VCH. pp. 312. ISBN 3-527-31656-6. OCLC 165404124. -Jump up ^ Howard C. Hornig (1998) Non-focusing active warhead. 
-Jump up ^ Chris Ludwig (Talley Defense) Verifying Performance of Thermobaric Materials for Small to Medium Caliber Rocket Warheads. -Jump up ^ Martin M.West (1982) Composite high explosives for high energy blast applications. -Jump up ^ Raafat H. Guirguis (2005) Reactively Induced Fragmenting Explosives. -Jump up ^ Michael Dunning, William Andrews and Kevin Jaansalu (2005) The Fragmentation of Metal Cylinders Using Thermobaric Explosives. -Jump up ^ David L. Frost, Fan Zhang, Stephen B. Murray and Susan McCahan Critical Conditions For Ignition Of Metal Particles In A Condensed Explosive. -Jump up ^ The Army Doctrine and Training Bulletin (2001) The Threat from Blast Weapons. -Jump up ^ INTERNATIONAL DEFENCE REVIEW (2004) ENHANCED BLAST AND THERMOBARICS. -Jump up ^ F. Winterberg Conjectured Metastable Super-Explosives formed under High Pressure for Thermonuclear Ignition. -Jump up ^ Zhang, Fan (Medicine Hat, CA) Murray, Stephen Burke (Medicine Hat, CA) Higgins, Andrew (Montreal, CA) (2005) Super compressed detonation method and device to effect such detonation. -Jump up ^ Nettleton, arch. combust.,1,131, (1981). -Jump up ^ Stephen B. Murray Fundamental and Applied Studies of Fuel-Air Detonation. -Jump up ^ John H. Lee (1992) Chemical initiation of detonation in fuel-air explosive clouds. -Jump up ^ Frank E. Lowther (1989) Nuclear-sized explosions without radiation. -Jump up ^ Nettleton, Comb. and Flame, 24,65 (1975). -Jump up ^ Fire Prev. Sci. and Tech. No. 19,4 (1976) -Jump up ^ May L.Chan (2001) Advanced Thermobaric Explosive Compositions. -Jump up ^ New Thermobaric Materials and Weapon Concepts. -Jump up ^ Robert C. Morris (2003) Small Thermobaric Weapons An Unnoticed Threat.[dead link] -^ Jump up to: a b "Backgrounder on Russian Fuel Air Explosives ("Vacuum Bombs") | Human Rights Watch". Hrw.org. 2000-02-01. Retrieved 2013-04-23. 
-Jump up ^ Defense Intelligence Agency, "Future Threat to the Soldier System, Volume I; Dismounted Soldier--Middle East Threat", September 1993, p. 73. Obtained by Human Rights Watch under the U.S. Freedom of Information Act. -Jump up ^ "Press | Human Rights Watch". Hrw.org. 2008-12-27. Retrieved 2009-07-30. -Jump up ^ Lester W. Grau and Timothy L. Thomas(2000)"Russian Lessons Learned From the Battles For Grozny" -Jump up ^ "Modern Firearms – GM-94". World.guns.ru. 2011-01-24. Retrieved 2011-07-12. -Jump up ^ "New RPO Shmel-M Infantry Rocket Flamethrower Man-Packable Thermobaric Weapon". defensereview.com. 2006-07-19. Retrieved 2012-08-27. -Jump up ^ "Shmel-M: Infantry Rocket-assisted Flamethrower of Enhanced Range and Lethality". Kbptula.ru. Retrieved 2013-12-28. -Jump up ^ "Modern Firearms – RShG-1". World.guns.ru. 2011-01-24. Retrieved 2011-07-12. -Jump up ^ "Modern Firearms – RMG". World.guns.ru. 2011-01-24. Retrieved 2011-07-12. -Jump up ^ "RMG - A new Multi-Purpose Assault Weapon from Bazalt". defense-update.com. Retrieved 2012-08-27. -Jump up ^ "Kornet-EM: Multi-purpose Long-range Missile System". Kbptula.ru. Retrieved 2013-12-28. -Jump up ^ "TOS-1 Heavy flamethrower system". military-today.com. Retrieved 2012-08-27. -Jump up ^ "SS-26". Missilethreat.com. Retrieved 2013-12-28. -Jump up ^ Air Power Australia (2007-07-04). "How to Destroy the Australian Defence Force". Ausairpower.net. Retrieved 2011-07-12. -Jump up ^ "Russia unveils devastating vacuum bomb". ABC News. 2007. Retrieved 2007-09-12. -Jump up ^ "Video of test explosion". BBC News. 2007. Retrieved 2007-09-12. -Jump up ^ Harding, Luke (2007-09-12). "Russia unveils the father of all bombs". London: The Guardian. Retrieved 2007-09-12. -Jump up ^ Berhie, Saba. "Dropping the Big One | Popular Science". Popsci.com. Retrieved 2011-07-12. -Jump up ^ John Pike (2003-04-22). "XM1060 40mm Thermobaric Grenade". Globalsecurity.org. Retrieved 2011-07-12. 
-Jump up ^ David Hambling (2005) "Marines Quiet About Brutal New Weapon" -Jump up ^ John Pike (2001-09-11). "AGM-114N Metal Augmented Charge (MAC) Thermobaric Hellfire". Globalsecurity.org. Retrieved 2011-07-12. -^ Jump up to: a b John Pike. "TOS-1 Buratino 220mm Multiple Rocket Launcher". Globalsecurity.org. Retrieved 2013-04-23. -Jump up ^ "Foreign Military Studies Office Publications - A 'Crushing' Victory: Fuel-Air Explosives and Grozny 2000". Fmso.leavenworth.army.mil. Retrieved 2013-04-23. -Jump up ^ "Russian forces faulted in Beslan school tragedy". Christian Science Monitor. 1 September 2006. Retrieved 14 February 2007. -Jump up ^ Russia: Independent Beslan Investigation Sparks Controversy, The Jamestown Foundation, 29 August 2006 -Jump up ^ Beslan still a raw nerve for Russia, BBC News, 1 September 2006 -Jump up ^ ACHING TO KNOW, Los Angeles Times, 27 August 2005 -Jump up ^ Searching for Traces of “Shmel” in Beslan School, Kommersant, 12 September 2005 -Jump up ^ A Reversal Over Beslan Only Fuels Speculation, The Moscow Times, 21 July 2005 -Jump up ^ "MoD's Controversial Thermobaric Weapons Use in Afghanistan". Armedforces-int.com. 2008-06-23. Retrieved 2013-04-23. -Jump up ^ "US Uses Bunker-Busting 'Thermobaric' Bomb for First Time". Commondreams.org. 2002-03-03. Retrieved 2013-04-23. -Jump up ^ John Pike. "BLU-118/B Thermobaric Weapon Demonstration / Hard Target Defeat Program". Globalsecurity.org. Retrieved 2013-04-23. -Jump up ^ "Syria rebels say Assad using 'mass-killing weapons' in Aleppo". October 10, 2012. Retrieved November 11, 2012. -Jump up ^ "Dropping Thermobaric Bombs on Residential Areas in Syria_ Nov. 5. 2012". First Post. November 11, 2012. Retrieved November 11, 2012. -Jump up ^ Cumming-Bruce, Nick (2013-06-04). "U.N. Panel Reports Increasing Brutality by Both Sides in Syria". The New York Times. -Jump up ^ Richard J. Grunawalt. Hospital Ships In The War On Terror: Sanctuaries or Targets? (PDF), Naval War College Review, Winter 2005, pp. 
110–11. -Jump up ^ Paul Rogers (2000) "Politics in the Next 50 Years: The Changing Nature of International Conflict" -Jump up ^ J. Gilmore Childers, Henry J. DePippo (February 24, 1998). "Senate Judiciary Committee, Subcommittee on Technology, Terrorism, and Government Information hearing on "Foreign Terrorists in America: Five Years After the World Trade Center"". Fas.org. Retrieved 2011-07-12. -Jump up ^ P. Neuwald, H. Reichenbach, A. L. Kuhl (2003). "Shock-Dispersed-Fuel Charges-Combustion in Chambers and Tunnels". -Jump up ^ David Eshel (2006). "Is the world facing Thermobaric Terrorism?".[dead link] -Jump up ^ Wayne Turnbull (2003). "Bali:Preparations". -External links[edit] -Fuel/Air Explosive (FAE) -Thermobaric Explosive (Global Security) -Aspects of thermobaric weaponry (PDF) – Dr. Anna E Wildegger-Gaissmaier, Australian Defence Force Health -Thermobaric warhead for RPG-7 -XM1060 40 mm Thermobaric Grenade (Global Security) -Defense Update: Fuel-Air Explosive Mine Clearing System -Foreign Military Studies Office – A 'Crushing' Victory: Fuel-Air Explosives and Grozny 2000 -Soon to make a comeback in Afghanistan -Russia claims to have tested the most powerful "Vacuum" weapon -Categories: Explosive weaponsAmmunitionThermobaric weaponsAnti-personnel weapons -Navigation menu -Create accountLog inArticleTalkReadEditView history - -Main page -Contents -Featured content -Current events -Random article -Donate to Wikipedia -Wikimedia Shop -Interaction -Help -About Wikipedia -Community portal -Recent changes -Contact page -Tools -What links here -Related changes -Upload file -Special pages -Permanent link -Page information -Wikidata item -Cite this page -Print/export -Create a book -Download as PDF -Printable version -Languages -العربية -Беларуская -Български -Čeština -Deutsch -Español -فارسی -Français -हिन्दी -Italiano -עברית -Latviešu -Македонски -Nederlands -日本語 -Polski -Русский -Suomi -Svenska -Türkçe -Українська -Tiếng Việt -粵語 -中文 -Edit links -This page was last 
modified on 28 November 2014 at 10:32. -Text is available under the Creative Commons Attribution-ShareAlike License; additional terms may apply. By using this site, you agree to the Terms of Use and Privacy Policy. Wikipedia® is a registered trademark of the Wikimedia Foundation, Inc., a non-profit organization. -Privacy policyAbout WikipediaDisclaimersContact WikipediaDevelopersMobile viewWikimedia Foundation Powered by MediaWiki - - -Gunpowder -From Wikipedia, the free encyclopedia -For other uses, see Gunpowder (disambiguation). -In American English, the term gunpowder also refers broadly to any gun propellant.[1] Gunpowder (black powder) as described in this article is not normally used in modern firearms, which instead use smokeless powders. - -Black powder for muzzleloading rifles and pistols in FFFG granulation size. American Quarter (diameter 24 mm) for comparison. -Gunpowder, also known as black powder, is a chemical explosive—the earliest known. It is a mixture of sulfur, charcoal, and potassium nitrate (saltpeter). The sulfur and charcoal act as fuels, and the saltpeter is an oxidizer.[2][3] Because of its burning properties and the amount of heat and gas volume that it generates, gunpowder has been widely used as a propellant in firearms and as a pyrotechnic composition in fireworks. -Gunpowder is assigned the UN number UN0027 and has a hazard class of 1.1D. It has a flash point of approximately 427–464 °C (801–867 °F). The specific flash point may vary based on the specific composition of the gunpowder. Gunpowder's gravity is 1.70–1.82 (mercury method) or 1.92–2.08 (pycnometer), and it has a pH of 6.0–8.0. 
It is also considered to be an insoluble material.[4] -Gunpowder was, according to prevailing academic consensus, invented in the 9th century in China,[5][6] and the earliest record of a written formula for gunpowder appears in the 11th century Song Dynasty text, Wujing Zongyao.[7] This discovery led to the invention of fireworks and the earliest gunpowder weapons in China. In the centuries following the Chinese discovery, gunpowder weapons began appearing in the Muslim world, Europe, and India. The technology spread from China through the Middle East or Central Asia, and then into Europe.[8] The earliest Western accounts of gunpowder appear in texts written by English philosopher Roger Bacon in the 13th century.[9] -Gunpowder is classified as a low explosive because of its relatively slow decomposition rate and consequently low brisance. Low explosives deflagrate (i.e., burn) at subsonic speeds, whereas high explosives detonate, producing a supersonic wave. Gunpowder's burning rate increases with pressure, so it bursts containers if contained but otherwise just burns in the open. Ignition of the powder packed behind a bullet must generate enough pressure to force it from the muzzle at high speed, but not enough to rupture the gun barrel. Gunpowder thus makes a good propellant, but is less suitable for shattering rock or fortifications. Gunpowder was widely used to fill artillery shells and in mining and civil engineering to blast rock roughly until the second half of the 19th century, when the first high explosives (nitro-explosives) were discovered. Gunpowder is no longer used in modern explosive military warheads, nor is it used as main explosive in mining operations due to its cost relative to that of newer alternatives such as ammonium nitrate/fuel oil (ANFO).[10] Black powder is still used as a delay element in various munitions where its slow-burning properties are valuable. 
-Formulations used in blasting rock (such as in quarrying) are called blasting powder. -Contents [hide] -1 History -1.1 China -1.2 Middle East -1.3 Mainland Europe -1.4 Britain and Ireland -1.5 India -1.6 Indonesia -2 Manufacturing technology -3 Composition and characteristics -4 Serpentine -5 Corning -6 Modern types -7 Other types of gunpowder -8 Sulfur-free gunpowder -9 Combustion characteristics -9.1 Advantages -9.2 Disadvantages -9.3 Transportation -10 Other uses -11 See also -12 References -13 External links -History[edit] - -Early Chinese rocket - -A Mongol bomb thrown against a charging Japanese samurai during the Mongol invasions of Japan after founding the Yuan Dynasty, 1281. -Main article: History of gunpowder -Gunpowder was invented in China while taoists attempted to create a potion of immortality. Chinese military forces used gunpowder-based weapons (i.e. rockets, guns, cannons) and explosives (i.e. grenades and different types of bombs) against the Mongols when the Mongols attempted to invade and breach city fortifications on China's northern borders. After the Mongols conquered China and founded the Yuan Dynasty, they used the Chinese gunpowder-based weapons technology in their attempted invasion of Japan; they also used gunpowder to fuel rockets. -The mainstream scholarly consensus is that gunpowder was invented in China, spread through the Middle East, and then into Europe,[8] although there is a dispute over how much the Chinese advancements in gunpowder warfare influenced later advancements in the Middle East and Europe.[11][12] The spread of gunpowder across Asia from China is widely attributed to the Mongols. One of the first examples of Europeans encountering gunpowder and firearms is at the Battle of Mohi in 1241. At this battle the Mongols not only used gunpowder in early Chinese firearms but in the earliest grenades as well. 
-A major problem confronting the study of the early history of gunpowder is ready access to sources close to the events described. Often enough, the first records potentially describing use of gunpowder in warfare were written several centuries after the fact, and may well have been colored by the contemporary experiences of the chronicler.[13] It is also difficult to accurately translate original alchemy texts, especially medieval Chinese texts that try to explain phenomena through metaphor, into modern scientific language with rigidly defined terminology. The translation difficulty has led to errors or loose interpretations bordering on artistic licence.[14][15] Early writings potentially mentioning gunpowder are sometimes marked by a linguistic process where old words acquired new meanings.[16] For instance, the Arabic word naft transitioned from denoting naphtha to denoting gunpowder, and the Chinese word pao evolved from meaning catapult to referring to cannon.[17] According to science and technology historian Bert S. Hall: "It goes without saying, however, that historians bent on special pleading, or simply with axes of their own to grind, can find rich material in these terminological thickets."[18] -China[edit] -Further information: Wujing Zongyao, Four Great Inventions and List of Chinese inventions - -Chinese Ming Dynasty (1368-1644) matchlock firearms -Saltpeter was known to the Chinese by the mid-1st century AD and there is strong evidence of the use of saltpeter and sulfur in various largely medicinal combinations.[19] A Chinese alchemical text dated 492 noted saltpeter burnt with a purple flame, providing a practical and reliable means of distinguishing it from other inorganic salts, thus enabling alchemists to evaluate and compare purification techniques; the earliest Latin accounts of saltpeter purification are dated after 1200.[20] - -Yuan Dynasty bronze hand cannon from 1332 at the Shanghai Museum. -The earliest known chemical formula for gunpowder appeared in a 9th-century Taoist text (c. 
808); it describes mixing six parts sulfur to six parts saltpeter to one part birthwort herb (which would provide carbon).[21] -The first reference to the incendiary properties of such mixtures is the passage of the Zhenyuan miaodao yaolüe, a Taoist text tentatively dated to the mid-9th century AD:[20] "Some have heated together sulfur, realgar and saltpeter with honey; smoke and flames result, so that their hands and faces have been burnt, and even the whole house where they were working burned down."[22] -The Chinese word for "gunpowder" is Chinese: 火药/火藥; pinyin: huŏ yào /xuou yɑʊ/, which literally means "Fire Medicine";[23] however this name only came into use some centuries after the mixture's discovery.[24] During the 9th century, Taoist monks or alchemists searching for an elixir of immortality had serendipitously stumbled upon gunpowder.[8][25] The Chinese wasted little time in applying gunpowder to the development of weapons, and in the centuries that followed, they produced a variety of gunpowder weapons, including flamethrowers, rockets, bombs, and land mines, before inventing guns as a projectile weapon.[26] Archaeological evidence of a hand cannon has been excavated in Manchuria dated from the late 1200s[27] and the shells of explosive bombs have been discovered in a shipwreck off the shore of Japan dated from 1281, during the Mongol invasions of Japan.[28] -The Chinese "Wu Ching Tsung Yao" (Complete Essentials from the Military Classics), written by Tseng Kung-Liang between 1040–1044, provides encyclopedia references to a variety of mixtures that included petrochemicals—as well as garlic and honey. A slow match for flame throwing mechanisms using the siphon principle and for fireworks and rockets are mentioned. 
The mixture formulas in this book do not contain enough saltpeter to create an explosive however; being limited to at most 50% saltpeter, they produce an incendiary.[29] The Essentials was however written by a Song Dynasty court bureaucrat, and there's little evidence that it had any immediate impact on warfare; there is no mention of gunpowder use in the chronicles of the wars against the Tanguts in the eleventh century, and China was otherwise mostly at peace during this century. The first chronicled use of "fire spears" (or "fire lances") is at the siege of De'an in 1132.[30] - -Formula for gunpowder in 1044 Wujing zongyao part I vol 12 - - -Instruction for fire bomb in Wujing zongyao - - -Fire bomb - - -Fire grenade - - -Proto-cannon from the Ming Dynasty text Huolongjing - - -Land mine from the Ming Dynasty text Huolongjing - - -Fire arrow rocket launcher from the Wujing zongyao -Middle East[edit] -Main articles: Inventions in the Islamic world and Alchemy and chemistry in Islam - -The Sultani Cannon, a very heavy bronze breech-loading cannon of type used by Ottoman Empire in the conquest of Constantinople, in 1453. -The Muslims acquired knowledge of gunpowder some time between 1240 and 1280, by which time the Syrian Hasan al-Rammah had written, in Arabic, recipes for gunpowder, instructions for the purification of saltpeter, and descriptions of gunpowder incendiaries. Gunpowder arrived in the Middle East, possibly through India, from China. 
This is implied by al-Rammah's usage of "terms that suggested he derived his knowledge from Chinese sources" and his references to saltpeter as "Chinese snow" Arabic: ثلج الصين‎ thalj al-ṣīn, fireworks as "Chinese flowers" and rockets as "Chinese arrows".[31] However, because al-Rammah attributes his material to "his father and forefathers", al-Hassan argues that gunpowder became prevalent in Syria and Egypt by "the end of the twelfth century or the beginning of the thirteenth".[32] Persians called saltpeter "Chinese salt" [33][34][35][36][37] or "salt from Chinese salt marshes" (namak shūra chīnī Persian: نمک شوره چيني‎).[38][39] - -A picture of a 15th-century Granadian cannon from the book Al-izz wal rifa'a. -Al-Hassan claims that in the Battle of Ain Jalut of 1260, the Mamluks used against the Mongols in "the first cannon in history" gunpowder formula with near-identical ideal composition ratios for explosive gunpowder.[32] Other historians urge caution regarding claims of Islamic firearms use in the 1204-1324 period as late medieval Arabic texts used the same word for gunpowder, naft, that they used for an earlier incendiary, naphtha.[13][17] Khan claims that it was invading Mongols who introduced gunpowder to the Islamic world[40] and cites Mamluk antagonism towards early musketeers in their infantry as an example of how gunpowder weapons were not always met with open acceptance in the Middle East.[41] Similarly, the refusal of their Qizilbash forces to use firearms contributed to the Safavid rout at Chaldiran in 1514.[41] -The earliest surviving documentary evidence for the use of the hand cannon, considered the oldest type of portable firearm and a forerunner of the handgun, are from several Arabic manuscripts dated to the 14th century.[42] Al-Hassan argues that these are based on earlier originals and that they report hand-held cannons being used by the Mamluks at the Battle of Ain Jalut in 1260.[32] -Hasan al-Rammah included 107 gunpowder recipes in his 
text al-Furusiyyah wa al-Manasib al-Harbiyya (The Book of Military Horsemanship and Ingenious War Devices), 22 of which are for rockets. If one takes the median of 17 of these 22 compositions for rockets (75% nitrates, 9.06% sulfur, and 15.94% carbon), it is nearly identical to the modern reported ideal gunpowder recipe of 75% potassium nitrate, 10% sulfur, and 15% carbon.[32] -The state-controlled manufacture of gunpowder by the Ottoman Empire through early supply chains to obtain nitre, sulfur and high-quality charcoal from oaks in Anatolia contributed significantly to its expansion the 15th and 18th century. It was not until later in the 19th century when the syndicalist production of Turkish gunpowder was greatly reduced, which coincided with the decline of its military might.[43] -Mainland Europe[edit] -Several sources mention Chinese firearms and gunpowder weapons being deployed by the Mongols against European forces at the Battle of Mohi in 1241.[44][45][46] Professor Kenneth Warren Chase credits the Mongols for introducing into Europe gunpowder and its associated weaponry.[47] -C. F. Temler interprets Peter, Bishop of Leon, as reporting the use of cannons in Seville in 1248.[48] -In Europe, one of the first mentions of gunpowder use appears in a passage found in Roger Bacon's Opus Maius and Opus Tertium in what has been interpreted as being firecrackers. The most telling passage reads: "We have an example of these things (that act on the senses) in [the sound and fire of] that children's toy which is made in many [diverse] parts of the world; i.e., a device no bigger than one's thumb. 
From the violence of that salt called saltpeter [together with sulfur and willow charcoal, combined into a powder] so horrible a sound is made by the bursting of a thing so small, no more than a bit of parchment [containing it], that we find [the ear assaulted by a noise] exceeding the roar of strong thunder, and a flash brighter than the most brilliant lightning."[9] In the early 20th century, British artillery officer Henry William Lovett Hime proposed that another work tentatively attributed to Bacon, Epistola de Secretis Operibus Artis et Naturae, et de Nullitate Magiae contained an encrypted formula for gunpowder. This claim has been disputed by historians of science including Lynn Thorndike, John Maxson Stillman and George Sarton and by Bacon's editor Robert Steele, both in terms of authenticity of the work, and with respect to the decryption method.[9] In any case, the formula claimed to have been decrypted (7:5:5 saltpeter:charcoal:sulfur) is not useful for firearms use or even firecrackers, burning slowly and producing mostly smoke.[49][50] - -Cannon forged in 1667 at the Fortín de La Galera, Nueva Esparta, Venezuela. -The Liber Ignium, or Book of Fires, attributed to Marcus Graecus, is a collection of incendiary recipes, including some gunpowder recipes. Partington dates the gunpowder recipes to approximately 1300.[51] One recipe for "flying fire" (ingis volatilis) involves saltpeter, sulfur, and colophonium, which, when inserted into a reed or hollow wood, "flies away suddenly and burns up everything." 
Another recipe, for artificial "thunder", specifies a mixture of one pound native sulfur, two pounds linden or willow charcoal, and six pounds of saltpeter.[52] Another specifies a 1:3:9 ratio.[52] -Some of the gunpowder recipes of De Mirabilibus Mundi of Albertus Magnus are identical to the recipes of the Liber Ignium, and according to Partington, "may have been taken from that work, rather than conversely."[53] Partington suggests that some of the book may have been compiled by Albert's students, "but since it is found in thirteenth century manuscripts, it may well be by Albert."[53] Albertus Magnus died in 1280. -A common German folk-tale is of the German priest/monk named Berthold Schwarz who independently invented gunpowder, thus earning it the German name Schwarzpulver or in English Schwarz's powder. Schwarz is also German for black so this folk-tale, while likely containing elements of truth, is considered problematic. -A major advance in manufacturing began in Europe in the late 14th century when the safety and thoroughness of incorporation was improved by wet grinding; liquid, such as distilled spirits or perhaps the urine of wine-drinking bishops[54] was added during the grinding-together of the ingredients and the moist paste dried afterwards. (The principle of wet mixing to prevent the separation of dry ingredients, invented for gunpowder, is used today in the pharmaceutical industry.[55]) It was also discovered that if the paste was rolled into balls before drying the resulting gunpowder absorbed less water from the air during storage and traveled better. The balls were then crushed in a mortar by the gunner immediately before use, with the old problem of uneven particle size and packing causing unpredictable results. -If the right size particles were chosen, however, the result was a great improvement in power. 
Forming the damp paste into corn-sized clumps by hand or with the use of a sieve instead of larger balls produced a product after drying that loaded much better, as each tiny piece provided its own surrounding air space that allowed much more rapid combustion than a fine powder. This "corned" gunpowder was from 30% to 300% more powerful. An example is cited where 34 pounds of serpentine was needed to shoot a 47 pound ball, but only 18 pounds of corned powder.[54] The optimum size of the grain depended on its use; larger for large cannon, finer for small arms. Larger cast cannons were easily muzzle-loaded with corned powder using a long-handled ladle. Corned powder also retained the advantage of low moisture absorption, as even tiny grains still had much less surface area to attract water than a floury powder. -During this time, European manufacturers also began regularly purifying saltpeter, using wood ashes containing potassium carbonate to precipitate calcium from their dung liquor, and using ox blood, alum, and slices of turnip to clarify the solution.[54] -Gunpowder-making and metal-smelting and casting for shot and cannon fee was closely held by skilled military tradesmen, who formed guilds that collected dues, tested apprentices, and gave pensions. "Fire workers" were also required to craft fireworks for celebrations of victory or peace. During the Renaissance, two European schools of pyrotechnic thought emerged, one in Italy and the other at Nuremberg, Germany. Vannoccio Biringuccio, born in 1480, was a member of the guild Fraternita di Santa Barbara but broke with the tradition of secrecy by setting down everything he knew in a book titled De la pirotechnia, written in vernacular. 
The first printed book on either gunpowder or metalworking, it was published posthumously in 1540, with 9 editions over 138 years, and also reprinted by MIT Press in 1966.[54] By the mid-17th century fireworks were used for entertainment on an unprecedented scale in Europe, being popular even at resorts and public gardens.[56] -In 1774 Louis XVI ascended to the throne of France at age 20. After he discovered that France was not self-sufficient in gunpowder, a Gunpowder Administration was established; to head it, the lawyer Antoine Lavoisier was appointed. Although from a bourgeois family, after his degree in law Lavoisier became wealthy from a company set up to collect taxes for the Crown; this allowed him to pursue experimental natural science as a hobby.[57] -Without access to cheap Indian saltpeter (controlled by the British), for hundreds of years France had relied on saltpetermen with royal warrants, the droit de fouille or "right to dig", to seize nitrous-containing soil and demolished walls of barnyards, without compensation to the owners.[58] This caused farmers, the wealthy, or entire villages to bribe the petermen and the associated bureaucracy to leave their buildings alone and the saltpeter uncollected. Lavoisier instituted a crash program to increase saltpeter production, revised (and later eliminated) the droit de fouille, researched best refining and powder manufacturing methods, instituted management and record-keeping, and established pricing that encouraged private investment in works. Although saltpeter from new Prussian-style putrefaction works had not been produced yet (the process taking about 18 months), in only a year France had gunpowder to export. A chief beneficiary of this surplus was the American Revolution. 
By careful testing and adjusting the proportions and grinding time, powder from mills such as at Essonne outside Paris became the best in the world by 1788, and inexpensive.[58] [59] -Britain and Ireland[edit] - -The old Powder or Pouther magazine dating from 1642, built by order of Charles I. Irvine, North Ayrshire, Scotland -Gunpowder production in Britain appears to have started in the mid 14th century AD with the aim of supplying the English Crown.[60] Records show that gunpowder was being made, in England, in 1346, at the Tower of London; a powder house existed at the Tower in 1461; and in 1515 three King's gunpowder makers worked there.[60] Gunpowder was also being made or stored at other Royal castles, such as Portchester. By the early 14th century, according to N.J.G. Pounds's study The Medieval Castle in England and Wales, many English castles had been deserted and others were crumbling. Their military significance faded except on the borders. Gunpowder had made smaller castles useless.[61] -Henry VIII of England was short of gunpowder when he invaded France in 1544 and England needed to import gunpowder via the port of Antwerp in what is now Belgium.[60] -The English Civil War (1642–1645) led to an expansion of the gunpowder industry, with the repeal of the Royal Patent in August 1641.[60] -Two British physicists, Andrew Noble and Frederick Abel, worked to improve the properties of black powder during the late 19th century. This formed the basis for the Noble-Abel gas equation for internal ballistics.[62] -The introduction of smokeless powder in the late 19th century led to a contraction of the gunpowder industry. After the end of World War I, the majority of the United Kingdom gunpowder manufacturers merged into a single company, "Explosives Trades limited"; and number of sites were closed down, including those in Ireland. This company became Nobel Industries Limited; and in 1926 became a founding member of Imperial Chemical Industries. 
The Home Office removed gunpowder from its list of Permitted Explosives; and shortly afterwards, on 31 December 1931, the former Curtis & Harvey's Glynneath gunpowder factory at Pontneddfechan, in Wales, closed down, and it was demolished by fire in 1932.[63] - -Gunpowder storing barrels at Martello tower in Point Pleasant Park -The last remaining gunpowder mill at the Royal Gunpowder Factory, Waltham Abbey was damaged by a German parachute mine in 1941 and it never reopened.[64] This was followed by the closure of the gunpowder section at the Royal Ordnance Factory, ROF Chorley, the section was closed and demolished at the end of World War II; and ICI Nobel's Roslin gunpowder factory, which closed in 1954.[64][65] -This left the sole United Kingdom gunpowder factory at ICI Nobel's Ardeer site in Scotland; it too closed in October 1976.[64] Since then gunpowder has been imported into the United Kingdom. In the late 1970s/early 1980s gunpowder was bought from eastern Europe, particularly from what was then the German Democratic Republic and former Yugoslavia. -India[edit] - -In the year 1780 the British began to annex the territories of the Sultanate of Mysore, during the Second Anglo-Mysore War. The British battalion was defeated during the Battle of Guntur, by the forces of Hyder Ali, who effectively utilized Mysorean rockets and Rocket artillery against the closely massed British forces. - -Mughal Emperor Shah Jahan, hunting deer using a Matchlock as the sun sets in the horizon. 
-Gunpowder and gunpowder weapons were transmitted to India through the Mongol invasions of India.[66][67] The Mongols were defeated by Alauddin Khilji of the Delhi Sultanate, and some of the Mongol soldiers remained in northern India after their conversion to Islam.[67] It was written in the Tarikh-i Firishta (1606–1607) that Nasir ud din Mahmud the ruler of the Delhi Sultanate presented the envoy of the Mongol ruler Hulegu Khan with a dazzling pyrotechnics display upon his arrival in Delhi in 1258 AD. Nasir ud din Mahmud tried to express his strength as a ruler and tried to ward off any Mongol attempt similar to the Siege of Baghdad (1258).[68] Firearms known as top-o-tufak also existed in many Muslim kingdoms in India by as early as 1366 AD.[68] From then on the employment of gunpowder warfare in India was prevalent, with events such as the "Siege of Belgaum" in 1473 by Sultan Muhammad Shah Bahmani.[69] -The shipwrecked Ottoman Admiral Seydi Ali Reis is known to have introduced the earliest type of Matchlock weapons, which the Ottomans used against the Portuguese during the Siege of Diu (1531). After that, a diverse variety of firearms; large guns in particular, became visible in Tanjore, Dacca, Bijapur, and Murshidabad.[70] Guns made of bronze were recovered from Calicut (1504)- the former capital of the Zamorins[71] -The Mughal Emperor Akbar mass-produced matchlocks for the Mughal Army. Akbar is personally known to have shot a leading Rajput commander during the Siege of Chittorgarh.[72] The Mughals began to use Bamboo rockets (mainly for signalling) and employ Sappers: special units that undermined heavy stone fortifications to plant gunpowder charges. -The Mughal Emperor Shah Jahan is known to have introduced much more advanced Matchlocks, their designs were a combination of Ottoman and Mughal designs. 
Shah Jahan also countered the British and other Europeans in his province of Gujarāt, which supplied Europe with saltpeter for use in gunpowder warfare during the 17th century.[73]
Javanese bronze breech-loaded swivel-guns, known as meriam, or erroneously as lantaka, were used widely by the Majapahit navy as well as by pirates and rival lords. The demise of the Majapahit empire and the dispersal of disaffected skilled bronze cannon-smiths to Brunei, modern Sumatra, Malaysia and the Philippines led to widespread use, especially in the Makassar Strait.
Historically, a marble or limestone edge runner mill, running on a limestone bed was used in Great Britain; however, by the mid 19th century AD this had changed to either an iron shod stone wheel or a cast iron wheel running on an iron bed.[81] The mix was dampened with alcohol or water during grinding to prevent accidental ignition. This also helps the extremely soluble saltpeter mix into the microscopic nooks and crannies of the very high surface-area charcoal. -Around the late 14th century AD, European powdermakers first began adding liquid during grinding to improve mixing, reduce dust, and with it the risk of explosion.[82] The powder-makers would then shape the resulting paste of dampened gunpowder, known as mill cake, into corns, or grains, to dry. Not only did corned powder keep better because of its reduced surface area, gunners also found that it was more powerful and easier to load into guns. Before long, powder-makers standardized the process by forcing mill cake through sieves instead of corning powder by hand. -The improvement was based on reducing the surface area of a higher density composition. At the beginning of the 19th century, makers increased density further by static pressing. They shoveled damp mill cake into a two-foot square box, placed this beneath a screw press and reduced it to 1/2 its volume. "Presscake" had the hardness of slate. They broke the dried slabs with hammers or rollers, and sorted the granules with sieves into different grades. In the United States, Irenee du Pont, who had learned the trade from Lavoisier, tumbled the dried grains in rotating barrels to round the edges and increase durability during shipping and handling. (Sharp grains rounded off in transport, producing fine "meal dust" that changed the burning properties.) -Another advance was the manufacture of kiln charcoal by distilling wood in heated iron retorts instead of burning it in earthen pits. 
Controlling the temperature influenced the power and consistency of the finished gunpowder. In 1863, in response to high prices for Indian saltpeter, DuPont chemists developed a process using potash or mined potassium chloride to convert plentiful Chilean sodium nitrate to potassium nitrate.[83] -During the 18th century gunpowder factories became increasingly dependent on mechanical energy.[84] Despite mechanization, production difficulties related to humidity control, especially during the pressing, were still present in the late 19th century. A paper from 1885 laments that "Gunpowder is such a nervous and sensitive spirit, that in almost every process of manufacture it changes under our hands as the weather changes." Pressing times to the desired density could vary by factor of three depending on the atmospheric humidity.[85] -Composition and characteristics[edit] -The term black powder was coined in the late 19th century, primarily in the United States, to distinguish prior gunpowder formulations from the new smokeless powders and semi-smokeless powders, in cases where these are not referred to as cordite. Semi-smokeless powders featured bulk volume properties that approximated black powder, but had significantly reduced amounts of smoke and combustion products. Smokeless powder has different burning properties (pressure vs. time) and can generate higher pressures and work per gram. This can rupture older weapons designed for black powder. Smokeless powders ranged in color from brownish tan to yellow to white. Most of the bulk semi-smokeless powders ceased to be manufactured in the 1920s.[86][87][88] -Black powder is a granular mixture of -a nitrate, typically potassium nitrate (KNO3), which supplies oxygen for the reaction; -charcoal, which provides carbon and other fuel for the reaction, simplified as carbon (C); -sulfur (S), which, while also serving as a fuel, lowers the temperature required to ignite the mixture, thereby increasing the rate of combustion. 
-Potassium nitrate is the most important ingredient in terms of both bulk and function because the combustion process releases oxygen from the potassium nitrate, promoting the rapid burning of the other ingredients.[89] To reduce the likelihood of accidental ignition by static electricity, the granules of modern black powder are typically coated with graphite, which prevents the build-up of electrostatic charge. -Charcoal does not consist of pure carbon; rather, it consists of partially pyrolyzed cellulose, in which the wood is not completely decomposed. Carbon differs from charcoal. Whereas charcoal's autoignition temperature is relatively low, carbon's is much greater. Thus, a black powder composition containing pure carbon would burn similarly to a match head, at best.[90] -The current standard composition for the black powders that are manufactured by pyrotechnicians was adopted as long ago as 1780. Proportions by weight are 75% potassium nitrate (known as saltpeter or saltpetre), 15% softwood charcoal, and 10% sulfur.[81] These ratios have varied over the centuries and by country, and can be altered somewhat depending on the purpose of the powder. For instance, power grades of black powder, unsuitable for use in firearms but adequate for blasting rock in quarrying operations, is called blasting powder rather than gunpowder with standard proportions of 70% nitrate, 14% charcoal, and 16% sulfur; blasting powder may be made with the cheaper sodium nitrate substituted for potassium nitrate and proportions may be as low as 40% nitrate, 30% charcoal, and 30% sulfur.[91] In 1857, Lamont DuPont solved the main problem of using cheaper sodium nitrate formulations when he patented DuPont "B" Blasting powder. After manufacturing grains from press-cake in the usual way, his process tumbled the powder with graphite dust for 12 hours. 
This formed a graphite coating on each grain that reduced its ability to absorb moisture.[92] -French war powder in 1879 used the ratio 75% saltpeter, 12.5% charcoal, 12.5% sulfur. English war powder in 1879 used the ratio 75% saltpeter, 15% charcoal, 10% sulfur.[93] The British Congreve rockets used 62.4% saltpeter, 23.2% charcoal and 14.4% sulfur, but the British Mark VII gunpowder was changed to 65% saltpeter, 20% charcoal and 15% sulfur.[94] The explanation for the wide variety in formulation relates to usage. Powder used for rocketry can use a slower burn rate since it accelerates the projectile for a much longer time—whereas powders for weapons such as flintlocks, cap-locks, or matchlocks need a higher burn rate to accelerate the projectile in a much shorter distance. Cannons usually used lower burn rate powders, because most would burst with higher burn rate powders. -Serpentine[edit] -The original dry-compounded powder used in fifteenth-century Europe was known as "Serpentine", either a reference to Satan[95] or to a common artillery piece that used it.[96] The ingredients were ground together with a mortar and pestle, perhaps for 24 hours,[96] resulting in a fine flour. Vibration during transportation could cause the components to separate again, requiring remixing in the field. Also if the quality of the saltpeter was low (for instance if it was contaminated with highly hygroscopic calcium nitrate), or if the powder was simply old (due to the mildly hygroscopic nature of potassium nitrate), in humid weather it would need to be re-dried. The dust from "repairing" powder in the field was a major hazard. -Loading cannons or bombards before the powder-making advances of the Renaissance was a skilled art. Fine powder loaded haphazardly or too tightly would burn incompletely or too slowly. 
Typically, the breech-loading powder chamber in the rear of the piece was filled only about half full, the serpentine powder neither too compressed nor too loose, a wooden bung pounded in to seal the chamber from the barrel when assembled, and the projectile placed on. A carefully determined empty space was necessary for the charge to burn effectively. When the cannon was fired through the touchhole, turbulence from the initial surface combustion caused the rest of the powder to be rapidly exposed to the flame.[96] -The advent of much more powerful and easy to use corned powder changed this procedure, but serpentine was used with older guns into the seventeenth century.[97] -Corning[edit] -For gunpowder to explode effectively, the combustible ingredients must be reduced to the smallest possible particle sizes, and thoroughly mixed as possible. Once mixed, however, for better results in a gun, makers discovered that the final product should be in the form of individual, dense, grains that spread the fire quickly from grain to grain, much as straw or twigs catch fire more quickly than a pile of sawdust. -Primarily for safety reasons, size reduction and mixing is done while the ingredients are damp, usually with water. After 1800, instead of forming grains by hand or with sieves, the damp mill-cake was pressed in molds to increase its density and extract the liquid, forming press-cake. The pressing took varying amounts of time, depending on conditions such as atmospheric humidity. 
The hard, dense product was broken again into tiny pieces, which were separated with sieves to produce a uniform product for each purpose: coarse powders for cannons, finer grained powders for muskets, and the finest for small hand guns and priming.[97] Inappropriately fine-grained powder often caused cannons to burst before the projectile could move down the barrel, due to the high initial spike in pressure.[98] Mammoth powder with large grains made for Rodman's 15-inch cannon reduced the pressure to only 20 percent as high as ordinary cannon powder would have produced.[99] -In the mid-nineteenth century, measurements were made determining that the burning rate within a grain of black powder (or a tightly packed mass) is about 0.20 fps, while the rate of ignition propagation from grain to grain is around 30 fps, over two orders of magnitude faster.[97] -Modern types[edit] -Modern corning first compresses the fine black powder meal into blocks with a fixed density (1.7 g/cm³).[100] In the United States, gunpowder grains were designated F (for fine) or C (for coarse). Grain diameter decreased with a larger number of Fs and increased with a larger number of Cs, ranging from about 2 mm for 7F to 15 mm for 7C. Even larger grains were produced for artillery bore diameters greater than about 17 cm (6.7 in). 
The standard DuPont Mammoth powder developed by Thomas Rodman and Lammot du Pont for use during the American Civil War had grains averaging 0.6 inches diameter, with edges rounded in a glazing barrel.[99] Other versions had grains the size of golf and tennis balls for use in 20-inch (50-cm) Rodman guns.[101] In 1875 DuPont introduced Hexagonal powder for large artillery, which was pressed using shaped plates with a small center core—about 1.5 inches diameter, like a wagon wheel nut, the center hole widened as the grain burned.[102] By 1882 German makers also produced hexagonal grained powders of a similar size for artillery.[102] -By the late 19th century manufacturing focused on standard grades of black powder from Fg used in large bore rifles and shotguns, through FFg (medium and small-bore arms such as muskets and fusils), FFFg (small-bore rifles and pistols), and FFFFg (extreme small bore, short pistols and most commonly for priming flintlocks).[103] A coarser grade for use in military artillery blanks was designated A-1. These grades were sorted on a system of screens with oversize retained on a mesh of 6 wires per inch, A-1 retained on 10 wires per inch, Fg retained on 14, FFg on 24, FFFg on 46, and FFFFg on 60. Fines designated FFFFFg were usually reprocessed to minimize explosive dust hazards.[104] In the United Kingdom, the main service gunpowders were classified RFG (rifle grained fine) with diameter of one or two millimeters and RLG (rifle grained large) for grain diameters between two and six millimeters.[101] Gunpowder grains can alternatively be categorized by mesh size: the BSS sieve mesh size, being the smallest mesh size, which retains no grains. Recognized grain sizes are Gunpowder G 7, G 20, G 40, and G 90. -Owing to the large market of antique and replica black-powder firearms in the US, modern gunpowder substitutes like Pyrodex, Triple Seven and Black Mag3[105] pellets have been developed since the 1970s. 
These products, which should not be confused with smokeless powders, aim to produce less fouling (solid residue), while maintaining the traditional volumetric measurement system for charges. Claims of less corrosiveness of these products have been controversial however. New cleaning products for black-powder guns have also been developed for this market.[103] -Other types of gunpowder[edit] -Besides black powder, there are other historically important types of gunpowder. "Brown gunpowder" is cited as composed of 79% nitre, 3% sulfur, and 18% charcoal per 100 of dry powder, with about 2% moisture. Prismatic Brown Powder is a large-grained product the Rottweil Company introduced in 1884 in Germany, which was adopted by the British Royal Navy shortly thereafter. The French navy adopted a fine, 3.1 millimeter, not prismatic grained product called Slow Burning Cocoa (SBC) or "cocoa powder". These brown powders reduced burning rate even further by using as little as 2 percent sulfur and using charcoal made from rye straw that had not been completely charred, hence the brown color.[102] -Lesmok powder was a product developed by DuPont in 1911[106] one of several semi-smokeless products in the industry containing a mixture of black and nitrocellulose powder. It was sold to Winchester and others primarily for .22 and .32 small calibers. Its advantage was that it was believed at the time to be less corrosive than smokeless powders then in use. It was not understood in the U.S. until the 1920s that the actual source of corrosion was the potassium chloride residue from potassium chlorate sensitized primers. The bulkier black powder fouling better disperses primer residue. 
Failure to mitigate primer corrosion by dispersion caused the false impression that nitrocellulose-based powder caused corrosion.[107] Lesmok had some of the bulk of black powder for dispersing primer residue, but somewhat less total bulk than straight black powder, thus requiring less frequent bore cleaning.[108] It was last sold by Winchester in 1947. -Sulfur-free gunpowder[edit] - -Burst barrel of a muzzle loader pistol replica, which was loaded with nitrocellulose powder instead of black powder and couldn't withstand the higher pressures of the modern propellant -The development of smokeless powders, such as cordite, in the late 19th century created the need for a spark-sensitive priming charge, such as gunpowder. However, the sulfur content of traditional gunpowders caused corrosion problems with Cordite Mk I and this led to the introduction of a range of sulfur-free gunpowders, of varying grain sizes.[64] They typically contain 70.5 parts of saltpeter and 29.5 parts of charcoal.[64] Like black powder, they were produced in different grain sizes. In the United Kingdom, the finest grain was known as sulfur-free mealed powder (SMP). Coarser grains were numbered as sulfur-free gunpowder (SFG n): 'SFG 12', 'SFG 20', 'SFG 40' and 'SFG 90', for example; where the number represents the smallest BSS sieve mesh size, which retained no grains. -Sulfur's main role in gunpowder is to decrease the ignition temperature. A sample reaction for sulfur-free gunpowder would be -6 KNO3 + C7H4O → 3 K2CO3 + 4 CO2 + 2 H2O + 3 N2 -Combustion characteristics[edit] -A simple, commonly cited, chemical equation for the combustion of black powder is -2 KNO3 + S + 3 C → K2S + N2 + 3 CO2. -A balanced, but still simplified, equation is[109] -10 KNO3 + 3 S + 8 C → 2 K2CO3 + 3 K2SO4 + 6 CO2 + 5 N2. -Although charcoal's chemical formula varies, it can be best summed up by its empirical formula: C7H4O. 
-Therefore, an even more accurate equation of the decomposition of regular black powder with the use of sulfur can be described as: -6 KNO3 + C7H4O + 2 S → K2CO3 + K2SO4 + K2S + 4 CO2 + 2 CO + 2 H2O + 3 N2 -Black powder without the use of sulfur: -10 KNO3 + 2 C7H4O → 5 K2CO3 + 4 CO2 + 5 CO + 4 H2O + 5 N2 -The burning of gunpowder does not take place as a single reaction, however, and the byproducts are not easily predicted. One study's results showed that it produced (in order of descending quantities) 55.91% solid products: potassium carbonate, potassium sulfate, potassium sulfide, sulfur, potassium nitrate, potassium thiocyanate, carbon, ammonium carbonate and 42.98% gaseous products: carbon dioxide, nitrogen, carbon monoxide, hydrogen sulfide, hydrogen, methane, 1.11% water. -Black powder made with less-expensive and more plentiful sodium nitrate (in appropriate proportions) works just as well but is more hygroscopic than powders made from Potassium nitrate—popularly known as saltpeter. Because corned black powder grains made with saltpeter are less affected by moisture in the air, they can be stored unsealed without degradation by humidity. Muzzleloaders have been known to fire after hanging on a wall for decades in a loaded state, provided they remained dry. By contrast, black powder made with sodium nitrate must be kept sealed to remain stable. -Gunpowder contains 3 megajoules per kilogram, and contains its own oxidant. For comparison, the energy density of TNT is 4.7 megajoules per kilogram, and the energy density of gasoline is 47.2 megajoules per kilogram. Gunpowder is a low explosive and as such it does not detonate; rather it deflagrates. Since it contains its own oxidizer and additionally burns faster under pressure, its combustion is capable of rupturing containers such as shell, grenade, or improvised "pipe bomb" or "pressure cooker" casings, forming shrapnel. 
-Advantages[edit] -In quarrying, high explosives are generally preferred for shattering rock. However, because of its low brisance, black powder causes fewer fractures and results in more usable stone compared to other explosives, making black powder useful for blasting monumental stone such as granite and marble. Black powder is well suited for blank rounds, signal flares, burst charges, and rescue-line launches. Black powder is also used in fireworks for lifting shells, in rockets as fuel, and in certain special effects. -Disadvantages[edit] -Black powder has a low energy density compared to modern "smokeless" powders, and thus to achieve high energy loadings, large amounts of black powder are needed with heavy projectiles. Black powder also produces thick smoke as a byproduct, which in military applications may give a soldier's location away to an enemy observer and may also impair aiming for additional shots. -Combustion converts less than half the mass of black powder to gas. The rest ends up as a thick layer of soot inside the barrel. In addition to being a nuisance, the residue from burnt black powder is hygroscopic and with the addition of moisture absorbed from the air, this residue forms a caustic substance. The soot contains potassium oxide or sodium oxide that turns into potassium hydroxide, or sodium hydroxide, which corrodes wrought iron or steel gun barrels. Black powder arms must be well cleaned both inside and out to remove the residue. The matchlock musket or pistol (an early gun ignition system), as well as the flintlock would often be unusable in wet weather, due to powder in the pan being exposed and dampened. Because of this unreliability, soldiers carrying muskets, known as musketeers, were armed with additional weapons such as swords or pikes. The bayonet was developed to allow the musket to be used as a pike, thus eliminating the need for the soldier to carry a secondary weapon. 
-Transportation[edit] -The United Nations Model Regulations on the Transportation of Dangerous Goods and national transportation authorities, such as United States Department of Transportation, have classified gunpowder (black powder) as a Group A: Primary explosive substance for shipment because it ignites so easily. Complete manufactured devices containing black powder are usually classified as Group D: Secondary detonating substance, or black powder, or article containing secondary detonating substance, such as firework, class D model rocket engine, etc., for shipment because they are harder to ignite than loose powder. As explosives, they all fall into the category of Class 1. -Other uses[edit] -Besides its use as an explosive, gunpowder has been occasionally employed for other purposes; after the Battle of Aspern-Essling (1809), the surgeon of the Napoleonic Army Larrey combated the lack of food for the wounded under his care by preparing a bouillon of horse meat seasoned with gunpowder for lack of salt.[110][111] It was also used for sterilizing on ships when there was no alcohol. -Jack Tars (British sailors) used gunpowder to create tattoos when ink wasn't available, by pricking the skin and rubbing the powder into the wound in a method known as traumatic tattooing.[112] -Christiaan Huygens experimented with gunpowder in 1673 in an early attempt to build an internal combustion engine, but he did not succeed. Modern attempts to recreate his invention were similarly unsuccessful. -Fireworks use gunpowder as lifting and burst charges, although sometimes other more powerful compositions are added to the burst charge to improve performance in small shells or provide a louder report. Most modern firecrackers no longer contain black powder. 
-Beginning in the 1930s, gunpowder or smokeless powder was used in rivet guns, stun guns for animals, cable splicers and other industrial construction tools.[113] The "stud gun" drove nails or screws into solid concrete, a function not possible with hydraulic tools. See Powder-actuated tool. Shotguns have been used to eliminate persistent material rings in operating rotary kilns (such as those for cement, lime, phosphate, etc.) and clinker in operating furnaces, and commercial tools make the method more reliable.[114] -Near London in 1853, Captain Shrapnel demonstrated a method for crushing gold-bearing ores by firing them from a cannon into an iron chamber, and "much satisfaction was expressed by all present". He hoped it would be useful on the goldfields of California and Australia. Nothing came of the invention, as continuously-operating crushing machines that achieved more reliable comminution were already coming into use.[115] -See also[edit] -Ballistics -Black powder substitute -Faversham explosives industry -Bulk loaded liquid propellants -Gunpowder magazine -Gunpowder Plot -Berthold Schwarz -Gunpowder warfare -History of gunpowder -Technology of the Song Dynasty -References[edit] -Jump up ^ http://www.merriam-webster.com/dictionary/gunpowder -Jump up ^ Jai Prakash Agrawal (2010). High Energy Materials: Propellants, Explosives and Pyrotechnics. Wiley-VCH. p. 69. ISBN 978-3-527-32610-5. -Jump up ^ David Cressy, Saltpeter: The Mother of Gunpowder (Oxford University Press, 2013) -Jump up ^ Owen Compliance Services. "Black Powder". Material Safety Data Sheet. Retrieved 31 August 2014. -Jump up ^ http://www.history.com/shows/ancient-discoveries/articles/who-built-it-first-2 -Jump up ^ http://chemistry.about.com/od/historyofchemistry/a/gunpowder.htm -Jump up ^ Chase 2003:31 : "the earliest surviving formulas for gunpowder can be found in the Wujing zongyao, a military work from around 1040" -^ Jump up to: a b c Buchanan 2006, p. 
2 "With its ninth century AD origins in China, the knowledge of gunpowder emerged from the search by alchemists for the secrets of life, to filter through the channels of Middle Eastern culture, and take root in Europe with consequences that form the context of the studies in this volume." -^ Jump up to: a b c Joseph Needham; Gwei-Djen Lu; Ling Wang (1987). Science and civilisation in China, Volume 5, Part 7. Cambridge University Press. pp. 48–50. ISBN 978-0-521-30358-3. -Jump up ^ Hazel Rossotti (2002). Fire: Servant, Scourge, and Enigma. Courier Dover Publications. pp. 132–137. ISBN 978-0-486-42261-9. -Jump up ^ Jack Kelly Gunpowder: Alchemy, Bombards, and Pyrotechnics: The History of the Explosive that Changed the World, Perseus Books Group: 2005, ISBN 0-465-03722-4, ISBN 978-0-465-03722-3: 272 pages -Jump up ^ St. C. Easton: "Roger Bacon and his Search for a Universal Science", Oxford (1962) -^ Jump up to: a b Gábor Ágoston (2005). Guns for the sultan: military power and the weapons industry in the Ottoman Empire. Cambridge University Press. p. 15. ISBN 978-0-521-84313-3. -Jump up ^ Ingham-Brown, George (1989) The Big Bang: A History of Explosives, Sutton Publishers, ISBN 0-7509-1878-0, ISBN 978-0-7509-1878-7, page vi -Jump up ^ Kelly, Jack (2005) Gunpowder: Alchemy, Bombards, and Pyrotechnics: The History of the Explosive that Changed the World, Perseus Books Group, ISBN 0-465-03722-4, ISBN 978-0-465-03722-3, page 22 -Jump up ^ Bert S. Hall, "Introduction, 1999" pp. xvi–xvii to the reprinting of James Riddick Partington (1960). A history of Greek fire and gunpowder. JHU Press. ISBN 978-0-8018-5954-0. -^ Jump up to: a b Peter Purton (2009). A History of the Late Medieval Siege, 1200–1500. Boydell & Brewer. pp. 108–109. ISBN 978-1-84383-449-6. -Jump up ^ Bert S. Hall, "Introduction, 1999" p. xvii to the reprinting of James Riddick Partington (1960). A history of Greek fire and gunpowder. JHU Press. ISBN 978-0-8018-5954-0. -Jump up ^ Buchanan. 
"Editor's Introduction: Setting the Context", in Buchanan 2006. -^ Jump up to: a b Chase 2003:31–32 -Jump up ^ Lorge, Peter A. (2008). The Asian military revolution, 1300-2000 : from gunpowder to the bomb (1. publ. ed.). Cambridge: Cambridge University Press. p. 32. ISBN 978-0-521-60954-8. -Jump up ^ Kelly 2004:4 -Jump up ^ The Big Book of Trivia Fun, Kidsbooks, 2004 -Jump up ^ Peter Allan Lorge (2008), The Asian military revolution: from gunpowder to the bomb, Cambridge University Press, p. 18, ISBN 978-0-521-60954-8 -Jump up ^ Needham 1986, p. 7 "Without doubt it was in the previous century, around +850, that the early alchemical experiments on the constituents of gunpowder, with its self-contained oxygen, reached their climax in the appearance of the mixture itself." -Jump up ^ Chase 2003:1 "The earliest known formula for gunpowder can be found in a Chinese work dating probably from the 800s. The Chinese wasted little time in applying it to warfare, and they produced a variety of gunpowder weapons, including flamethrowers, rockets, bombs, and land mines, before inventing firearms." -Jump up ^ Chase 2003:1 -Jump up ^ Delgado, James (February 2003). "Relics of the Kamikaze". Archaeology (Archaeological Institute of America) 56 (1). -Jump up ^ Chase 2003:31 -Jump up ^ Peter Allan Lorge (2008), The Asian military revolution: from gunpowder to the bomb, Cambridge University Press, pp. 33–34, ISBN 978-0-521-60954-8 -Jump up ^ Kelly 2004:22 'Around year 1240, Arabs acquired knowledge of saltpeter ("Chinese snow") from the East, perhaps through India. They knew of gunpowder soon afterward. They also learned about fireworks ("Chinese flowers") and rockets ("Chinese arrows"). Arab warriors had acquired fire lances before year 1280. Around that same year, a Syrian named Hasan al-Rammah wrote a book that, as he put it, "treats of machines of fire to be used for amusement or for useful purposes." 
He talked of rockets, fireworks, fire lances, and other incendiaries, using terms that suggested he derived his knowledge from Chinese sources. He gave instructions for the purification of saltpeter and recipes for making different types of gunpowder.' -^ Jump up to: a b c d Hassan, Ahmad Y. "Transfer of Islamic Technology to the West: Part III". History of Science and Technology in Islam. -Jump up ^ Peter Watson (2006). Ideas: A History of Thought and Invention, from Fire to Freud. HarperCollins. p. 304. ISBN 978-0-06-093564-1. The first use of a metal tube in this context was made around 1280 in the wars between the Song and the Mongols, where a new term, chong, was invented to describe the new horror...Like paper, it reached the West via the Muslims, in this case the writings of the Andalusian botanist Ibn al-Baytar, who died in Damascus in 1248. The Arabic term for saltpetre is 'Chinese snow' while the Persian usage is 'Chinese salt'.28 -Jump up ^ Cathal J. Nolan (2006). The age of wars of religion, 1000–1650: an encyclopedia of global warfare and civilization. Volume 1 of Greenwood encyclopedias of modern world wars. Greenwood Publishing Group. p. 365. ISBN 0-313-33733-0. Retrieved 2011-11-28. In either case, there is linguistic evidence of Chinese origins of the technology: in Damascus, Arabs called the saltpeter used in making gunpowder " Chinese snow," while in Iran it was called "Chinese salt." Whatever the migratory route -Jump up ^ Oliver Frederick Gillilan Hogg (1970). Artillery: its origin, heyday, and decline. Archon Books. p. 123. The Chinese were certainly acquainted with saltpetre, the essential ingredient of gunpowder. They called it Chinese Snow and employed it early in the Christian era in the manufacture of fireworks and rockets. -Jump up ^ Oliver Frederick Gillilan Hogg (1963). English artillery, 1326–1716: being the history of artillery in this country prior to the formation of the Royal Regiment of Artillery. Royal Artillery Institution. p. 
42. The Chinese were certainly acquainted with saltpetre, the essential ingredient of gunpowder. They called it Chinese Snow and employed it early in the Christian era in the manufacture of fireworks and rockets. -Jump up ^ Oliver Frederick Gillilan Hogg (1993). Clubs to cannon: warfare and weapons before the introduction of gunpowder (reprint ed.). Barnes & Noble Books. p. 216. ISBN 1-56619-364-8. Retrieved 2011-11-28. The Chinese were certainly acquainted with saltpetre, the essential ingredient of gunpowder. They called it Chinese snow and used it early in the Christian era in the manufacture of fireworks and rockets. -Jump up ^ Partington, J. R. (1960). A History of Greek Fire and Gunpowder (illustrated, reprint ed.). JHU Press. p. 335. ISBN 0801859549. Retrieved 2014-11-21. -Jump up ^ Needham, Joseph; Yu, Ping-Yu (1980). Needham, Joseph, ed. Science and Civilisation in China: Volume 5, Chemistry and Chemical Technology, Part 4, Spagyrical Discovery and Invention: Apparatus, Theories and Gifts. Volume 5 (Issue 4 of Science and Civilisation in China). Contributors Joseph Needham, Lu Gwei-Djen, Nathan Sivin (illustrated, reprint ed.). Cambridge University Press. p. 194. ISBN 052108573X. Retrieved 2014-11-21. -Jump up ^ Khan 1996 -^ Jump up to: a b Khan 2004:6 -Jump up ^ Ancient Discoveries, Episode 12: Machines of the East, History Channel, 2007 (Part 4 and Part 5) -Jump up ^ Nelson, Cameron Rubaloff (2010-07). Manufacture and transportation of gunpowder in the Ottoman Empire: 1400-1800 M.A. Thesis. -Jump up ^ William H. McNeill (1992). The Rise of the West: A History of the Human Community. University of Chicago Press. p. 492. ISBN 0-226-56141-0. Retrieved 29 July 2011. -Jump up ^ Michael Kohn (2006), Dateline Mongolia: An American Journalist in Nomad's Land, RDR Books, p. 28, ISBN 1-57143-155-1, retrieved 29 July 2011 -Jump up ^ Robert Cowley (1993). Robert Cowley, ed. Experience of War (reprint ed.). Random House Inc. p. 86. ISBN 0-440-50553-4. 
Retrieved 29 July 2011. -Jump up ^ Kenneth Warren Chase (2003). Firearms: a global history to 1700 (illustrated ed.). Cambridge University Press. p. 58. ISBN 0-521-82274-2. Retrieved 29 July 2011. -Jump up ^ C. F. Temler, Historische Abhandlungen der Koniglichen Gesellschaft der Wissenschaften zu Kopenhagen ... ubersetzt ... von V. A. Heinze, Kiel, Dresden and Leipzig, 1782, i, 168, as cited in Partington, p. 228, footnote 6. -Jump up ^ Joseph Needham; Gwei-Djen Lu; Ling Wang (1987). Science and civilisation in China, Volume 5, Part 7. Cambridge University Press. p. 358. ISBN 978-0-521-30358-3. -Jump up ^ Bert S. Hall, "Introduction, 1999" p. xxiv to the reprinting of James Riddick Partington (1960). A history of Greek fire and gunpowder. JHU Press. ISBN 978-0-8018-5954-0. -Jump up ^ Partington 1960:60 -^ Jump up to: a b Partington 1960:48–49, 54 -^ Jump up to: a b Partington 1960:82–83 -^ Jump up to: a b c d Kelly 2004, p.61 -Jump up ^ Molerus, Otto. "History of Civilization in the Western Hemisphere from the Point of View of Particulate Technology, Part 2," Advanced Powder Technology 7 (1996): 161-66 -Jump up ^ Microsoft Encarta Online Encyclopedia 2007 Archived 31 October 2009. -Jump up ^ In 1777 Lavoisier named oxygen, which had earlier been isolated by Priestley; the realization that saltpeter contained this substance was fundamental to understanding gunpowder. -^ Jump up to: a b Kelly 2004, p.164 -Jump up ^ Metzner, Paul (1998), Crescendo of the Virtuoso: Spectacle, Skill, and Self-Promotion in Paris during the Age of Revolution, University of California Press -^ Jump up to: a b c d Cocroft 2000, "Success to the Black Art!". Chapter 1 -Jump up ^ Ross, Charles. The Custom of the Castle: From Malory to Macbeth. Berkeley: University of California Press, c1997. 
[1] pages 130-131 -Jump up ^ The Noble-Abel Equation of State: Thermodynamic Derivations for Ballistics Modelling -Jump up ^ Pritchard, Tom; Evans, Jack; Johnson, Sydney (1985), The Old Gunpowder Factory at Glynneath, Merthyr Tydfil: Merthyr Tydfil & District Naturalists' Society -^ Jump up to: a b c d e Cocroft 2000, "The demise of gunpowder". Chapter 4 -Jump up ^ MacDougall, Ian (2000). 'Oh, ye had to be careful' : personal recollections by Roslin gunpowder mill and bomb factory workers. East Linton, Scotland: Tuckwell Press in association with the European Ethnological Research Centre and the Scottish Working People's History Trust. ISBN 1-86232-126-4. -Jump up ^ Iqtidar Alam Khan (2004). Gunpowder And Firearms: Warfare In Medieval India. Oxford University Press. ISBN 978-0-19-566526-0. -^ Jump up to: a b Iqtidar Alam Khan (25 April 2008). Historical Dictionary of Medieval India. Scarecrow Press. p. 157. ISBN 978-0-8108-5503-8. -^ Jump up to: a b Khan 2004:9–10 -Jump up ^ Khan 2004:10 -Jump up ^ Partington (Johns Hopkins University Press edition, 1999), 225 -Jump up ^ Partington (Johns Hopkins University Press edition, 1999), 226 -Jump up ^ http://www.youtube.com/watch?v=DTfEDaWMj4o -^ Jump up to: a b c "India." Encyclopædia Britannica. Encyclopedia Britannica 2008 Ultimate Reference Suite. Chicago: Encyclopedia Britannica, 2008. -Jump up ^ "rocket and missile system." Encyclopædia Britannica. Encyclopædia Britannica 2008 Ultimate Reference Suite. Chicago: Encyclopædia Britannica, 2008. -^ Jump up to: a b Dipanegara, P. B. R. Carey, Babad Dipanagara: an account of the outbreak of the Java war, 1825-30 : the Surakarta court version of the Babad Dipanagara with translations into English and Indonesian volume 9: Council of the M.B.R.A.S. by Art Printing Works: 1981. -Jump up ^ Atsushi, Ota (2006). Changes of regime and social dynamics in West Java : society, state, and the outer world of Banten, 1750-1830. Leiden: Brill. ISBN 90-04-15091-9. 
-^ Jump up to: a b Thomas Stamford Raffles, The History of Java, Oxford University Press, 1965 (originally published in 1817), ISBN 0-19-580347-7 -Jump up ^ Raffles, Thomas Stamford (1978). The History of Java ([Repr.]. ed.). Kuala Lumpur: Oxford University Press. ISBN 0-19-580347-7. -Jump up ^ US Department of Agriculture (1917). Department Bulletin No. 316: Willows: Their growth, use, and importance. The Department. p. 31. -Jump up ^ Kelly 2004, p.200 -^ Jump up to: a b Earl 1978, Chapter 2: The Development of Gunpowder -Jump up ^ Kelly 2004:60–63 -Jump up ^ Kelly 2004, p.199 -Jump up ^ Frangsmyr, Tore, J. L. Heilbron, and Robin E. Rider, editors The Quantifying Spirit in the Eighteenth Century. Berkeley: University of California Press, c1990. http://ark.cdlib.org/ark:/13030/ft6d5nb455/ p. 292. -Jump up ^ C.E. Munroe (1885) "Notes on the literature of explosives no. VIII", Proceedings of the US Naval Institute, no. XI, p. 285 -Jump up ^ The History of the 10.4×38 Swiss Cartridge -Jump up ^ Blackpowder to Pyrodex and Beyond by Randy Wakeman at Chuck Hawks -Jump up ^ The History and Art of Shotshells by Jon Farrar, Nebraskaland Magazine -Jump up ^ Buchanan. "Editor's Introduction: Setting the Context", in Buchanan 2006, p. 4. -Jump up ^ Black Powder Recipes, Ulrich Bretscher -Jump up ^ Julian S. Hatcher, Hatcher's Notebook, Military Service Publishing Company, 1947. Chapter XIII Notes on Gunpowder, pages 300-305. -Jump up ^ Kelly 2004, p.218 -Jump up ^ Book title Workshop Receipts Publisher William Clowes and Son limited Author Ernest Spon. Date 1 August 1873. -Jump up ^ Gunpowder Translation. Academic. Retrieved 2014-08-31. -Jump up ^ Cathal J. Nolan (2006), The age of wars of religion, 1000-1650: an encyclopedia of global warfare and civilization, Greenwood Publishing Group, p. 365, ISBN 978-0-313-33733-8 -^ Jump up to: a b c Kelly 2004, p58 -^ Jump up to: a b c John Francis Guilmartin (2003). 
Gunpowder & galleys: changing technology & Mediterranean warfare at sea in the 16th century. Conway Maritime Press. pp. 109–110 and 298–300. ISBN 0851779514. -Jump up ^ T.J. Rodman (1861), Reports of experiments on the properties of metals for cannon and the qualities of cannon powder, p. 270 -^ Jump up to: a b Kelly 2004, p.195 -Jump up ^ Tenney L. Davis (1943). The Chemistry of Powder and Explosives (PDF). p. 139. -^ Jump up to: a b Brown, G.I. (1998) The Big Bang: A history of Explosives Sutton Publishing pp.22&32 ISBN 0-7509-1878-0 -^ Jump up to: a b c Kelly 2004, p.224 -^ Jump up to: a b Rodney James (2011). The ABCs of Reloading: The Definitive Guide for Novice to Expert (9 ed.). Krause Publications. pp. 53–59. ISBN 978-1-4402-1396-0. -Jump up ^ Sharpe, Philip B. (1953) Complete Guide to Handloading Funk & Wagnalls p.137 -Jump up ^ Wakeman, Randy. "Blackpowder to Pyrodex and Beyond". Retrieved 31 August 2014. -Jump up ^ "LESMOK POWDER". -Jump up ^ Julian S. Hatcher, Hatcher's Notebook, Stackpole Books, 1962. Chapter XIV, Gun Corrosion and Ammunition Developments, pages 346-349. -Jump up ^ Wakeman, Randy. "Blackpowder to Pyrodex and Beyond". -Jump up ^ Flash! Bang! Whiz!, University of Denver -Jump up ^ Parker, Harold T. (1983). Three Napoleonic battles. (Repr., Durham, 1944. ed.). Durham, NC: Duke Univ. Pr. p. 83. ISBN 0-8223-0547-X. -Jump up ^ Larrey is quoted in French at Dr Béraud, Études Hygiéniques de la chair de cheval comme aliment, Musée des Familles (1841-42). -Jump up ^ Rediker, Marcus (1989). Between the devil and the deep blue sea : merchant seamen, pirates, and the Anglo-American maritime world, 1700-1750 (1st pbk. ed. ed.). Cambridge: Cambridge University Press. p. 12. ISBN 9780521379830. -Jump up ^ "Gunpowder Now Used To Drive Rivets And Splice Cables", April 1932, Popular Science -Jump up ^ "MasterBlaster System". Remington Products. -Jump up ^ Mining Journal 22 January 1853, p. 61 -Benton, Captain James G. (1862). 
A Course of Instruction in Ordnance and Gunnery (2 ed.). West Point, New York: Thomas Publications. ISBN 1-57747-079-6.. -Brown, G. I. (1998). The Big Bang: A History of Explosives. Sutton Publishing. ISBN 0-7509-1878-0.. -Buchanan, Brenda J., ed. (2006). Gunpowder, Explosives and the State: A Technological History. Aldershot: Ashgate. ISBN 0-7546-5259-9.. -Chase, Kenneth (2003). Firearms: A Global History to 1700. Cambridge University Press. ISBN 0-521-82274-2.. -Cocroft, Wayne (2000). Dangerous Energy: The archaeology of gunpowder and military explosives manufacture. Swindon: English Heritage. ISBN 1-85074-718-0.. -Crosby, Alfred W. (2002). Throwing Fire: Projectile Technology Through History. Cambridge University Press. ISBN 0-521-79158-8.. -Earl, Brian (1978). Cornish Explosives. Cornwall: The Trevithick Society. ISBN 0-904040-13-5.. -al-Hassan, Ahmad Y.. "History of Science and Technology in Islam". |chapter= ignored (help). -Johnson, Norman Gardner. "explosive". Encyclopædia Britannica. Chicago: Encyclopædia Britannica Online.. -Kelly, Jack (2004). Gunpowder: Alchemy, Bombards, & Pyrotechnics: The History of the Explosive that Changed the World. Basic Books. ISBN 0-465-03718-6.. -Khan, Iqtidar Alam (1996). "Coming of Gunpowder to the Islamic World and North India: Spotlight on the Role of the Mongols". Journal of Asian History 30: 41–5.. -Khan, Iqtidar Alam (2004). "Gunpowder and Firearms: Warfare in Medieval India". Oxford University Press. doi:10.1086/ahr.111.3.817.. -Needham, Joseph (1986). "Science & Civilisation in China". V:7: The Gunpowder Epic. Cambridge University Press. ISBN 0-521-30358-3.. -Norris, John (2003). Early Gunpowder Artillery: 1300-1600. Marlborough: The Crowood Press. ISBN 9781861266156.. -Partington, J.R. (1960). A History of Greek Fire and Gunpowder. Cambridge, UK: W. Heffer & Sons.. -Partington, James Riddick; Hall, Bert S. (1999). A History of Greek Fire and Gunpowder. Baltimore: Johns Hopkins University Press. 
doi:10.1353/tech.2000.0031. ISBN 0-8018-5954-9. -Urbanski, Tadeusz (1967). "Chemistry and Technology of Explosives" III. New York: Pergamon Press.. -External links[edit] - Wikimedia Commons has media related to Gunpowder. - Look up gunpowder in Wiktionary, the free dictionary. -Gun and Gunpowder -The Origins of Gunpowder -Cannons and Gunpowder -Oare Gunpowder Works, Kent, UK -Royal Gunpowder Mills -The DuPont Company on the Brandywine A digital exhibit produced by the Hagley Library that covers the founding and early history of the DuPont Company powder yards in Delaware -"Ulrich Bretschler's Gunpowder Chemistry page". -Video Demonstration of the Medieval Siege Society's Guns, Including showing ignition of gunpowder -Black Powder Recipes -"Dr. Sasse's investigations (and others) found via search at US DTIC.MIL These contain scientific studies of BP properties and details of measurement techniques.". -Categories: GunpowderChinese inventionsExplosivesFirearm propellantsPyrotechnic compositionsRocket fuelsSolid fuels -Navigation menu -Create accountLog inArticleTalkReadEditView history - -Main page -Contents -Featured content -Current events -Random article -Donate to Wikipedia -Wikimedia Shop -Interaction -Help -About Wikipedia -Community portal -Recent changes -Contact page -Tools -What links here -Related changes -Upload file -Special pages -Permanent link -Page information -Wikidata item -Cite this page -Print/export -Create a book -Download as PDF -Printable version -Languages -Afrikaans -العربية -Aragonés -Asturianu -Azərbaycanca -Башҡортса -Беларуская -Беларуская (тарашкевіца)‎ -Български -Bosanski -Brezhoneg -Буряад -Català -Чӑвашла -Čeština -Corsu -Cymraeg -Dansk -Deutsch -Eesti -Ελληνικά -Español -Esperanto -Euskara -فارسی -Français -Gaeilge -Galego -贛語 -Хальмг -한국어 -हिन्दी -Hrvatski -Ilokano -Bahasa Indonesia -Íslenska -Italiano -עברית -Kapampangan -Kiswahili -Kurdî -Latina -Latviešu -Lietuvių -Limburgs -Magyar -Македонски -മലയാളം -مصرى -Монгол -Nederlands 
-नेपाली -नेपाल भाषा -日本語 -Нохчийн -Norsk bokmål -Norsk nynorsk -Occitan -Oʻzbekcha -پنجابی -Polski -Português -Română -Runa Simi -Русский -Саха тыла -Scots -Shqip -Sicilianu -Simple English -Slovenčina -Slovenščina -کوردی -Српски / srpski -Srpskohrvatski / српскохрватски -Suomi -Svenska -Tagalog -தமிழ் -Татарча/tatarça -ไทย -Türkçe -Українська -اردو -Tiếng Việt -Võro -Winaray -ייִדיש -粵語 -Žemaitėška -中文 -Edit links -This page was last modified on 28 November 2014 at 05:37. -Text is available under the Creative Commons Attribution-ShareAlike License; additional terms may apply. By using this site, you agree to the Terms of Use and Privacy Policy. Wikipedia® is a registered trademark of the Wikimedia Foundation, Inc., a non-profit organization. -Privacy policyAbout WikipediaDisclaimersContact WikipediaDevelopersMobile viewWikimedia Foundation Powered by MediaWiki - - -Smokeless powder -From Wikipedia, the free encyclopedia - -Finnish smokeless powder -Smokeless powder is the name given to a number of propellants used in firearms and artillery that produce negligible smoke when fired, unlike the black powder they replaced. The term is unique to the United States and is generally not used in other English-speaking countries, which initially used proprietary names such as "Ballistite" and "Cordite" but gradually shifted to "propellant" as the generic term. -The basis of the term smokeless is that the combustion products are mainly gaseous, compared to around 55% solid products (mostly potassium carbonate, potassium sulfate, and potassium sulfide) for black powder.[1] Despite its name, smokeless powder is not completely smoke-free;[2] while there may be little noticeable smoke from small-arms ammunition, smoke from artillery fire can be substantial. 
This article focuses on nitrocellulose formulations, but the term smokeless powder was also used to describe various picrate mixtures with nitrate, chlorate, or dichromate oxidizers during the late 19th century, before the advantages of nitrocellulose became evident.[3] -Since the 14th century[4] gunpowder was not actually a physical "powder," and smokeless powder can only be produced as a pelletized or extruded granular material. Smokeless powder allowed the development of modern semi- and fully automatic firearms and lighter breeches and barrels for artillery. Burnt black powder leaves a thick, heavy fouling that is hygroscopic and causes rusting of the barrel. The fouling left by smokeless powder exhibits none of these properties (though some primer compounds can leave hygroscopic salts that have a similar effect; non-corrosive primer compounds were introduced in the 1920s[5][6]). This makes an autoloading firearm with many moving parts feasible (which would otherwise jam or seize under heavy black powder fouling). -Smokeless powders are classified as, typically, division 1.3 explosives under the UN Recommendations on the transportation of Dangerous goods – Model Regulations, regional regulations (such as ADR) and national regulations (such the United States' ATF). However, they are used as solid propellants; in normal use, they undergo deflagration rather than detonation. -Contents [hide] -1 Background -2 Nitroglycerine and guncotton -3 Propellant improvements -4 Chemical formulations -5 Instability and stabilization -6 Physical variations -7 Smokeless propellant components -8 Manufacturing -9 Flashless propellant -10 See also -11 References -11.1 Notes -11.2 Sources -12 External links -Background[edit] -Military commanders had been complaining since the Napoleonic Wars about the problems of giving orders on a battlefield obscured by the smoke of firing. 
Verbal commands could not be heard above the noise of the guns, and visual signals could not be seen through the thick smoke from the gunpowder used by the guns. Unless there was a strong wind, after a few shots, soldiers using black powder ammunition would have their view obscured by a huge cloud of smoke. Snipers or other concealed shooters were given away by a cloud of smoke over the firing position. Black powder is also corrosive, making cleaning mandatory after every use. Likewise, black powder's tendency to produce severe fouling caused actions to jam and often made reloading difficult. -Nitroglycerine and guncotton[edit] -Nitroglycerine was synthesized by the Italian chemist Ascanio Sobrero in 1847.[7] It was subsequently developed and manufactured by Alfred Nobel as an industrial explosive, but even then it was unsuitable as a propellant: despite its energetic and smokeless qualities, it detonates instead of deflagrating smoothly, making it more amenable to shattering a gun than propelling a projectile out of it. Nitroglycerine per se is also highly unstable, making it unfit to be carried in battlefield conditions. -A major step forward was the discovery of guncotton, a nitrocellulose-based material, by Swiss chemist Christian Friedrich Schönbein in 1846. He promoted its use as a blasting explosive[8] and sold manufacturing rights to the Austrian Empire. Guncotton was more powerful than gunpowder, but at the same time was once again somewhat more unstable. John Taylor obtained an English patent for guncotton; and John Hall & Sons began manufacture in Faversham. -English interest languished after an explosion destroyed the Faversham factory in 1847. Austrian Baron Wilhelm Lenk von Wolfsberg built two guncotton plants producing artillery propellant, but it too was dangerous under field conditions, and guns that could fire thousands of rounds using gunpowder would reach their service life after only a few hundred shots with the more powerful guncotton. 
Small arms could not withstand the pressures generated by guncotton at all. -After one of the Austrian factories blew up in 1862, Thomas Prentice & Company began manufacturing guncotton in Stowmarket in 1863; and British War Office chemist Sir Frederick Abel began thorough research at Waltham Abbey Royal Gunpowder Mills leading to a manufacturing process that eliminated the impurities in nitrocellulose making it safer to produce and a stable product safer to handle. Abel patented this process in 1865, when the second Austrian guncotton factory exploded. After the Stowmarket factory exploded in 1871, Waltham Abbey began production of guncotton for torpedo and mine warheads.[9] -Propellant improvements[edit] -In 1863, Prussian artillery captain Johann F. E. Schultze patented a small arms propellant of nitrated hardwood impregnated with saltpetre or barium nitrate. Prentice received an 1866 patent for a sporting powder of nitrated paper manufactured at Stowmarket, but ballistic uniformity suffered as the paper absorbed atmospheric moisture. In 1871, Frederick Volkmann received an Austrian patent for a colloided version of Schultze powder called Collodin, which he manufactured near Vienna for use in sporting firearms. Austrian patents were not published at the time, and the Austrian Empire considered the operation a violation of the government monopoly on explosives manufacture and closed the Volkmann factory in 1875.[9] In 1882, the Explosives Company at Stowmarket patented an improved formulation of nitrated cotton gelatinised by ether-alcohol with nitrates of potassium and barium. These propellants were suitable for shotguns but not rifles.[10] - -Poudre B single-base smokeless powder flakes -In 1884, Paul Vieille invented a smokeless powder called Poudre B (short for poudre blanche—white powder, as distinguished from black powder)[11] made from 68.2% insoluble nitrocellulose, 29.8% soluble nitrocellulose gelatinized with ether and 2% paraffin. 
This was adopted for the Lebel rifle.[12] It was passed through rollers to form paper thin sheets, which were cut into flakes of the desired size.[11] The resulting propellant, today known as pyrocellulose, contains somewhat less nitrogen than guncotton and is less volatile. A particularly good feature of the propellant is that it will not detonate unless it is compressed, making it very safe to handle under normal conditions. -Vieille's powder revolutionized the effectiveness of small guns, because it gave off almost no smoke and was three times more powerful than black powder. Higher muzzle velocity meant a flatter trajectory and less wind drift and bullet drop, making 1000 meter shots practicable. Since less powder was needed to propel a bullet, the cartridge could be made smaller and lighter. This allowed troops to carry more ammunition for the same weight. Also, it would burn even when wet. Black powder ammunition had to be kept dry and was almost always stored and transported in watertight cartridges. -Other European countries swiftly followed and started using their own versions of Poudre B, the first being Germany and Austria, which introduced new weapons in 1888. Subsequently Poudre B was modified several times with various compounds being added and removed. Krupp began adding diphenylamine as a stabilizer in 1888.[9] -Meanwhile, in 1887, Alfred Nobel obtained an English patent for a smokeless gunpowder he called Ballistite. In this propellant the fibrous structure of cotton (nitro-cellulose) was destroyed by a nitro-glycerine solution instead of a solvent.[13] In England in 1889, a similar powder was patented by Hiram Maxim, and in the USA in 1890 by Hudson Maxim.[14] Ballistite was patented in the United States in 1891. -The Germans adopted ballistite for naval use in 1898, calling it WPC/98. The Italians adopted it as filite, in cord instead of flake form, but realising its drawbacks changed to a formulation with nitroglycerine they called solenite. 
In 1891 the Russians tasked the chemist Mendeleef with finding a suitable propellant; he created nitrocellulose gelatinised by ether-alcohol, which produced more nitrogen and more uniform colloidal structure than the French use of nitro-cottons in Poudre B. He called it pyro-collodion.[13] -Britain conducted trials on all the various types of propellant brought to their attention, but were dissatisfied with them all and sought something superior to all existing types. In 1889, Sir Frederick Abel, James Dewar and Dr W Kellner patented (Nos 5614 and 11,664 in the names of Abel and Dewar) a new formulation that was manufactured at the Royal Gunpowder Factory at Waltham Abbey. It entered British service in 1891 as Cordite Mark 1. Its main composition was 58% Nitro-glycerine, 37% Guncotton and 3% mineral jelly. A modified version, Cordite MD, entered service in 1901; this increased guncotton to 65% and reduced nitro-glycerine to 30%; this change reduced the combustion temperature and hence erosion and barrel wear. Cordite's advantages over gunpowder were reduced maximum pressure in the chamber (hence lighter breeches, etc.) but longer high pressure. Cordite could be made in any desired shape or size.[15] The creation of cordite led to a lengthy court battle between Nobel, Maxim, and another inventor over alleged British patent infringement. -The Anglo-American Explosives Company began manufacturing its shotgun powder in Oakland, New Jersey in 1890. DuPont began producing guncotton at Carneys Point Township, New Jersey in 1891.[3] Charles E. Munroe of the Naval Torpedo Station in Newport, Rhode Island patented a formulation of guncotton colloided with nitrobenzene, called Indurite, in 1891.[16] Several United States firms began producing smokeless powder when Winchester Repeating Arms Company started loading sporting cartridges with Explosives Company powder in 1893. 
California Powder Works began producing a mixture of nitroglycerine and nitrocellulose with ammonium picrate as Peyton Powder, Leonard Smokeless Powder Company began producing nitroglycerine-nitrocellulose Ruby powders, Laflin & Rand negotiated a license to produce Ballistite, and DuPont started producing smokeless shotgun powder. The United States Army evaluated 25 varieties of smokeless powder and selected Ruby and Peyton Powders as the most suitable for use in the Krag-Jørgensen service rifle. Ruby was preferred, because tin-plating was required to protect brass cartridge cases from picric acid in the Peyton Powder. Rather than paying the required royalties for Ballistite, Laflin & Rand financed Leonard's reorganization as the American Smokeless Powder Company. United States Army Lieutenant Whistler assisted American Smokeless Powder Company factory superintendent Aspinwall in formulating an improved powder named W.A. for their efforts. W.A. smokeless powder was the standard for United States military service rifles from 1897 until 1908.[3] -In 1897, United States Navy Lieutenant John Bernadou patented a nitrocellulose powder colloided with ether-alcohol.[16] The Navy licensed or sold patents for this formulation to DuPont and the California Powder Works while retaining manufacturing rights for the Naval Powder Factory, Indian Head, Maryland constructed in 1900. 
The United States Army adopted the Navy single-base formulation in 1908 and began manufacture at Picatinny Arsenal.[3] By that time Laflin & Rand had taken over the American Powder Company to protect their investment, and Laflin & Rand had been purchased by DuPont in 1902.[17] Upon securing a 99-year lease of the Explosives Company in 1903, DuPont enjoyed use of all significant smokeless powder patents in the United States, and was able to optimize production of smokeless powder.[3] When government anti-trust action forced divestiture in 1912, DuPont retained the nitrocellulose smokeless powder formulations used by the United States military and released the double-base formulations used in sporting ammunition to the reorganized Hercules Powder Company. These newer propellants were more stable and thus safer to handle than Poudre B, and also more powerful. -Chemical formulations[edit] -"Double base" redirects here. For the musical instrument, see double bass. -Currently, propellants using nitrocellulose (detonation velocity 7,300 m/s (23,950 ft/s)) (typically an ether-alcohol colloid of nitrocellulose) as the sole explosive propellant ingredient are described as single-base powder.[18] -Propellants mixtures containing nitrocellulose and nitroglycerin (detonation velocity 7,700 m/s (25,260 ft/s)) as explosive propellant ingredients are known as double-base powder.[19] -During the 1930s triple-base propellant containing nitrocellulose, nitroglycerin, and a substantial quantity of nitroguanidine (detonation velocity 8,200 m/s (26,900 ft/s)) as explosive propellant ingredients was developed. These propellant mixtures have reduced flash and flame temperature without sacrificing chamber pressure compared to single and double base propellants, albeit at the cost of more smoke. -In practice, triple base propellants are reserved mainly for large caliber ammunition such as used in (naval) artillery and tank guns. During World War II it had some use by British artillery. 
After that war it became the standard propellant in all British large caliber ammunition designs except small-arms. Most western nations, except the United States, followed a similar path. -In the late 20th century new propellant formulations started to appear. These are based on nitroguanidine and high explosives of the RDX (detonation velocity 8,750 m/s (28,710 ft/s)) type. -Instability and stabilization[edit] -Nitrocellulose deteriorates with time, yielding acidic byproducts. Those byproducts catalyze the further deterioration, increasing its rate. The released heat, in case of bulk storage of the powder, or too large blocks of solid propellant, can cause self-ignition of the material. Single-base nitrocellulose propellants are hygroscopic and most susceptible to degradation; double-base and triple-base propellants tend to deteriorate more slowly. To neutralize the decomposition products, which could otherwise cause corrosion of metals of the cartridges and gun barrels, calcium carbonate is added to some formulations. -To prevent buildup of the deterioration products, stabilizers are added. Diphenylamine is one of the most common stabilizers used. Nitrated analogs of diphenylamine formed in the process of stabilizing decomposing powder are sometimes used as stabilizers themselves.[20][21] The stabilizers are added in the amount of 0.5–2% of the total amount of the formulation; higher amounts tend to degrade its ballistic properties. The amount of the stabilizer is depleted with time. Propellants in storage should be periodically tested for the amount of stabilizer remaining, as its depletion may lead to auto-ignition of the propellant. -Physical variations[edit] - -Ammunition handloading powders -Smokeless powder may be corned into small spherical balls or extruded into cylinders or strips with many cross-sectional shapes (strips with various rectangular proportions, single or multi-hole cylinders, slotted cylinders) using solvents such as ether. 
These extrusions can be cut into short ('flakes') or long pieces ('cords' many inches long). Cannon powder has the largest pieces. -The properties of the propellant are greatly influenced by the size and shape of its pieces. The specific surface area of the propellant influences the speed of burning, and the size and shape of the particles determine the specific surface area. By manipulation of the shape it is possible to influence the burning rate and hence the rate at which pressure builds during combustion. Smokeless powder burns only on the surfaces of the pieces. Larger pieces burn more slowly, and the burn rate is further controlled by flame-deterrent coatings that retard burning slightly. The intent is to regulate the burn rate so that a more or less constant pressure is exerted on the propelled projectile as long as it is in the barrel so as to obtain the highest velocity. The perforations stabilize the burn rate because as the outside burns inward (thus shrinking the burning surface area) the inside is burning outward (thus increasing the burning surface area, but faster, so as to fill up the increasing volume of barrel presented by the departing projectile).[22] Fast-burning pistol powders are made by extruding shapes with more area such as flakes or by flattening the spherical granules. Drying is usually performed under a vacuum. The solvents are condensed and recycled. The granules are also coated with graphite to prevent static electricity sparks from causing undesired ignitions.[23] -Faster-burning propellants generate higher temperatures and higher pressures, however they also increase wear on gun barrels. 
-Smokeless propellant components[edit] -The propellant formulations may contain various energetic and auxiliary components: -Propellants: -Nitrocellulose, an energetic component of most smokeless propellants[24] -Nitroglycerin, an energetic component of double-base and triple-base formulations[24] -Nitroguanidine, a component of triple-base formulations[24] -D1NA (bis-nitroxyethylnitramine)[25] -Fivonite (tetramethylolcyclopentanone)[25] -DGN (di-ethylene glycol dinitrate)[26] -Acetyl cellulose[27] -Deterrents, (or moderants), to slow the burning rate -Centralites (symmetrical diphenyl urea—primarily diethyl or dimethyl)[28][29] -Dibutyl phthalate[24][29] -Dinitrotoluene (toxic, carcinogenic, and obsolete)[24][30] -Akardite (asymmetrical diphenyl urea)[26] -ortho-tolyl urethane[31] -Polyester adipate -Camphor (obsolete)[29] -Stabilizers, to prevent or slow down self-decomposition[32] -Diphenylamine[33] -Petroleum jelly[34] -Calcium carbonate[24] -Magnesium oxide[26] -Sodium bicarbonate[27] -beta-naphthol methyl ether[31] -Amyl alcohol (obsolete)[35] -Aniline (obsolete)[36] -Decoppering additives, to hinder the buildup of copper residues from the gun barrel rifling -Tin metal and compounds (e.g., tin dioxide)[24][37] -Bismuth metal and compounds (e.g., bismuth trioxide, bismuth subcarbonate, bismuth nitrate, bismuth antimonide); the bismuth compounds are favored as copper dissolves in molten bismuth, forming brittle and easily removable alloy -Lead foil and lead compounds, phased out due to toxicity[25] -Flash reducers, to reduce the brightness of the muzzle flash (all have a disadvantage: the production of smoke)[38] -Potassium chloride[39] -Potassium nitrate -Potassium sulfate[24][37] -Potassium hydrogen tartarate (a byproduct of wine production formerly used by French artillery)[39] -Wear reduction additives, to lower the wear of the gun barrel liners[40] -Wax -Talc -Titanium dioxide -Polyurethane jackets over the powder bags, in large guns -Other additives 
-Ethyl acetate, a solvent for manufacture of spherical powder[34] -Rosin, a surfactant to hold the grain shape of spherical powder -Graphite, a lubricant to cover the grains and prevent them from sticking together, and to dissipate static electricity[23] -Manufacturing[edit] -This section describes procedures used in the United States. See Cordite for alternative procedures formerly used in the United Kingdom. -The United States Navy manufactured single-base tubular powder for naval artillery at Indian Head, Maryland, beginning in 1900. Similar procedures were used for United States Army production at Picatinny Arsenal beginning in 1907[18] and for manufacture of smaller grained Improved Military Rifle (IMR) powders after 1914. Short-fiber cotton linter was boiled in a solution of sodium hydroxide to remove vegetable waxes, and then dried before conversion to nitrocellulose by mixing with concentrated nitric and sulfuric acids. Nitrocellulose still resembles fibrous cotton at this point in the manufacturing process, and was typically identified as pyrocellulose because it would spontaneously ignite in air until unreacted acid was removed. The term guncotton was also used, although some references identify guncotton as a more extensively nitrated and refined product used in torpedo and mine warheads prior to use of TNT.[41] -Unreacted acid was removed from pyrocellulose pulp by a multistage draining and water washing process similar to that used in paper mills during production of chemical woodpulp. Pressurized alcohol removed remaining water from drained pyrocellulose prior to mixing with ether and diphenylamine. The mixture was then fed through a press extruding a long tubular cord form to be cut into grains of the desired length.[42] -Alcohol and ether were then evaporated from "green" powder grains to a remaining solvent concentration between 3 percent for rifle powders and 7 percent for large artillery powder grains. 
Burning rate is inversely proportional to solvent concentration. Grains were coated with electrically conductive graphite to minimize generation of static electricity during subsequent blending. "Lots" containing more than ten tonnes of powder grains were mixed through a tower arrangement of blending hoppers to minimize ballistic differences. Each blended lot was then subjected to testing to determine the correct loading charge for the desired performance.[43][44] -Military quantities of old smokeless powder were sometimes reworked into new lots of propellants.[45] Through the 1920s Dr. Fred Olsen worked at Picatinny Arsenal experimenting with ways to salvage tons of single-base cannon powder manufactured for World War I. Dr. Olsen was employed by Western Cartridge Company in 1929 and developed a process for manufacturing spherical smokeless powder by 1933.[46] Reworked powder or washed pyrocellulose can be dissolved in ethyl acetate containing small quantities of desired stabilizers and other additives. The resultant syrup, combined with water and surfactants, can be heated and agitated in a pressurized container until the syrup forms an emulsion of small spherical globules of the desired size. Ethyl acetate distills off as pressure is slowly reduced to leave small spheres of nitrocellulose and additives. The spheres can be subsequently modified by adding nitroglycerine to increase energy, flattening between rollers to a uniform minimum dimension, coating with phthalate deterrents to retard ignition, and/or glazing with graphite to improve flow characteristics during blending.[47][48] -Modern smokeless powder is produced in the United States by St. Marks Powder, Inc. owned by General Dynamics.[49] -Flashless propellant[edit] -Muzzle flash is the light emitted in the vicinity of the muzzle by the hot propellant gases and the chemical reactions that follow as the gases mix with the surrounding air. 
Before projectiles exit a slight pre-flash may occur from gases leaking past the projectiles. Following muzzle exit the heat of gases is usually sufficient to emit visible radiation – the primary flash. The gases expand but as they pass through the Mach disc they are re-compressed to produce an intermediate flash. Hot combustible gases (e.g. hydrogen and carbon-monoxide) may follow when they mix with oxygen in the surrounding air to produce the secondary flash, the brightest. The secondary flash does not usually occur with small-arms.[50] -Nitrocellulose contains insufficient oxygen to completely oxidize its carbon and hydrogen. The oxygen deficit is increased by addition of graphite and organic stabilizers. Products of combustion within the gun barrel include flammable gasses like hydrogen and carbon monoxide. At high temperature, these flammable gasses will ignite when turbulently mixed with atmospheric oxygen beyond the muzzle of the gun. During night engagements the flash produced by ignition can reveal the location of the gun to enemy forces[51] and cause temporary night-blindness among the gun crew by photo-bleaching visual purple.[52] -Flash suppressors are commonly used on small arms to reduce the flash signature, but this approach is not practical for artillery. Artillery muzzle flash up to 150 feet (46 m) from the muzzle has been observed, and can be reflected off clouds and be visible for distances up to 30 miles (48 km).[51] For artillery the most effective method is a propellant that produces a large proportion of inert nitrogen at relatively low temperatures that dilutes the combustible gases. 
Triple based propellants are used for this because of the nitrogen in the nitroguanidine.[53] -Before the use of triple based propellants the usual method of flash reduction was to add inorganic salts like potassium chloride so their specific heat capacity might reduce the temperature of combustion gases and their finely divided particulate smoke might block visible wavelengths of radiant energy of combustion.[39] -See also[edit] -Portal icon Pyrotechnics portal -Antique guns -Ballistite -Cordite -Firearms -Gunpowder -Nitrocellulose -Small arms -Brown-brown – a drug created by mixing cocaine with cartridge powder -References[edit] -Notes[edit] -Jump up ^ Hatcher, Julian S. and Barr, Al Handloading Hennage Lithograph Company (1951) p.34 -Jump up ^ Fairfield, A. P., CDR USN Naval Ordnance Lord Baltimore Press (1921) p.44 -^ Jump up to: a b c d e Sharpe, Philip B. Complete Guide to Handloading 3rd Edition (1953) Funk & Wagnalls pp.146-149 -Jump up ^ see gunpowder -Jump up ^ Sharpe, Philip B. Complete Guide To Handloading (1953) Funk & Wagnalls p.60 -Jump up ^ Davis, William C., Jr. Handloading (1981) National Rifle Association p.21 -Jump up ^ Davis, Tenney L. The Chemistry of Powder & Explosives (1943) page 195 -Jump up ^ Davis, William C., Jr. Handloading National Rifle Association of America (1981) p.28 -^ Jump up to: a b c Sharpe, Philip B. Complete Guide to Handloading 3rd Edition (1953) Funk & Wagnalls pp.141-144 -Jump up ^ Hogg, Oliver F. G. Artillery: Its Origin, Heyday and Decline (1969) p.138-139 -^ Jump up to: a b Davis, Tenney L. The Chemistry of Powder & Explosives (1943) pages 289–292 -Jump up ^ Hogg, Oliver F. G. Artillery: Its Origin, Heyday and Decline (1969) p.139 -^ Jump up to: a b Hogg, Oliver F. G. Artillery: Its Origin, Heyday and Decline (1969) p.140 -Jump up ^ U.S. Patent 430,212 – Manufacture of explosive – H. S. Maxim -Jump up ^ Hogg, Oliver F. G. Artillery: Its Origin, Heyday and Decline (1969) p.141 -^ Jump up to: a b Davis, Tenney L. 
The Chemistry of Powder & Explosives (1943) pages 296-297 -Jump up ^ "Laflin & Rand Powder Company". DuPont. Retrieved 2012-02-24. -^ Jump up to: a b Davis, Tenny L. The Chemistry of Powder & Explosives (1943) p.297 -Jump up ^ Davis, Tenny L. The Chemistry of Powder & Explosives (1943) p.298 -Jump up ^ Fairfield, A. P., CDR USN Naval Ordnance Lord Baltimore Press (1921) p.28 -Jump up ^ Davis, Tenny L. The Chemistry of Powder & Explosives (1943) p. 310 -Jump up ^ Fairfield, A. P., CDR USN Naval Ordnance Lord Baltimore Press (1921) pp.41–43 -^ Jump up to: a b Davis, Tenny L. The Chemistry of Powder & Explosives (1943) p.306 -^ Jump up to: a b c d e f g h Campbell, John Naval Weapons of World War Two (1985) p. 5 -^ Jump up to: a b c Campbell, John Naval Weapons of World War Two (1985) p. 104 -^ Jump up to: a b c Campbell, John Naval Weapons of World War Two (1985) p. 221 -^ Jump up to: a b Campbell, John Naval Weapons of World War Two (1985) p. 318 -Jump up ^ Davis, Tenny L. The Chemistry of Powder & Explosives (1943) pages 317–320 -^ Jump up to: a b c Davis, William C., Jr. Handloading National Rifle Association of America (1981) p.30 -Jump up ^ Davis, William C., Jr. Handloading National Rifle Association of America (1981) p.31 -^ Jump up to: a b Campbell, John Naval Weapons of World War Two (1985) p. 174 -Jump up ^ Davis, Tenny L. The Chemistry of Powder & Explosives (1943) pages 307–311 -Jump up ^ Davis, Tenny L. The Chemistry of Powder & Explosives (1943) p. 302 -^ Jump up to: a b Davis, Tenny L. The Chemistry of Powder & Explosives (1943) p. 296 -Jump up ^ Davis, Tenny L. The Chemistry of Powder & Explosives (1943) p. 307 -Jump up ^ Davis, Tenny L. The Chemistry of Powder & Explosives (1943) p. 308 -^ Jump up to: a b Davis, William C., Jr. Handloading National Rifle Association of America (1981) p.32 -Jump up ^ Davis, Tenny L. The Chemistry of Powder & Explosives (1943) pages 322–327 -^ Jump up to: a b c Davis, Tenny L. 
The Chemistry of Powder & Explosives (1943) pages 323–327 -Jump up ^ "USA 16"/50 (40.6 cm) Mark 7". NavWeaps. 2008-11-03. Retrieved 2008-12-05. -Jump up ^ Fairfield, A. P., CDR USN Naval Ordnance Lord Baltimore Press (1921) pages 28–31 -Jump up ^ Fairfield, A. P., CDR USN Naval Ordnance Lord Baltimore Press (1921) pages 31–35 -Jump up ^ Fairfield, A. P., CDR USN Naval Ordnance Lord Baltimore Press (1921) pages 35–41 -Jump up ^ Davis, Tenny L. The Chemistry of Powder & Explosives (1943) pages 293 & 306 -Jump up ^ Fairfield, A. P., CDR USN Naval Ordnance Lord Baltimore Press (1921) p.39 -Jump up ^ Matunas, E. A. Winchester-Western Ball Powder Loading Data Olin Corporation (1978) p.3 -Jump up ^ Davis, Tenny L. The Chemistry of Powder & Explosives (1943) pages 328–330 -Jump up ^ Wolfe, Dave Propellant Profiles Volume 1 Wolfe Publishing Company (1982) pages 136–137 -Jump up ^ General Dynamics Commercial Powder Applications. -Jump up ^ Moss G. M., Leeming D. W., Farrar C. L. Military Ballisitcs (1969) pages 55–56 -^ Jump up to: a b Davis, Tenny L. The Chemistry of Powder & Explosives (1943) pages 322–323 -Jump up ^ Milner p.68 -Jump up ^ Moss G. M., Leeming D. W., Farrar C. L. Military Ballisitcs (1969) pages 59–60 -Sources[edit] -Campbell, John (1985). Naval Weapons of World War Two. Naval Institute Press. ISBN 0-87021-459-4. -Davis, Tenney L. (1943). The Chemistry of Powder & Explosives (Angriff Press [1992] ed.). John Wiley & Sons Inc. ISBN 0-913022-00-4. -Davis, William C., Jr. (1981). Handloading. National Rifle Association of America. ISBN 0-935998-34-9. -Fairfield, A. P., CDR USN (1921). Naval Ordnance. Lord Baltimore Press. -Hatcher, Julian S. and Barr, Al (1951). Handloading. Hennage Lithograph Company. -Matunas, E. A. (1978). Winchester-Western Ball Powder Loading Data. Olin Corporation. -Milner, Marc (1985). North Atlantic Run. Naval Institute Press. ISBN 0-87021-450-0. -Wolfe, Dave (1982). Propellant Profiles Volume 1. Wolfe Publishing Company. 
ISBN 0-935632-10-7. -External links[edit] -The Manufacture of Smokeless Powders and their Forensic Analysis: A Brief Review – Robert M. Heramb, Bruce R. McCord -Hudson Maxim papers (1851-1925) at Hagley Museum and Library. Collection includes material relating to Maxim's patent on the process of making smokeless powder. -Categories: CorditeExplosivesFirearm propellantsSolid fuels -Navigation menu -Create accountLog inArticleTalkReadEditView history - -Main page -Contents -Featured content -Current events -Random article -Donate to Wikipedia -Wikimedia Shop -Interaction -Help -About Wikipedia -Community portal -Recent changes -Contact page -Tools -What links here -Related changes -Upload file -Special pages -Permanent link -Page information -Wikidata item -Cite this page -Print/export -Create a book -Download as PDF -Printable version -Languages -العربية -Български -Dansk -Deutsch -Español -فارسی -Français -Bahasa Indonesia -Íslenska -Italiano -עברית -Nederlands -日本語 -Polski -Português -Русский -Svenska -தமிழ் -中文 -Edit links -This page was last modified on 25 July 2014 at 22:33. -Text is available under the Creative Commons Attribution-ShareAlike License; additional terms may apply. By using this site, you agree to the Terms of Use and Privacy Policy. Wikipedia® is a registered trademark of the Wikimedia Foundation, Inc., a non-profit organization. -Privacy policyAbout WikipediaDisclaimersContact WikipediaDevelopersMobile viewWikimedia Foundation Powered by MediaWiki - - -Deflagration -From Wikipedia, the free encyclopedia - -[hide]This article has multiple issues. Please help improve it or discuss these issues on the talk page. -This article needs additional citations for verification. (April 2011) -This article may be too technical for most readers to understand. (December 2013) - -A log in a fireplace. 
-Deflagration [1] (Lat: de + flagrare, "to burn down") is a term describing subsonic combustion propagating through heat transfer; hot burning material heats the next layer of cold material and ignites it. Most "fire" found in daily life, from flames to explosions, is deflagration. Deflagration is different from detonation, which is supersonic and propagates through shock. -Contents [hide] -1 Applications -2 Oil/wax fire and water -3 Flame physics -4 Damaging deflagration events -5 See also -6 References -Applications[edit] -In engineering applications, deflagrations are easier to control than detonations. Consequently, they are better suited when the goal is to move an object (a bullet in a gun, or a piston in an internal combustion engine) with the force of the expanding gas. Typical examples of deflagrations are the combustion of a gas-air mixture in a gas stove or a fuel-air mixture in an internal combustion engine, and the rapid burning of gunpowder in a firearm or of pyrotechnic mixtures in fireworks. Deflagration systems and products can also be used in mining, demolition and stone quarrying via gas pressure blasting as a beneficial alternative to high explosives. -Oil/wax fire and water[edit] -Adding water to a burning hydrocarbon such as oil or wax produces a deflagration. The water boils rapidly and ejects the burning material as a fine spray of droplets. A deflagration then occurs as the fine mist of oil ignites and burns extremely rapidly. These are particularly common in chip pan fires, which are responsible for one in five household fires in Britain.[2] -Flame physics[edit] -The underlying flame physics can be understood with the help of an idealized model consisting of a uniform one-dimensional tube of unburnt and burned gaseous fuel, separated by a thin transitional region of width \delta\; in which the burning occurs. The burning region is commonly referred to as the flame or flame front. 
In equilibrium, thermal diffusion across the flame front is balanced by the heat supplied by burning. -There are two characteristic timescales which are important here. The first is the thermal diffusion timescale $\tau_d$, which is approximately equal to -$\tau_d \simeq \delta^2 / \kappa$, -where $\kappa$ is the thermal diffusivity. The second is the burning timescale $\tau_b$ that strongly decreases with temperature, typically as -$\tau_b \propto \exp[\Delta U/(k_B T_f)]$, -where $\Delta U$ is the activation barrier for the burning reaction and $T_f$ is the temperature developed as the result of burning; the value of this so-called "flame temperature" can be determined from the laws of thermodynamics. -For a steadily moving deflagration front, these two timescales must be equal: the heat generated by burning is equal to the heat carried away by heat transfer. This makes it possible to calculate the characteristic width $\delta$ of the flame front: -$\tau_b = \tau_d$, -thus -$\delta \simeq \sqrt{\kappa \tau_b}$. -Now, the thermal flame front propagates at a characteristic speed $S_l$, which is simply equal to the flame width divided by the burn time: -$S_l \simeq \delta / \tau_b \simeq \sqrt{\kappa / \tau_b}$. -This simplified model neglects the change of temperature and thus the burning rate across the deflagration front. This model also neglects the possible influence of turbulence. As a result, this derivation gives only the laminar flame speed — hence the designation $S_l$. 
-In free-air deflagrations, there is a continuous variation in deflagration effects relative to the maximum flame velocity. When flame velocities are low, the effect of a deflagration is to release heat. Some authors use the term flash fire to describe these low-speed deflagrations. At flame velocities near the speed of sound, the energy released is in the form of pressure and the results resemble a detonation. Between these extremes both heat and pressure are released. -When a low-speed deflagration occurs within a closed vessel or structure, pressure effects can produce damage due to expansion of gases as a secondary effect. The heat released by the deflagration causes the combustion gases and excess air to expand thermally. The net result is that the volume of the vessel or structure must expand to accommodate the hot combustion gases, or the vessel must be strong enough to withstand the additional internal pressure, or it fails, allowing the gases to escape. The risks of deflagration inside waste storage drums is a growing concern in storage facilities. -See also[edit] - Look up deflagration in Wiktionary, the free dictionary. -Pressure piling -References[edit] -Jump up ^ "Glossary D-H". Hutchisonrodway.co.nz. Retrieved 2013-12-29. 
-Jump up ^ UK Fire Service advice on chip pan fires -Categories: Explosives -Navigation menu -Create accountLog inArticleTalkReadEditView history - -Main page -Contents -Featured content -Current events -Random article -Donate to Wikipedia -Wikimedia Shop -Interaction -Help -About Wikipedia -Community portal -Recent changes -Contact page -Tools -What links here -Related changes -Upload file -Special pages -Permanent link -Page information -Wikidata item -Cite this page -Print/export -Create a book -Download as PDF -Printable version -Languages -Català -Čeština -Deutsch -Español -Français -Italiano -Lietuvių -Nederlands -Norsk bokmål -Polski -Português -Русский -Српски / srpski -Svenska -Edit links -This page was last modified on 2 October 2014 at 16:44. -Text is available under the Creative Commons Attribution-ShareAlike License; additional terms may apply. By using this site, you agree to the Terms of Use and Privacy Policy. Wikipedia® is a registered trademark of the Wikimedia Foundation, Inc., a non-profit organization. -Privacy policyAbout WikipediaDisclaimersContact WikipediaDevelopersMobile viewWikimedia Foundation Powered by MediaWiki - - -United Kingdom -From Wikipedia, the free encyclopedia -This article is about the sovereign state. For the island, see Great Britain. For other uses, see United Kingdom (disambiguation) and UK (disambiguation). -Page semi-protected -United Kingdom of Great -Britain and Northern Ireland[show] - -A flag featuring both cross and saltire in red, white and blue Coat of arms containing shield and crown in centre, flanked by lion and unicorn -Flag Royal coat of arms[nb 1] -Anthem: "God Save the Queen"[nb 2] -MENU0:00 -Two islands to the north-west of continental Europe. Highlighted are the larger island and the north-eastern fifth of the smaller island to the west. 
-Location of the United Kingdom (dark green) -– in Europe (green & dark grey) -– in the European Union (green) -Capital -and largest city London -51°30′N 0°7′W -Official language -and national language English -Recognised regional -languages Cornish, Irish, Scots, Scottish Gaelic, Ulster-Scots, Welsh[nb 3] -Ethnic groups (2011) 87.1% White -7.0% Asian -3.0% Black -2.0% Mixed -0.9% Other -Demonym British, Briton -Government Unitary parliamentary constitutional monarchy - - Monarch Elizabeth II - - Prime Minister David Cameron -Legislature Parliament - - Upper house House of Lords - - Lower house House of Commons -Formation - - Acts of Union 1707 1 May 1707 - - Acts of Union 1800 1 January 1801 - - Irish Free State Constitution Act 5 December 1922 -Area - - Total 243,610 km2 (80th) -94,060 sq mi - - Water (%) 1.34 -Population - - 2013 estimate 64,100,000[3] (22nd) - - 2011 census 63,181,775[4] (22nd) - - Density 255.6/km2 (51st) -661.9/sq mi -GDP (PPP) 2014 estimate - - Total $2.435 trillion[5] (10th) - - Per capita $37,744[5] (27th) -GDP (nominal) 2014 estimate - - Total $2.848 trillion[5] (6th) - - Per capita $44,141[5] (22nd) -Gini (2012) positive decrease 32.8[6] -medium · 33rd -HDI (2013) Steady 0.892[7] -very high · 14th -Currency Pound sterling (GBP) -Time zone GMT (UTC​) - - Summer (DST) BST (UTC+1) -Date format dd/mm/yyyy (AD) -Drives on the left -Calling code +44 -ISO 3166 code GB -Internet TLD .uk -The United Kingdom of Great Britain and Northern Ireland Listeni/ɡreɪt ˈbrɪt(ə)n ənd ˈnɔːð(ə)n ˈʌɪələnd/, commonly known as the United Kingdom (UK) or Britain, is a sovereign state in Europe. Lying off the north-western coast of the European mainland, the country includes the island of Great Britain (a term also applied loosely to refer to the whole country),[8] the north-eastern part of the island of Ireland, and many smaller islands. 
Northern Ireland is the only part of the UK that shares a land border with another state: the Republic of Ireland.[nb 4] Apart from this land border, the UK is surrounded by the Atlantic Ocean, with the North Sea in the east and the English Channel in the south. The Irish Sea lies between Great Britain and Ireland. The UK has an area of 243,610 square kilometres (94,060 sq mi), making it the 78th-largest sovereign state in the world and the 11th-largest in Europe. -The United Kingdom is the 22nd-most populous country, with an estimated 64.1 million inhabitants.[3] It is a constitutional monarchy with a parliamentary system of governance.[9][10] Its capital city is London, an important global city and financial centre with the fourth-largest urban area in Europe.[11] The current monarch—since 6 February 1952—is Queen Elizabeth II. The UK consists of four countries: England, Scotland, Wales, and Northern Ireland.[12] The latter three have devolved administrations,[13] each with varying powers,[14][15] based in their capitals, Edinburgh, Cardiff, and Belfast, respectively. Guernsey, Jersey, and the Isle of Man are not part of the United Kingdom, being Crown dependencies with the British Government responsible for defence and international representation.[16] The UK has fourteen Overseas Territories,[17] including the disputed Falkland Islands, Gibraltar, and Indian Ocean Territory. -The relationships among the countries of the United Kingdom have changed over time. Wales was annexed by the Kingdom of England under the Acts of Union of 1536 and 1543. A treaty between England and Scotland resulted in a unified Kingdom of Great Britain in 1707, which in 1801, merged with the Kingdom of Ireland to form the United Kingdom of Great Britain and Ireland. 
In 1922, five-sixths of Ireland seceded from the country, leaving the present formulation of the United Kingdom of Great Britain and Northern Ireland.[nb 5] British Overseas Territories, formerly colonies, are the remnants of the British Empire which, at its height in the late 19th and early 20th centuries, encompassed almost a quarter of the world's land mass and was the largest empire in history. British influence can be observed in the language, culture, and legal systems of many of its former colonies. -The United Kingdom is a developed country and has the world's sixth-largest economy by nominal GDP and tenth-largest by purchasing power parity. The country is considered to have a high-income economy and is categorised as very high in the Human Development Index, currently ranking 14th in the world. It was the world's first industrialised country and the world's foremost power during the 19th and early 20th centuries.[18][19] The UK remains a great power with considerable economic, cultural, military, scientific, and political influence internationally.[20][21] It is a recognised nuclear weapons state and its military expenditure ranks fifth or sixth in the world.[22][23] The UK has been a permanent member of the United Nations Security Council since its first session in 1946. It has been a member state of the European Union (EU) and its predecessor, the European Economic Community (EEC), since 1973; it is also a member of the Commonwealth of Nations, the Council of Europe, the G7, the G8, the G20, NATO, the Organisation for Economic Co-operation and Development (OECD), and the World Trade Organization (WTO). 
-Contents [hide] -1 Etymology and terminology -2 History -2.1 Before 1707 -2.2 Since the Acts of Union of 1707 -3 Geography -3.1 Climate -3.2 Administrative divisions -4 Dependencies -5 Politics -5.1 Government -5.2 Devolved administrations -5.3 Law and criminal justice -5.4 Foreign relations -5.5 Military -6 Economy -6.1 Science and technology -6.2 Transport -6.3 Energy -7 Demographics -7.1 Ethnic groups -7.2 Languages -7.3 Religion -7.4 Migration -7.5 Education -7.6 Healthcare -8 Culture -8.1 Literature -8.2 Music -8.3 Visual art -8.4 Cinema -8.5 Media -8.6 Philosophy -8.7 Sport -8.8 Symbols -9 See also -10 Notes -11 References -12 Further reading -13 External links -Etymology and terminology -See also: Britain (placename) and Terminology of the British Isles -The 1707 Acts of Union declared that the kingdoms of England and Scotland were "United into One Kingdom by the Name of Great Britain", though the new state is also referred to in the Acts as the "Kingdom of Great Britain", "United Kingdom of Great Britain" and "United Kingdom".[24][25][nb 6] However, the term "united kingdom" is only found in informal use during the 18th century and the country was only occasionally referred to as he "United Kingdom of Great Britain".[26] The Acts of Union 1800 united the Kingdom of Great Britain and the Kingdom of Ireland in 1801, forming the United Kingdom of Great Britain and Ireland. 
The name "United Kingdom of Great Britain and Northern Ireland" was adopted following the independence of the Irish Free State, and the partition of Ireland, in 1922, which left Northern Ireland as the only part of the island of Ireland within the UK.[27] -Although the United Kingdom, as a sovereign state, is a country, England, Scotland, Wales, and to a lesser degree, Northern Ireland, are also regarded as countries, though they are not sovereign states.[28][29] Scotland, Wales and Northern Ireland have devolved self-government.[30][31] The British Prime Minister's website has used the phrase "countries within a country" to describe the United Kingdom.[12] Some statistical summaries, such as those for the twelve NUTS 1 regions of the UK, also refer to Scotland, Wales and Northern Ireland as "regions".[32][33] Northern Ireland is also referred to as a "province".[28][34] With regard to Northern Ireland, the descriptive name used "can be controversial, with the choice often revealing one's political preferences."[35] -The term Britain is often used as synonym for the United Kingdom. The term Great Britain, by contrast, refers conventionally to the island of Great Britain, or politically to England, Scotland and Wales in combination.[36][37][38] However, it is sometimes used as a loose synonym for the United Kingdom as a whole.[39][40] GB and GBR are the standard country codes for the United Kingdom (see ISO 3166-2 and ISO 3166-1 alpha-3) and are consequently used by international organisations to refer to the United Kingdom. Additionally, the United Kingdom's Olympic team competes under the name "Great Britain" or "Team GB".[41][42] -The adjective British is commonly used to refer to matters relating to the United Kingdom. 
The term has no definite legal connotation, but is used in law to refer to UK citizenship and matters to do with nationality.[43] People of the United Kingdom use a number of different terms to describe their national identity and may identify themselves as being British; or as being English, Scottish, Welsh, Northern Irish, or Irish;[44] or as being both.[45] -In 2006, a new design of British passport was introduced. Its first page shows the long form name of the state in English, Welsh and Scottish Gaelic.[46] In Welsh, the long form name of the state is "Teyrnas Unedig Prydain Fawr a Gogledd Iwerddon" with "Teyrnas Unedig" being used as a short form name on government websites.[47] In Scottish Gaelic, the long form is "Rìoghachd Aonaichte Bhreatainn is Èireann a Tuath" and the short form "Rìoghachd Aonaichte". -History -See also: History of the British Isles -Before 1707 - -Stonehenge, in Wiltshire, was erected around 2500 BC. -Main articles: History of England, History of Wales, History of Scotland, History of Ireland and History of the formation of the United Kingdom -Settlement by anatomically modern humans of what was to become the United Kingdom occurred in waves beginning by about 30,000 years ago.[48] By the end of the region's prehistoric period, the population is thought to have belonged, in the main, to a culture termed Insular Celtic, comprising Brythonic Britain and Gaelic Ireland.[49] The Roman conquest, beginning in 43 AD, and the 400-year rule of southern Britain, was followed by an invasion by Germanic Anglo-Saxon settlers, reducing the Brythonic area mainly to what was to become Wales and the historic Kingdom of Strathclyde.[50] Most of the region settled by the Anglo-Saxons became unified as the Kingdom of England in the 10th century.[51] Meanwhile, Gaelic-speakers in north west Britain (with connections to the north-east of Ireland and traditionally supposed to have migrated from there in the 5th century)[52][53] united with the Picts to 
create the Kingdom of Scotland in the 9th century.[54] -In 1066, the Normans invaded England from France and after its conquest, seized large parts of Wales, conquered much of Ireland and were invited to settle in Scotland, bringing to each country feudalism on the Northern French model and Norman-French culture.[55] The Norman elites greatly influenced, but eventually assimilated with, each of the local cultures.[56] Subsequent medieval English kings completed the conquest of Wales and made an unsuccessful attempt to annex Scotland. Thereafter, Scotland maintained its independence, albeit in near-constant conflict with England. The English monarchs, through inheritance of substantial territories in France and claims to the French crown, were also heavily involved in conflicts in France, most notably the Hundred Years War, while the Kings of Scots were in an alliance with the French during this period.[57] - -The Bayeux Tapestry depicts the Battle of Hastings and the events leading to it. 
-The early modern period saw religious conflict resulting from the Reformation and the introduction of Protestant state churches in each country.[58] Wales was fully incorporated into the Kingdom of England,[59] and Ireland was constituted as a kingdom in personal union with the English crown.[60] In what was to become Northern Ireland, the lands of the independent Catholic Gaelic nobility were confiscated and given to Protestant settlers from England and Scotland.[61] -In 1603, the kingdoms of England, Scotland and Ireland were united in a personal union when James VI, King of Scots, inherited the crowns of England and Ireland and moved his court from Edinburgh to London; each country nevertheless remained a separate political entity and retained its separate political, legal, and religious institutions.[62][63] -In the mid-17th century, all three kingdoms were involved in a series of connected wars (including the English Civil War) which led to the temporary overthrow of the monarchy and the establishment of the short-lived unitary republic of the Commonwealth of England, Scotland and Ireland.[64][65] -Although the monarchy was restored, it ensured (with the Glorious Revolution of 1688) that, unlike much of the rest of Europe, royal absolutism would not prevail, and a professed Catholic could never accede to the throne. The British constitution would develop on the basis of constitutional monarchy and the parliamentary system.[66] During this period, particularly in England, the development of naval power (and the interest in voyages of discovery) led to the acquisition and settlement of overseas colonies, particularly in North America.[67][68] -Since the Acts of Union of 1707 -Main article: History of the United Kingdom - -The Treaty of Union led to a single united kingdom encompassing all Great Britain. 
-On 1 May 1707, the united kingdom of Great Britain came into being, the result of Acts of Union being passed by the parliaments of England and Scotland to ratify the 1706 Treaty of Union and so unite the two kingdoms.[69][70][71] -In the 18th century, cabinet government developed under Robert Walpole, in practice the first prime minister (1721–1742). A series of Jacobite Uprisings sought to remove the Protestant House of Hanover from the British throne and restore the Catholic House of Stuart. The Jacobites were finally defeated at the Battle of Culloden in 1746, after which the Scottish Highlanders were brutally suppressed. The British colonies in North America that broke away from Britain in the American War of Independence became the United States of America in 1782. British imperial ambition turned elsewhere, particularly to India.[72] -During the 18th century, Britain was involved in the Atlantic slave trade. British ships transported an estimated 2 million slaves from Africa to the West Indies before banning the trade in 1807.[73] The term 'United Kingdom' became official in 1801 when the parliaments of Britain and Ireland each passed an Act of Union, uniting the two kingdoms and creating the United Kingdom of Great Britain and Ireland.[74] -In the early 19th century, the British-led Industrial Revolution began to transform the country. It slowly led to a shift in political power away from the old Tory and Whig landowning classes towards the new industrialists. An alliance of merchants and industrialists with the Whigs would lead to a new party, the Liberals, with an ideology of free trade and laissez-faire. In 1832 Parliament passed the Great Reform Act, which began the transfer of political power from the aristocracy to the middle classes. In the countryside, enclosure of the land was driving small farmers out. Towns and cities began to swell with a new urban working class. 
Few ordinary workers had the vote, and they created their own organisations in the form of trade unions. -Painting of a bloody battle. Horses and infantry fight or lie on grass. -The Battle of Waterloo marked the end of the Napoleonic Wars and the start of Pax Britannica. -After the defeat of France in the Revolutionary and Napoleonic Wars (1792–1815), the UK emerged as the principal naval and imperial power of the 19th century (with London the largest city in the world from about 1830).[75] Unchallenged at sea, British dominance was later described as Pax Britannica.[76][77] By the time of the Great Exhibition of 1851, Britain was described as the "workshop of the world".[78] The British Empire was expanded to include India, large parts of Africa and many other territories throughout the world. Alongside the formal control it exerted over its own colonies, British dominance of much of world trade meant that it effectively controlled the economies of many countries, such as China, Argentina and Siam.[79][80] Domestically, political attitudes favoured free trade and laissez-faire policies and a gradual widening of the voting franchise. During the century, the population increased at a dramatic rate, accompanied by rapid urbanisation, causing significant social and economic stresses.[81] After 1875, the UK's industrial monopoly was challenged by Germany and the USA. To seek new markets and sources of raw materials, the Conservative Party under Disraeli launched a period of imperialist expansion in Egypt, South Africa and elsewhere. Canada, Australia and New Zealand became self-governing dominions.[82] -Social reform and home rule for Ireland were important domestic issues after 1900. The Labour Party emerged from an alliance of trade unions and small Socialist groups in 1900, and suffragettes campaigned for women's right to vote before 1914. -Black-and-white photo of two dozen men in military uniforms and metal helmets sitting or standing in a muddy trench. 
-Infantry of the Royal Irish Rifles during the Battle of the Somme. More than 885,000 British soldiers died on the battlefields of World War I. -The UK fought with France, Russia and (after 1917) the US, against Germany and its allies in World War I (1914–18).[83] The UK armed forces were engaged across much of the British Empire and in several regions of Europe, particularly on the Western front.[84] The high fatalities of trench warfare caused the loss of much of a generation of men, with lasting social effects in the nation and a great disruption in the social order. -After the war, the UK received the League of Nations mandate over a number of former German and Ottoman colonies. The British Empire reached its greatest extent, covering a fifth of the world's land surface and a quarter of its population.[85] However, the UK had suffered 2.5 million casualties and finished the war with a huge national debt.[84] The rise of Irish Nationalism and disputes within Ireland over the terms of Irish Home Rule led eventually to the partition of the island in 1921,[86] and the Irish Free State became independent with Dominion status in 1922. Northern Ireland remained part of the United Kingdom.[87] A wave of strikes in the mid-1920s culminated in the UK General Strike of 1926. The UK had still not recovered from the effects of the war when the Great Depression (1929–32) occurred. This led to considerable unemployment and hardship in the old industrial areas, as well as political and social unrest in the 1930s. A coalition government was formed in 1931.[88] -The UK entered World War II by declaring war on Germany in 1939, after it had invaded Poland and Czechoslovakia. In 1940, Winston Churchill became prime minister and head of a coalition government. Despite the defeat of its European allies in the first year of the war, the UK continued the fight alone against Germany. 
In 1940, the RAF defeated the German Luftwaffe in a struggle for control of the skies in the Battle of Britain. The UK suffered heavy bombing during the Blitz. There were also eventual hard-fought victories in the Battle of the Atlantic, the North Africa campaign and Burma campaign. UK forces played an important role in the Normandy landings of 1944, achieved with its ally the US. After Germany's defeat, the UK was one of the Big Three powers who met to plan the post-war world; it was an original signatory to the Declaration of the United Nations. The UK became one of the five permanent members of the United Nations Security Council. However, the war left the UK severely weakened and depending financially on Marshall Aid and loans from the United States.[89] -Map of the world. Canada, the eastern United States, countries in east Africa, India, most of Australasia and some other countries are highlighted in pink. -Territories that were at one time part of the British Empire. Current British Overseas Territories are underlined in red. -In the immediate post-war years, the Labour government initiated a radical programme of reforms, which had a significant effect on British society in the following decades.[90] Major industries and public utilities were nationalised, a Welfare State was established, and a comprehensive, publicly funded healthcare system, the National Health Service, was created.[91] The rise of nationalism in the colonies coincided with Britain's now much-diminished economic position, so that a policy of decolonisation was unavoidable. Independence was granted to India and Pakistan in 1947.[92] Over the next three decades, most colonies of the British Empire gained their independence. 
Many became members of the Commonwealth of Nations.[93] -Although the UK was the third country to develop a nuclear weapons arsenal (with its first atomic bomb test in 1952), the new post-war limits of Britain's international role were illustrated by the Suez Crisis of 1956. The international spread of the English language ensured the continuing international influence of its literature and culture. From the 1960s onward, its popular culture was also influential abroad. As a result of a shortage of workers in the 1950s, the UK government encouraged immigration from Commonwealth countries. In the following decades, the UK became a multi-ethnic society.[94] Despite rising living standards in the late 1950s and 1960s, the UK's economic performance was not as successful as many of its competitors, such as West Germany and Japan. In 1973, the UK joined the European Economic Community (EEC), and when the EEC became the European Union (EU) in 1992, it was one of the 12 founding members. - -After the two vetos of France in 1961 and 1967, the UK entered in the European Union in 1973. In 1975, 67% of Britons voted yes to the permanence in the European Union. -From the late 1960s, Northern Ireland suffered communal and paramilitary violence (sometimes affecting other parts of the UK) conventionally known as the Troubles. It is usually considered to have ended with the Belfast "Good Friday" Agreement of 1998.[95][96][97] -Following a period of widespread economic slowdown and industrial strife in the 1970s, the Conservative Government of the 1980s initiated a radical policy of monetarism, deregulation, particularly of the financial sector (for example, Big Bang in 1986) and labour markets, the sale of state-owned companies (privatisation), and the withdrawal of subsidies to others.[98] This resulted in high unemployment and social unrest, but ultimately also economic growth, particularly in the services sector. 
From 1984, the economy was helped by the inflow of substantial North Sea oil revenues.[99] -Around the end of the 20th century there were major changes to the governance of the UK with the establishment of devolved administrations for Scotland, Wales and Northern Ireland.[13][100] The statutory incorporation followed acceptance of the European Convention on Human Rights. The UK is still a key global player diplomatically and militarily. It plays leading roles in the EU, UN and NATO. However, controversy surrounds some of Britain's overseas military deployments, particularly in Afghanistan and Iraq.[101] -The 2008 global financial crisis severely affected the UK economy. The coalition government of 2010 introduced austerity measures intended to tackle the substantial public deficits which resulted.[102] In 2014 the Scottish Government held a referendum on Scottish independence, with the majority of voters rejecting the independence proposal and opting to remain within the United Kingdom.[103] -Geography -Main article: Geography of the United Kingdom -Map of United Kingdom showing hilly regions to north and west, and flattest region in the south-east. -The topography of the UK -The total area of the United Kingdom is approximately 243,610 square kilometres (94,060 sq mi). The country occupies the major part of the British Isles[104] archipelago and includes the island of Great Britain, the northeastern one-sixth of the island of Ireland and some smaller surrounding islands. It lies between the North Atlantic Ocean and the North Sea with the south-east coast coming within 22 miles (35 km) of the coast of northern France, from which it is separated by the English Channel.[105] In 1993 10% of the UK was forested, 46% used for pastures and 25% cultivated for agriculture.[106] The Royal Greenwich Observatory in London is the defining point of the Prime Meridian.[107] -The United Kingdom lies between latitudes 49° to 61° N, and longitudes 9° W to 2° E. 
Northern Ireland shares a 224-mile (360 km) land boundary with the Republic of Ireland.[105] The coastline of Great Britain is 11,073 miles (17,820 km) long.[108] It is connected to continental Europe by the Channel Tunnel, which at 31 miles (50 km) (24 miles (38 km) underwater) is the longest underwater tunnel in the world.[109] -England accounts for just over half of the total area of the UK, covering 130,395 square kilometres (50,350 sq mi).[110] Most of the country consists of lowland terrain,[106] with mountainous terrain north-west of the Tees-Exe line; including the Cumbrian Mountains of the Lake District, the Pennines and limestone hills of the Peak District, Exmoor and Dartmoor. The main rivers and estuaries are the Thames, Severn and the Humber. England's highest mountain is Scafell Pike (978 metres (3,209 ft)) in the Lake District. Its principal rivers are the Severn, Thames, Humber, Tees, Tyne, Tweed, Avon, Exe and Mersey.[106] -Scotland accounts for just under a third of the total area of the UK, covering 78,772 square kilometres (30,410 sq mi)[111] and including nearly eight hundred islands,[112] predominantly west and north of the mainland; notably the Hebrides, Orkney Islands and Shetland Islands. The topography of Scotland is distinguished by the Highland Boundary Fault – a geological rock fracture – which traverses Scotland from Arran in the west to Stonehaven in the east.[113] The faultline separates two distinctively different regions; namely the Highlands to the north and west and the lowlands to the south and east. 
The more rugged Highland region contains the majority of Scotland's mountainous land, including Ben Nevis which at 1,343 metres (4,406 ft) is the highest point in the British Isles.[114] Lowland areas – especially the narrow waist of land between the Firth of Clyde and the Firth of Forth known as the Central Belt – are flatter and home to most of the population including Glasgow, Scotland's largest city, and Edinburgh, its capital and political centre. -A view of Ben Nevis in the distance, fronted by rolling plains -Ben Nevis, in Scotland, is the highest point in the British Isles -Wales accounts for less than a tenth of the total area of the UK, covering 20,779 square kilometres (8,020 sq mi).[115] Wales is mostly mountainous, though South Wales is less mountainous than North and mid Wales. The main population and industrial areas are in South Wales, consisting of the coastal cities of Cardiff, Swansea and Newport, and the South Wales Valleys to their north. The highest mountains in Wales are in Snowdonia and include Snowdon (Welsh: Yr Wyddfa) which, at 1,085 metres (3,560 ft), is the highest peak in Wales.[106] The 14, or possibly 15, Welsh mountains over 3,000 feet (914 m) high are known collectively as the Welsh 3000s. Wales has over 2,704 kilometres (1,680 miles) of coastline.[116] Several islands lie off the Welsh mainland, the largest of which is Anglesey (Ynys Môn) in the northwest. -Northern Ireland, separated from Great Britain by the Irish Sea and North Channel, has an area of 14,160 square kilometres (5,470 sq mi) and is mostly hilly. 
It includes Lough Neagh which, at 388 square kilometres (150 sq mi), is the largest lake in the British Isles by area.[117] The highest peak in Northern Ireland is Slieve Donard in the Mourne Mountains at 852 metres (2,795 ft).[106] -Climate -Main article: Climate of the United Kingdom -The United Kingdom has a temperate climate, with plentiful rainfall all year round.[105] The temperature varies with the seasons seldom dropping below −11 °C (12 °F) or rising above 35 °C (95 °F).[118] The prevailing wind is from the south-west and bears frequent spells of mild and wet weather from the Atlantic Ocean,[105] although the eastern parts are mostly sheltered from this wind since the majority of the rain falls over the western regions the eastern parts are therefore the driest. Atlantic currents, warmed by the Gulf Stream, bring mild winters; especially in the west where winters are wet and even more so over high ground. Summers are warmest in the south-east of England, being closest to the European mainland, and coolest in the north. Heavy snowfall can occur in winter and early spring on high ground, and occasionally settles to great depth away from the hills. -Administrative divisions -Main article: Administrative geography of the United Kingdom -Each country of the United Kingdom has its own system of administrative and geographic demarcation, whose origins often pre-date the formation of the United Kingdom. Thus there is "no common stratum of administrative unit encompassing the United Kingdom".[119] Until the 19th century there was little change to those arrangements, but there has since been a constant evolution of role and function.[120] Change did not occur in a uniform manner and the devolution of power over local government to Scotland, Wales and Northern Ireland means that future changes are unlikely to be uniform either. -The organisation of local government in England is complex, with the distribution of functions varying according to local arrangements. 
Legislation concerning local government in England is the responsibility of the UK parliament and the Government of the United Kingdom, as England has no devolved parliament. The upper-tier subdivisions of England are the nine Government office regions or European Union government office regions.[121] One region, Greater London, has had a directly elected assembly and mayor since 2000 following popular support for the proposal in a referendum.[122] It was intended that other regions would also be given their own elected regional assemblies, but a proposed assembly in the North East region was rejected by a referendum in 2004.[123] Below the regional tier, some parts of England have county councils and district councils and others have unitary authorities; while London consists of 32 London boroughs and the City of London. Councillors are elected by the first-past-the-post system in single-member wards or by the multi-member plurality system in multi-member wards.[124] -For local government purposes, Scotland is divided into 32 council areas, with wide variation in both size and population. The cities of Glasgow, Edinburgh, Aberdeen and Dundee are separate council areas, as is the Highland Council which includes a third of Scotland's area but only just over 200,000 people. Local councils are made up of elected councillors, of whom there are currently 1,222;[125] they are paid a part-time salary. Elections are conducted by single transferable vote in multi-member wards that elect either three or four councillors. Each council elects a Provost, or Convenor, to chair meetings of the council and to act as a figurehead for the area. Councillors are subject to a code of conduct enforced by the Standards Commission for Scotland.[126] The representative association of Scotland's local authorities is the Convention of Scottish Local Authorities (COSLA).[127] -Local government in Wales consists of 22 unitary authorities. 
These include the cities of Cardiff, Swansea and Newport which are unitary authorities in their own right.[128] Elections are held every four years under the first-past-the-post system.[129] The most recent elections were held in May 2012, except for the Isle of Anglesey. The Welsh Local Government Association represents the interests of local authorities in Wales.[130] -Local government in Northern Ireland has since 1973 been organised into 26 district councils, each elected by single transferable vote. Their powers are limited to services such as collecting waste, controlling dogs and maintaining parks and cemeteries.[131] On 13 March 2008 the executive agreed on proposals to create 11 new councils and replace the present system.[132] The next local elections were postponed until 2016 to facilitate this.[133] -Dependencies - -A view of the Caribbean Sea from the Cayman Islands, one of the world's foremost international financial centres[134] and tourist destinations.[135] -Main articles: British Overseas Territories, Crown dependencies and British Islands -The United Kingdom has sovereignty over seventeen territories which do not form part of the United Kingdom itself: fourteen British Overseas Territories[136] and three Crown dependencies.[137] -The fourteen British Overseas Territories are: Anguilla; Bermuda; the British Antarctic Territory; the British Indian Ocean Territory; the British Virgin Islands; the Cayman Islands; the Falkland Islands; Gibraltar; Montserrat; Saint Helena, Ascension and Tristan da Cunha; the Turks and Caicos Islands; the Pitcairn Islands; South Georgia and the South Sandwich Islands; and Sovereign Base Areas on Cyprus.[138] British claims in Antarctica are not universally recognised.[139] Collectively Britain's overseas territories encompass an approximate land area of 1,727,570 square kilometres (667,018 sq mi) and a population of approximately 260,000 people.[140] They are the remnants of the British Empire and several have 
specifically voted to remain British territories (Bermuda in 1995, Gibraltar in 2002 and the Falkland Islands in 2013).[141] -The Crown dependencies are possessions of the Crown, as opposed to overseas territories of the UK.[142] They comprise three independently administered jurisdictions: the Channel Islands of Jersey and Guernsey in the English Channel, and the Isle of Man in the Irish Sea. By mutual agreement, the British Government manages the islands' foreign affairs and defence and the UK Parliament has the authority to legislate on their behalf. However, internationally, they are regarded as "territories for which the United Kingdom is responsible".[143] The power to pass legislation affecting the islands ultimately rests with their own respective legislative assemblies, with the assent of the Crown (Privy Council or, in the case of the Isle of Man, in certain circumstances the Lieutenant-Governor).[144] Since 2005 each Crown dependency has had a Chief Minister as its head of government.[145] -Politics -Main articles: Politics of the United Kingdom, Monarchy of the United Kingdom and Elections in the United Kingdom -Elderly lady with a yellow hat and grey hair is smiling in outdoor setting. -Elizabeth II, Queen of the United Kingdom and the other Commonwealth realms -The United Kingdom is a unitary state under a constitutional monarchy. Queen Elizabeth II is the head of state of the UK as well as monarch of fifteen other independent Commonwealth countries. The monarch has "the right to be consulted, the right to encourage, and the right to warn".[146] The United Kingdom is one of only four countries in the world to have an uncodified constitution.[147][nb 7] The Constitution of the United Kingdom thus consists mostly of a collection of disparate written sources, including statutes, judge-made case law and international treaties, together with constitutional conventions. 
As there is no technical difference between ordinary statutes and "constitutional law", the UK Parliament can perform "constitutional reform" simply by passing Acts of Parliament, and thus has the political power to change or abolish almost any written or unwritten element of the constitution. However, no Parliament can pass laws that future Parliaments cannot change.[148] -Government -Main article: Government of the United Kingdom -The UK has a parliamentary government based on the Westminster system that has been emulated around the world: a legacy of the British Empire. The parliament of the United Kingdom that meets in the Palace of Westminster has two houses; an elected House of Commons and an appointed House of Lords. All bills passed are given Royal Assent before becoming law. -The position of prime minister,[nb 8] the UK's head of government,[149] belongs to the person most likely to command the confidence of the House of Commons; this individual is typically the leader of the political party or coalition of parties that holds the largest number of seats in that chamber. The prime minister chooses a cabinet and they are formally appointed by the monarch to form Her Majesty's Government. By convention, the Queen respects the prime minister's decisions of government.[150] -Large sand-coloured building of Gothic design beside brown river and road bridge. The building has several large towers, including large clock-tower. -The Palace of Westminster, seat of both houses of the Parliament of the United Kingdom -The cabinet is traditionally drawn from members of a prime minister's party or coalition and mostly from the House of Commons but always from both legislative houses, the cabinet being responsible to both. Executive power is exercised by the prime minister and cabinet, all of whom are sworn into the Privy Council of the United Kingdom, and become Ministers of the Crown. 
The current Prime Minister is David Cameron, who has been in office since 11 May 2010.[151] Cameron is the leader of the Conservative Party and heads a coalition with the Liberal Democrats. For elections to the House of Commons, the UK is currently divided into 650 constituencies,[152] each electing a single member of parliament (MP) by simple plurality. General elections are called by the monarch when the prime minister so advises. The Parliament Acts 1911 and 1949 require that a new election must be called no later than five years after the previous general election.[153] -The UK's three major political parties are the Conservative Party (Tories), the Labour Party and the Liberal Democrats, representing the British traditions of conservatism, socialism and social liberalism, respectively. During the 2010 general election these three parties won 622 out of 650 seats available in the House of Commons.[154][155] Most of the remaining seats were won by parties that contest elections only in one part of the UK: the Scottish National Party (Scotland only); Plaid Cymru (Wales only); and the Alliance Party, Democratic Unionist Party, Social Democratic and Labour Party and Sinn Féin (Northern Ireland only[nb 9]). In accordance with party policy, no elected Sinn Féin members of parliament have ever attended the House of Commons to speak on behalf of their constituents because of the requirement to take an oath of allegiance to the monarch. -Devolved administrations -Main articles: Devolution in the United Kingdom, Northern Ireland Executive, Scottish Government and Welsh Government -Modern one-story building with grass on roof and large sculpted grass area in front. Behind are residential buildings in a mixture of styles. -The Scottish Parliament Building in Holyrood is the seat of the Scottish Parliament. 
-Scotland, Wales and Northern Ireland each have their own government or executive, led by a First Minister (or, in the case of Northern Ireland, a diarchal First Minister and deputy First Minister), and a devolved unicameral legislature. England, the largest country of the United Kingdom, has no such devolved executive or legislature and is administered and legislated for directly by the UK government and parliament on all issues. This situation has given rise to the so-called West Lothian question which concerns the fact that members of parliament from Scotland, Wales and Northern Ireland can vote, sometimes decisively,[156] on matters that only affect England.[157] The McKay Commission reported on this matter in March 2013 recommending that laws affecting only England should need support from a majority of English members of parliament.[158] -The Scottish Government and Parliament have wide-ranging powers over any matter that has not been specifically reserved to the UK parliament, including education, healthcare, Scots law and local government.[159] At the 2011 elections the Scottish National Party won re-election and achieved an overall majority in the Scottish parliament, with its leader, Alex Salmond, as First Minister of Scotland.[160][161] In 2012, the UK and Scottish governments signed the Edinburgh Agreement setting out the terms for a referendum on Scottish independence in 2014, which was defeated 55% to 45%. -The Welsh Government and the National Assembly for Wales have more limited powers than those devolved to Scotland.[162] The Assembly is able to legislate on devolved matters through Acts of the Assembly, which require no prior consent from Westminster. The 2011 elections resulted in a minority Labour administration led by Carwyn Jones.[163] -The Northern Ireland Executive and Assembly have powers similar to those devolved to Scotland. The Executive is led by a diarchy representing unionist and nationalist members of the Assembly. 
Currently, Peter Robinson (Democratic Unionist Party) and Martin McGuinness (Sinn Féin) are First Minister and deputy First Minister respectively.[164] Devolution to Northern Ireland is contingent on participation by the Northern Ireland administration in the North-South Ministerial Council, where the Northern Ireland Executive cooperates and develops joint and shared policies with the Government of Ireland. The British and Irish governments co-operate on non-devolved matters affecting Northern Ireland through the British–Irish Intergovernmental Conference, which assumes the responsibilities of the Northern Ireland administration in the event of its non-operation. -The UK does not have a codified constitution and constitutional matters are not among the powers devolved to Scotland, Wales or Northern Ireland. Under the doctrine of parliamentary sovereignty, the UK Parliament could, in theory, therefore, abolish the Scottish Parliament, Welsh Assembly or Northern Ireland Assembly.[165][166] Indeed, in 1972, the UK Parliament unilaterally prorogued the Parliament of Northern Ireland, setting a precedent relevant to contemporary devolved institutions.[167] In practice, it would be politically difficult for the UK Parliament to abolish devolution to the Scottish Parliament and the Welsh Assembly, given the political entrenchment created by referendum decisions.[168] The political constraints placed upon the UK Parliament's power to interfere with devolution in Northern Ireland are even greater than in relation to Scotland and Wales, given that devolution in Northern Ireland rests upon an international agreement with the Government of Ireland.[169] -Law and criminal justice -Main article: Law of the United Kingdom - -The Royal Courts of Justice of England and Wales -The United Kingdom does not have a single legal system, as Article 19 of the 1706 Treaty of Union provided for the continuation of Scotland's separate legal system.[170] Today the UK has three distinct 
systems of law: English law, Northern Ireland law and Scots law. A new Supreme Court of the United Kingdom came into being in October 2009 to replace the Appellate Committee of the House of Lords.[171][172] The Judicial Committee of the Privy Council, including the same members as the Supreme Court, is the highest court of appeal for several independent Commonwealth countries, the British Overseas Territories and the Crown Dependencies.[173] -Both English law, which applies in England and Wales, and Northern Ireland law are based on common-law principles.[174] The essence of common law is that, subject to statute, the law is developed by judges in courts, applying statute, precedent and common sense to the facts before them to give explanatory judgements of the relevant legal principles, which are reported and binding in future similar cases (stare decisis).[175] The courts of England and Wales are headed by the Senior Courts of England and Wales, consisting of the Court of Appeal, the High Court of Justice (for civil cases) and the Crown Court (for criminal cases). The Supreme Court is the highest court in the land for both criminal and civil appeal cases in England, Wales and Northern Ireland and any decision it makes is binding on every other court in the same jurisdiction, often having a persuasive effect in other jurisdictions.[176] - -The High Court of Justiciary – the supreme criminal court of Scotland. -Scots law is a hybrid system based on both common-law and civil-law principles. 
The chief courts are the Court of Session, for civil cases,[177] and the High Court of Justiciary, for criminal cases.[178] The Supreme Court of the United Kingdom serves as the highest court of appeal for civil cases under Scots law.[179] Sheriff courts deal with most civil and criminal cases including conducting criminal trials with a jury, known as sheriff solemn court, or with a sheriff and no jury, known as sheriff summary Court.[180] The Scots legal system is unique in having three possible verdicts for a criminal trial: "guilty", "not guilty" and "not proven". Both "not guilty" and "not proven" result in an acquittal.[181] -Crime in England and Wales increased in the period between 1981 and 1995, though since that peak there has been an overall fall of 48% in crime from 1995 to 2007/08,[182] according to crime statistics. The prison population of England and Wales has almost doubled over the same period, to over 80,000, giving England and Wales the highest rate of incarceration in Western Europe at 147 per 100,000.[183] Her Majesty's Prison Service, which reports to the Ministry of Justice, manages most of the prisons within England and Wales. Crime in Scotland fell to its lowest recorded level for 32 years in 2009/10, falling by ten per cent.[184] At the same time Scotland's prison population, at over 8,000,[185] is at record levels and well above design capacity.[186] The Scottish Prison Service, which reports to the Cabinet Secretary for Justice, manages Scotland's prisons. -Foreign relations -Main article: Foreign relations of the United Kingdom - -The Prime Minister of the United Kingdom, David Cameron, and the President of the United States, Barack Obama, during the 2010 G-20 Toronto summit. -The UK is a permanent member of the United Nations Security Council, a member of NATO, the Commonwealth of Nations, G7, G8, G20, the OECD, the WTO, the Council of Europe, the OSCE, and is a member state of the European Union. 
The UK is said to have a "Special Relationship" with the United States and a close partnership with France—the "Entente cordiale"—and shares nuclear weapons technology with both countries.[187][188] The UK is also closely linked with the Republic of Ireland; the two countries share a Common Travel Area and co-operate through the British-Irish Intergovernmental Conference and the British-Irish Council. Britain's global presence and influence is further amplified through its trading relations, foreign investments, official development assistance and military engagements.[189] -Military - -Troopers of the Blues and Royals during the 2007 Trooping the Colour ceremony -Main article: British Armed Forces -The armed forces of the United Kingdom—officially, Her Majesty's Armed Forces—consist of three professional service branches: the Royal Navy and Royal Marines (forming the Naval Service), the British Army and the Royal Air Force.[190] The forces are managed by the Ministry of Defence and controlled by the Defence Council, chaired by the Secretary of State for Defence. The Commander-in-Chief is the British monarch, Elizabeth II, to whom members of the forces swear an oath of allegiance.[191] The Armed Forces are charged with protecting the UK and its overseas territories, promoting the UK's global security interests and supporting international peacekeeping efforts. They are active and regular participants in NATO, including the Allied Rapid Reaction Corps, as well as the Five Power Defence Arrangements, RIMPAC and other worldwide coalition operations. Overseas garrisons and facilities are maintained in Ascension Island, Belize, Brunei, Canada, Cyprus, Diego Garcia, the Falkland Islands, Germany, Gibraltar, Kenya and Qatar.[192] -The British armed forces played a key role in establishing the British Empire as the dominant world power in the 18th, 19th and early 20th centuries. 
Throughout its unique history the British forces have seen action in a number of major wars, such as the Seven Years' War, the Napoleonic Wars, the Crimean War, World War I and World War II—as well as many colonial conflicts. By emerging victorious from such conflicts, Britain has often been able to decisively influence world events. Since the end of the British Empire, the UK has nonetheless remained a major military power. Following the end of the Cold War, defence policy has a stated assumption that "the most demanding operations" will be undertaken as part of a coalition.[193] Setting aside the intervention in Sierra Leone, recent UK military operations in Bosnia, Kosovo, Afghanistan, Iraq and, most recently, Libya, have followed this approach. The last time the British military fought alone was the Falklands War of 1982. -According to various sources, including the Stockholm International Peace Research Institute and the International Institute for Strategic Studies, the United Kingdom has the fifth- or sixth-highest military expenditure in the world. Total defence spending currently accounts for around 2.4% of total national GDP.[22][23] -Economy -Main article: Economy of the United Kingdom - -The Bank of England – the central bank of the United Kingdom -The UK has a partially regulated market economy.[194] Based on market exchange rates the UK is today the sixth-largest economy in the world and the third-largest in Europe after Germany and France, having fallen behind France for the first time in over a decade in 2008.[195] HM Treasury, led by the Chancellor of the Exchequer, is responsible for developing and executing the British government's public finance policy and economic policy. The Bank of England is the UK's central bank and is responsible for issuing notes and coins in the nation's currency, the pound sterling. 
Banks in Scotland and Northern Ireland retain the right to issue their own notes, subject to retaining enough Bank of England notes in reserve to cover their issue. Pound sterling is the world's third-largest reserve currency (after the US Dollar and the Euro).[196] Since 1997 the Bank of England's Monetary Policy Committee, headed by the Governor of the Bank of England, has been responsible for setting interest rates at the level necessary to achieve the overall inflation target for the economy that is set by the Chancellor each year.[197] -The UK service sector makes up around 73% of GDP.[198] London is one of the three "command centres" of the global economy (alongside New York City and Tokyo),[199] it is the world's largest financial centre alongside New York,[200][201][202] and it has the largest city GDP in Europe.[203] Edinburgh is also one of the largest financial centres in Europe.[204] Tourism is very important to the British economy and, with over 27 million tourists arriving in 2004, the United Kingdom is ranked as the sixth major tourist destination in the world and London has the most international visitors of any city in the world.[205][206] The creative industries accounted for 7% GVA in 2005 and grew at an average of 6% per annum between 1997 and 2005.[207] - -The Airbus A350 has its wings and engines manufactured in the UK. -The Industrial Revolution started in the UK with an initial concentration on the textile industry,[208] followed by other heavy industries such as shipbuilding, coal mining and steelmaking.[209][210] -The empire was exploited as an overseas market for British products, allowing the UK to dominate international trade in the 19th century. As other nations industrialised, coupled with economic decline after two world wars, the United Kingdom began to lose its competitive advantage and heavy industry declined, by degrees, throughout the 20th century. 
Manufacturing remains a significant part of the economy but accounted for only 16.7% of national output in 2003.[211] -The automotive industry is a significant part of the UK manufacturing sector and employs over 800,000 people, with a turnover of some £52 billion, generating £26.6 billion of exports.[212] -The aerospace industry of the UK is the second- or third-largest national aerospace industry in the world depending upon the method of measurement and has an annual turnover of around £20 billion. The wings for the Airbus A380 and the A350 XWB are designed and manufactured at Airbus UK's world-leading Broughton facility, whilst over a quarter of the value of the Boeing 787 comes from UK manufacturers including Eaton (fuel subsystem pumps), Messier-Bugatti-Dowty (the landing gear) and Rolls-Royce (the engines). Other key names include GKN Aerospace – an expert in metallic and composite aerostructures that's involved in almost every civil and military fixed and rotary wing aircraft in production and development today.[213][214][215][216] -BAE Systems - plays a critical role on some of the world's biggest defence aerospace projects. The company makes large sections of the Typhoon Eurofighter at its sub-assembly plant in Salmesbury and assembles the aircraft for the RAF at its Warton Plant, near Preston. It is also a principal subcontractor on the F35 Joint Strike Fighter - the world's largest single defence project - for which it designs and manufactures a range of components including the aft fuselage, vertical and horizontal tail and wing tips and fuel system. As well as this it manufactures the Hawk, the world's most successful jet training aircraft.[216] Airbus UK also manufactures the wings for the A400m military transporter. Rolls-Royce, is the world's second-largest aero-engine manufacturer. Its engines power more than 30 types of commercial aircraft and it has more than 30,000 engines currently in service across both the civil and defence sectors. 
AgustaWestland designs and manufactures
- -The City of London is the world's largest financial centre alongside New York[200][201][202] -In the final quarter of 2008 the UK economy officially entered recession for the first time since 1991.[220] Unemployment increased from 5.2% in May 2008 to 7.6% in May 2009 and by January 2012 the unemployment rate among 18 to 24-year-olds had risen from 11.9% to 22.5%, the highest since current records began in 1992.[221][222] Total UK government debt rose from 44.4% of GDP in 2007 to 82.9% of GDP in 2011.[223] In February 2013, the UK lost its top AAA credit rating for the first time since 1978.[224] -Inflation-adjusted wages in the UK fell by 3.2% between the third quarter of 2010 and the third quarter of 2012.[225] Since the 1980s, economic inequality has grown faster in the UK than in any other developed country.[226] -The poverty line in the UK is commonly defined as being 60% of the median household income.[nb 10] In 2007–2008 13.5 million people, or 22% of the population, lived below this line. This is a higher level of relative poverty than all but four other EU members.[227] In the same year 4.0 million children, 31% of the total, lived in households below the poverty line after housing costs were taken into account. 
This is a decrease of 400,000 children since 1998–1999.[228] The UK imports 40% of its food supplies.[229] The Office for National Statistics has estimated that in 2011, 14 million people were at risk of poverty or social exclusion, and that one person in 20 (5.1%) was now experiencing "severe material depression,"[230] up from 3 million people in 1977.[231][232] -Science and technology -Main article: Science and technology in the United Kingdom - -Charles Darwin (1809–82), whose theory of evolution by natural selection is the foundation of modern biological sciences -England and Scotland were leading centres of the Scientific Revolution from the 17th century[233] and the United Kingdom led the Industrial Revolution from the 18th century,[208] and has continued to produce scientists and engineers credited with important advances.[234] Major theorists from the 17th and 18th centuries include Isaac Newton, whose laws of motion and illumination of gravity have been seen as a keystone of modern science;[235] from the 19th century Charles Darwin, whose theory of evolution by natural selection was fundamental to the development of modern biology, and James Clerk Maxwell, who formulated classical electromagnetic theory; and more recently Stephen Hawking, who has advanced major theories in the fields of cosmology, quantum gravity and the investigation of black holes.[236] Major scientific discoveries from the 18th century include hydrogen by Henry Cavendish;[237] from the 20th century penicillin by Alexander Fleming,[238] and the structure of DNA, by Francis Crick and others.[239] Major engineering projects and applications by people from the UK in the 18th century include the steam locomotive, developed by Richard Trevithick and Andrew Vivian;[240] from the 19th century the electric motor by Michael Faraday, the incandescent light bulb by Joseph Swan,[241] and the first practical telephone, patented by Alexander Graham Bell;[242] and in the 20th century the world's first 
working television system by John Logie Baird and others,[243] the jet engine by Frank Whittle, the basis of the modern computer by Alan Turing, and the World Wide Web by Tim Berners-Lee.[244] -Scientific research and development remains important in British universities, with many establishing science parks to facilitate production and co-operation with industry.[245] Between 2004 and 2008 the UK produced 7% of the world's scientific research papers and had an 8% share of scientific citations, the third and second highest in the world (after the United States and China, and the United States, respectively).[246] Scientific journals produced in the UK include Nature, the British Medical Journal and The Lancet.[247] -Transport -Main article: Transport in the United Kingdom - -Heathrow Terminal 5 building. London Heathrow Airport has the most international passenger traffic of any airport in the world.[248][249] -A radial road network totals 29,145 miles (46,904 km) of main roads, 2,173 miles (3,497 km) of motorways and 213,750 miles (344,000 km) of paved roads.[105] In 2009 there were a total of 34 million licensed vehicles in Great Britain.[250] -The UK has a railway network of 10,072 miles (16,209 km) in Great Britain and 189 miles (304 km) in Northern Ireland. Railways in Northern Ireland are operated by NI Railways, a subsidiary of state-owned Translink. In Great Britain, the British Rail network was privatised between 1994 and 1997. Network Rail owns and manages most of the fixed assets (tracks, signals etc.). About 20 privately owned (and foreign state-owned railways including: Deutsche Bahn; SNCF and Nederlandse Spoorwegen) Train Operating Companies (including state-owned East Coast), operate passenger trains and carry over 18,000 passenger trains daily. 
There are also some 1,000 freight trains in daily operation.[105] The UK government is to spend £30 billion on a new high-speed railway line, HS2, to be operational by 2025.[251] Crossrail, under construction in London, Is Europe's largest construction project with a £15 billion projected cost.[252][253] -In the year from October 2009 to September 2010 UK airports handled a total of 211.4 million passengers.[254] In that period the three largest airports were London Heathrow Airport (65.6 million passengers), Gatwick Airport (31.5 million passengers) and London Stansted Airport (18.9 million passengers).[254] London Heathrow Airport, located 15 miles (24 km) west of the capital, has the most international passenger traffic of any airport in the world[248][249] and is the hub for the UK flag carrier British Airways, as well as for BMI and Virgin Atlantic.[255] -Energy -Main article: Energy in the United Kingdom - -An oil platform in the North Sea -In 2006, the UK was the world's ninth-largest consumer of energy and the 15th-largest producer.[256] The UK is home to a number of large energy companies, including two of the six oil and gas "supermajors" – BP and Royal Dutch Shell – and BG Group.[257][258] In 2011, 40% of the UK's electricity was produced by gas, 30% by coal, 19% by nuclear power and 4.2% by wind, hydro, biofuels and wastes.[259] -In 2009, the UK produced 1.5 million barrels per day (bbl/d) of oil and consumed 1.7 million bbl/d.[260] Production is now in decline and the UK has been a net importer of oil since 2005.[260] In 2010 the UK had around 3.1 billion barrels of proven crude oil reserves, the largest of any EU member state.[260] In 2009, 66.5% of the UK's oil supply was imported.[261] -In 2009, the UK was the 13th-largest producer of natural gas in the world and the largest producer in the EU.[262] Production is now in decline and the UK has been a net importer of natural gas since 2004.[262] In 2009, half of British gas was supplied from imports 
and this is expected to increase to at least 75% by 2015, as domestic reserves are depleted.[259] -Coal production played a key role in the UK economy in the 19th and 20th centuries. In the mid-1970s, 130 million tonnes of coal was being produced annually, not falling below 100 million tonnes until the early 1980s. During the 1980s and 1990s the industry was scaled back considerably. In 2011, the UK produced 18.3 million tonnes of coal.[263] In 2005 it had proven recoverable coal reserves of 171 million tons.[263] The UK Coal Authority has stated there is a potential to produce between 7 billion tonnes and 16 billion tonnes of coal through underground coal gasification (UCG) or 'fracking',[264] and that, based on current UK coal consumption, such reserves could last between 200 and 400 years.[265] However, environmental and social concerns have been raised over chemicals getting into the water table and minor earthquakes damaging homes.[266][267] -In the late 1990s, nuclear power plants contributed around 25% of total annual electricity generation in the UK, but this has gradually declined as old plants have been shut down and ageing-related problems affect plant availability. In 2012, the UK had 16 reactors normally generating about 19% of its electricity. All but one of the reactors will be retired by 2023. Unlike Germany and Japan, the UK intends to build a new generation of nuclear plants from about 2018.[259] -Demographics -Main article: Demographics of the United Kingdom - -Map of population density in the UK as at the 2011 census. 
-A census is taken simultaneously in all parts of the UK every ten years.[268] The Office for National Statistics is responsible for collecting data for England and Wales, the General Register Office for Scotland and the Northern Ireland Statistics and Research Agency each being responsible for censuses in their respective countries.[269] In the 2011 census the total population of the United Kingdom was 63,181,775.[270] It is the third-largest in the European Union, the fifth-largest in the Commonwealth and the 21st-largest in the world. 2010 was the third successive year in which natural change contributed more to population growth than net long-term international migration.[271][271] Between 2001 and 2011 the population increased by an average annual rate of approximately 0.7 per cent.[270] This compares to 0.3 per cent per year in the period 1991 to 2001 and 0.2 per cent in the decade 1981 to 1991.[271] The 2011 census also confirmed that the proportion of the population aged 0–14 has nearly halved (31 per cent in 1911 compared to 18 in 2011) and the proportion of older people aged 65 and over has more than trebled (from 5 to 16 per cent).[270] It has been estimated that the number of people aged 100 or over will rise steeply to reach over 626,000 by 2080.[272] -England's population in 2011 was found to be 53 million.[273] It is one of the most densely populated countries in the world, with 383 people resident per square kilometre in mid-2003,[274] with a particular concentration in London and the south-east.[275] The 2011 census put Scotland's population at 5.3 million,[276] Wales at 3.06 million and Northern Ireland at 1.81 million.[273] In percentage terms England has had the fastest growing population of any country of the UK in the period from 2001 to 2011, with an increase of 7.9%. 
-In 2012 the average total fertility rate (TFR) across the UK was 1.92 children per woman.[277] While a rising birth rate is contributing to current population growth, it remains considerably below the 'baby boom' peak of 2.95 children per woman in 1964,[278] below the replacement rate of 2.1, but higher than the 2001 record low of 1.63.[277] In 2012, Scotland had the lowest TFR at only 1.67, followed by Wales at 1.88, England at 1.94, and Northern Ireland at 2.03.[277] In 2011, 47.3% of births in the UK were to unmarried women.[279] A government figure estimated that there are 3.6 million homosexual people in Britain comprising 6 per cent of the population.[280] -view talk edit -view talk edit -Largest urban areas of the United Kingdom -United Kingdom 2011 census Built-up areas[281][282][283] -Rank Urban area Pop. Principal settlement Rank Urban area Pop. Principal settlement -Greater London Urban Area -Greater London Urban Area -Greater Manchester Urban Area -Greater Manchester Urban Area -1 Greater London Urban Area 9,787,426 London 11 Bristol Urban Area 617,280 Bristol West Midlands Urban Area -West Midlands Urban Area -West Yorkshire Urban Area -West Yorkshire Urban Area -2 Greater Manchester Urban Area 2,553,379 Manchester 12 Belfast Metropolitan Urban Area 579,236 Belfast -3 West Midlands Urban Area 2,440,986 Birmingham 13 Leicester Urban Area 508,916 Leicester -4 West Yorkshire Urban Area 1,777,934 Leeds 14 Edinburgh 488,610 Edinburgh -5 Greater Glasgow 976,970 Glasgow 15 Brighton/Worthing/Littlehampton 474,485 Brighton -6 Liverpool Urban Area 864,122 Liverpool 16 South East Dorset conurbation 466,266 Bournemouth -7 South Hampshire 855,569 Southampton 17 Cardiff Urban Area 390,214 Cardiff -8 Tyneside 774,891 Newcastle 18 Teesside 376,633 Middlesbrough -9 Nottingham Urban Area 729,977 Nottingham 19 The Potteries Urban Area 372,775 Stoke-on-Trent -10 Sheffield Urban Area 685,368 Sheffield 20 Coventry and Bedworth Urban Area 359,262 Coventry - -Ethnic groups - 
-Map showing the percentage of the population who are not white according to the 2011 census. -Ethnic group 2011 -population 2011 -% -White 55,010,359 87.1 -White: Irish Traveller 63,193 0.1 -Asian or Asian British: Indian 1,451,862 -2.3 -Asian or Asian British: Pakistani 1,173,892 -1.9 -Asian or Asian British: Bangladeshi 451,529 -0.7 -Asian or Asian British: Chinese 433,150 -0.7 -Asian or Asian British: Asian Other 861,815 -1.4 -Asian or Asian British: Total 4,373,339 -7.0 -Black or Black British 1,904,684 -3.0 -British Mixed 1,250,229 -2.0 -Other: Total 580,374 -0.9 -Total[284] 63,182,178 -100 -Historically, indigenous British people were thought to be descended from the various ethnic groups that settled there before the 11th century: the Celts, Romans, Anglo-Saxons, Norse and the Normans. Welsh people could be the oldest ethnic group in the UK.[285] A 2006 genetic study shows that more than 50 per cent of England's gene pool contains Germanic Y chromosomes.[286] Another 2005 genetic analysis indicates that "about 75 per cent of the traceable ancestors of the modern British population had arrived in the British isles by about 6,200 years ago, at the start of the British Neolithic or Stone Age", and that the British broadly share a common ancestry with the Basque people.[287][288][289] -The UK has a history of small-scale non-white immigration, with Liverpool having the oldest Black population in the country dating back to at least the 1730s during the period of the African slave trade,[290] and the oldest Chinese community in Europe, dating to the arrival of Chinese seamen in the 19th century.[291] In 1950 there were probably fewer than 20,000 non-white residents in Britain, almost all born overseas.[292] -Since 1948 substantial immigration from Africa, the Caribbean and South Asia has been a legacy of ties forged by the British Empire. 
Migration from new EU member states in Central and Eastern Europe since 2004 has resulted in growth in these population groups but, as of 2008, the trend is reversing. Many of these migrants are returning to their home countries, leaving the size of these groups unknown.[293] In 2011, 86% of the population identified themselves as White, meaning 12.9% of the UK population identify themselves as of mixed ethnic minority. -Ethnic diversity varies significantly across the UK. 30.4% of London's population and 37.4% of Leicester's was estimated to be non-white in 2005,[294][295] whereas less than 5% of the populations of North East England, Wales and the South West were from ethnic minorities, according to the 2001 census.[296] In 2011, 26.5% of primary and 22.2% of secondary pupils at state schools in England were members of an ethnic minority.[297] -The non-white British population of England and Wales increased by 38% from 6.6 million in 2001 to 9.1 million in 2009.[298] The fastest-growing group was the mixed-ethnicity population, which doubled from 672,000 in 2001 to 986,600 in 2009. Also in the same period, a decrease of 36,000 white British people was recorded.[299] -Languages -Main article: Languages of the United Kingdom - -The English-speaking world. Countries in dark blue have a majority of native speakers; countries where English is an official but not a majority language are shaded in light blue. 
English is one of the official languages of the European Union[300] and the United Nations[301] -The UK's de facto official language is English.[302][303] It is estimated that 95% of the UK's population are monolingual English speakers.[304] 5.5% of the population are estimated to speak languages brought to the UK as a result of relatively recent immigration.[304] South Asian languages, including Bengali, Tamil, Punjabi, Hindi and Gujarati, are the largest grouping and are spoken by 2.7% of the UK population.[304] According to the 2011 census, Polish has become the second-largest language spoken in England and has 546,000 speakers.[305] -Four Celtic languages are spoken in the UK: Welsh; Irish; Scottish Gaelic; and Cornish. All are recognised as regional or minority languages, subject to specific measures of protection and promotion under the European Charter for Regional or Minority Languages[2][306] and the Framework Convention for the Protection of National Minorities.[307] In the 2001 Census over a fifth (21%) of the population of Wales said they could speak Welsh,[308] an increase from the 1991 Census (18%).[309] In addition it is estimated that about 200,000 Welsh speakers live in England.[310] In the same census in Northern Ireland 167,487 people (10.4%) stated that they had "some knowledge of Irish" (see Irish language in Northern Ireland), almost exclusively in the nationalist (mainly Catholic) population. 
Over 92,000 people in Scotland (just under 2% of the population) had some Gaelic language ability, including 72% of those living in the Outer Hebrides.[311] The number of schoolchildren being taught through Welsh, Scottish Gaelic and Irish is increasing.[312] Among emigrant-descended populations some Scottish Gaelic is still spoken in Canada (principally Nova Scotia and Cape Breton Island),[313] and Welsh in Patagonia, Argentina.[314] -Scots, a language descended from early northern Middle English, has limited recognition alongside its regional variant, Ulster Scots in Northern Ireland, without specific commitments to protection and promotion.[2][315] -It is compulsory for pupils to study a second language up to the age of 14 in England,[316] and up to age 16 in Scotland. French and German are the two most commonly taught second languages in England and Scotland. All pupils in Wales are taught Welsh as a second language up to age 16, or are taught in Welsh.[317] -Religion -Main article: Religion in the United Kingdom - -Westminster Abbey is used for the coronation of British monarchs -Forms of Christianity have dominated religious life in what is now the United Kingdom for over 1,400 years.[318] Although a majority of citizens still identify with Christianity in many surveys, regular church attendance has fallen dramatically since the middle of the 20th century,[319] while immigration and demographic change have contributed to the growth of other faiths, most notably Islam.[320] This has led some commentators to variously describe the UK as a multi-faith,[321] secularised,[322] or post-Christian society.[323] -In the 2001 census 71.6% of all respondents indicated that they were Christians, with the next largest faiths (by number of adherents) being Islam (2.8%), Hinduism (1.0%), Sikhism (0.6%), Judaism (0.5%), Buddhism (0.3%) and all other religions (0.3%).[324] 15% of respondents stated that they had no religion, with a further 7% not stating a religious 
preference.[325] A Tearfund survey in 2007 showed only one in ten Britons actually attend church weekly.[326] Between the 2001 and 2011 census there was a decrease in the amount of people who identified as Christian by 12%, whilst the percentage of those reporting no religious affiliation doubled. This contrasted with growth in the other main religious group categories, with the number of Muslims increasing by the most substantial margin to a total of about 5%.[327] -The Church of England is the established church in England.[328] It retains a representation in the UK Parliament and the British monarch is its Supreme Governor.[329] In Scotland the Presbyterian Church of Scotland is recognised as the national church. It is not subject to state control, and the British monarch is an ordinary member, required to swear an oath to "maintain and preserve the Protestant Religion and Presbyterian Church Government" upon his or her accession.[330][331] The (Anglican) Church in Wales was disestablished in 1920 and, as the (Anglican) Church of Ireland was disestablished in 1870 before the partition of Ireland, there is no established church in Northern Ireland.[332] Although there are no UK-wide data in the 2001 census on adherence to individual Christian denominations, it has been estimated that 62% of Christians are Anglican, 13.5% Catholic, 6% Presbyterian, 3.4% Methodist with small numbers of other Protestant denominations such as Open Brethren, and Orthodox churches.[333] -Migration -Main article: Immigration to the United Kingdom since 1922 -See also: Foreign-born population of the United Kingdom - -Estimated foreign-born population by country of birth, April 2007 – March 2008 -The United Kingdom has experienced successive waves of migration. 
The Great Famine in Ireland, then part of the United Kingdom, resulted in perhaps a million people migrating to Great Britain.[334] Unable to return to Poland at the end of World War II, over 120,000 Polish veterans remained in the UK permanently.[335] After World War II, there was significant immigration from the colonies and newly independent former colonies, partly as a legacy of empire and partly driven by labour shortages. Many of these migrants came from the Caribbean and the Indian subcontinent.[336] The British Asian population has increased from 2.2 million in 2001 to over 4.2 million in 2011.[337] -One of the more recent trends in migration has been the arrival of workers from the new EU member states in Eastern Europe. In 2010, there were 7.0 million foreign-born residents in the UK, corresponding to 11.3% of the total population. Of these, 4.76 million (7.7%) were born outside the EU and 2.24 million (3.6%) were born in another EU Member State.[338] The proportion of foreign-born people in the UK remains slightly below that of many other European countries.[339] However, immigration is now contributing to a rising population[340] with arrivals and UK-born children of migrants accounting for about half of the population increase between 1991 and 2001.
Analysis of Office for National Statistics (ONS) data shows that a net total of 2.3 million migrants moved to the UK in the 15 years from 1991 to 2006.[341][342] In 2008 it was predicted that migration would add 7 million to the UK population by 2031,[343] though these figures are disputed.[344] The ONS reported that net migration rose from 2009 to 2010 by 21 per cent to 239,000.[345] In 2011 the net increase was 251,000: immigration was 589,000, while the number of people emigrating (for more than 12 months) was 338,000.[346][347] -195,046 foreign nationals became British citizens in 2010,[348] compared to 54,902 in 1999.[348][349] A record 241,192 people were granted permanent settlement rights in 2010, of whom 51 per cent were from Asia and 27 per cent from Africa.[350] 25.5 per cent of babies born in England and Wales in 2011 were born to mothers born outside the UK, according to official statistics released in 2012.[351] -Citizens of the European Union, including those of the UK, have the right to live and work in any EU member state.[352] The UK applied temporary restrictions to citizens of Romania and Bulgaria, which joined the EU in January 2007.[353] Research conducted by the Migration Policy Institute for the Equality and Human Rights Commission suggests that, between May 2004 and September 2009, 1.5 million workers migrated from the new EU member states to the UK, two-thirds of them Polish, but that many subsequently returned home, resulting in a net increase in the number of nationals of the new member states in the UK of some 700,000 over that period.[354][355] The late-2000s recession in the UK reduced the economic incentive for Poles to migrate to the UK,[356] the migration becoming temporary and circular.[357] In 2009, for the first time since enlargement, more nationals of the eight central and eastern European states that had joined the EU in 2004 left the UK than arrived.[358] In 2011, citizens of the new EU member states made up 13% of the 
immigrants entering the country.[346] - -Estimated number of British citizens living overseas by country, 2006 -The UK government has introduced a points-based immigration system for immigration from outside the European Economic Area to replace former schemes, including the Scottish Government's Fresh Talent Initiative.[359] In June 2010 the UK government introduced a temporary limit of 24,000 on immigration from outside the EU, aiming to discourage applications before a permanent cap was imposed in April 2011.[360] The cap has caused tension within the coalition: business secretary Vince Cable has argued that it is harming British businesses.[361] -Emigration was an important feature of British society in the 19th century. Between 1815 and 1930 around 11.4 million people emigrated from Britain and 7.3 million from Ireland. Estimates show that by the end of the 20th century some 300 million people of British and Irish descent were permanently settled around the globe.[362] Today, at least 5.5 million UK-born people live abroad,[363][364][365] mainly in Australia, Spain, the United States and Canada.[363][366] -Education -Main article: Education in the United Kingdom -See also: Education in England, Education in Northern Ireland, Education in Scotland and Education in Wales - -King's College, part of the University of Cambridge, which was founded in 1209 -Education in the United Kingdom is a devolved matter, with each country having a separate education system. -Whilst education in England is the responsibility of the Secretary of State for Education, the day-to-day administration and funding of state schools is the responsibility of local authorities.[367] Universally free of charge state education was introduced piecemeal between 1870 and 1944.[368][369] Education is now mandatory from ages five to sixteen (15 if born in late July or August). 
In 2011, the Trends in International Mathematics and Science Study (TIMSS) rated 13–14-year-old pupils in England and Wales 10th in the world for maths and 9th for science.[370] The majority of children are educated in state-sector schools, a small proportion of which select on the grounds of academic ability. Two of the top ten performing schools in terms of GCSE results in 2006 were state-run grammar schools. Over half of students at the leading universities of Cambridge and Oxford had attended state schools.[371] Despite a fall in actual numbers the proportion of children in England attending private schools has risen to over 7%.[372] In 2010, more than 45% of places at the University of Oxford and 40% at the University of Cambridge were taken by students from private schools, even though they educate just 7% of the population.[373] England has the two oldest universities in the English-speaking world, the Universities of Oxford and Cambridge (jointly known as "Oxbridge"), with a history of over eight centuries. The United Kingdom has 9 universities featured in the Times Higher Education top 100 rankings, making it second to the United States in terms of representation.[374] - -Queen's University Belfast, built in 1849[375] -Education in Scotland is the responsibility of the Cabinet Secretary for Education and Lifelong Learning, with day-to-day administration and funding of state schools the responsibility of Local Authorities. Two non-departmental public bodies have key roles in Scottish education.
The Scottish Qualifications Authority is responsible for the development, accreditation, assessment and certification of qualifications other than degrees which are delivered at secondary schools, post-secondary colleges of further education and other centres.[376] The Learning and Teaching Scotland provides advice, resources and staff development to education professionals.[377] Scotland first legislated for compulsory education in 1496.[378] The proportion of children in Scotland attending private schools is just over 4%, and it has been rising slowly in recent years.[379] Scottish students who attend Scottish universities pay neither tuition fees nor graduate endowment charges, as fees were abolished in 2001 and the graduate endowment scheme was abolished in 2008.[380] -The Welsh Government has responsibility for education in Wales. A significant number of Welsh students are taught either wholly or largely in the Welsh language; lessons in Welsh are compulsory for all until the age of 16.[381] There are plans to increase the provision of Welsh-medium schools as part of the policy of creating a fully bilingual Wales. -Education in Northern Ireland is the responsibility of the Minister of Education and the Minister for Employment and Learning, although responsibility at a local level is administered by five education and library boards covering different geographical areas. 
The Council for the Curriculum, Examinations & Assessment (CCEA) is the body responsible for advising the government on what should be taught in Northern Ireland's schools, monitoring standards and awarding qualifications.[382] -A government commission's report in 2014 found that privately educated people comprise 7% of the general population of the UK but much larger percentages of the top professions, the most extreme case quoted being 71% of senior judges.[383][384] -Healthcare -Main article: Healthcare in the United Kingdom - -The Royal Aberdeen Children's Hospital, an NHS Scotland specialist children's hospital -Healthcare in the United Kingdom is a devolved matter and each country has its own system of private and publicly funded health care, together with alternative, holistic and complementary treatments. Public healthcare is provided to all UK permanent residents and is mostly free at the point of need, being paid for from general taxation. The World Health Organization, in 2000, ranked the provision of healthcare in the United Kingdom as fifteenth best in Europe and eighteenth in the world.[385][386] -Regulatory bodies are organised on a UK-wide basis such as the General Medical Council, the Nursing and Midwifery Council and non-governmental-based, such as the Royal Colleges. However, political and operational responsibility for healthcare lies with four national executives; healthcare in England is the responsibility of the UK Government; healthcare in Northern Ireland is the responsibility of the Northern Ireland Executive; healthcare in Scotland is the responsibility of the Scottish Government; and healthcare in Wales is the responsibility of the Welsh Assembly Government. 
Each National Health Service has different policies and priorities, resulting in contrasts.[387][388] -Since 1979 expenditure on healthcare has been increased significantly to bring it closer to the European Union average.[389] The UK spends around 8.4 per cent of its gross domestic product on healthcare, which is 0.5 percentage points below the Organisation for Economic Co-operation and Development average and about one percentage point below the average of the European Union.[390] -Culture -Main article: Culture of the United Kingdom -The culture of the United Kingdom has been influenced by many factors including: the nation's island status; its history as a western liberal democracy and a major power; as well as being a political union of four countries with each preserving elements of distinctive traditions, customs and symbolism. As a result of the British Empire, British influence can be observed in the language, culture and legal systems of many of its former colonies including Australia, Canada, India, Ireland, New Zealand, South Africa and the United States. The substantial cultural influence of the United Kingdom has led it to be described as a "cultural superpower."[391][392] -Literature -Main article: British literature - -The Chandos portrait, believed to depict William Shakespeare -'British literature' refers to literature associated with the United Kingdom, the Isle of Man and the Channel Islands. Most British literature is in the English language. In 2005, some 206,000 books were published in the United Kingdom and in 2006 it was the largest publisher of books in the world.[393] -The English playwright and poet William Shakespeare is widely regarded as the greatest dramatist of all time,[394][395][396] and his contemporaries Christopher Marlowe and Ben Jonson have also been held in continuous high esteem. 
More recently the playwrights Alan Ayckbourn, Harold Pinter, Michael Frayn, Tom Stoppard and David Edgar have combined elements of surrealism, realism and radicalism. -Notable pre-modern and early-modern English writers include Geoffrey Chaucer (14th century), Thomas Malory (15th century), Sir Thomas More (16th century), John Bunyan (17th century) and John Milton (17th century). In the 18th century Daniel Defoe (author of Robinson Crusoe) and Samuel Richardson were pioneers of the modern novel. In the 19th century there followed further innovation by Jane Austen, the gothic novelist Mary Shelley, the children's writer Lewis Carroll, the Brontë sisters, the social campaigner Charles Dickens, the naturalist Thomas Hardy, the realist George Eliot, the visionary poet William Blake and romantic poet William Wordsworth. 20th-century English writers include the science-fiction novelist H. G. Wells; the writers of children's classics Rudyard Kipling, A. A. Milne (the creator of Winnie-the-Pooh), Roald Dahl and Enid Blyton; the controversial D. H. Lawrence; the modernist Virginia Woolf; the satirist Evelyn Waugh; the prophetic novelist George Orwell; the popular novelists W. Somerset Maugham and Graham Greene; the crime writer Agatha Christie (the best-selling novelist of all time);[397] Ian Fleming (the creator of James Bond); the poets T.S. Eliot, Philip Larkin and Ted Hughes; the fantasy writers J. R. R. Tolkien, C. S. Lewis and J. K. Rowling; the graphic novelist Alan Moore, whose novel Watchmen is often cited by critics as comic's greatest series and graphic novel[398] and one of the best-selling graphic novels ever published.[399] - -A photograph of Victorian era novelist Charles Dickens -Scotland's contributions include the detective writer Arthur Conan Doyle (the creator of Sherlock Holmes), romantic literature by Sir Walter Scott, the children's writer J. M. Barrie, the epic adventures of Robert Louis Stevenson and the celebrated poet Robert Burns. 
More recently the modernist and nationalist Hugh MacDiarmid and Neil M. Gunn contributed to the Scottish Renaissance. A more grim outlook is found in Ian Rankin's stories and the psychological horror-comedy of Iain Banks. Scotland's capital, Edinburgh, was UNESCO's first worldwide City of Literature.[400] -Britain's oldest known poem, Y Gododdin, was composed in Yr Hen Ogledd (The Old North), most likely in the late 6th century. It was written in Cumbric or Old Welsh and contains the earliest known reference to King Arthur.[401] From around the seventh century, the connection between Wales and the Old North was lost, and the focus of Welsh-language culture shifted to Wales, where Arthurian legend was further developed by Geoffrey of Monmouth.[402] Wales's most celebrated medieval poet, Dafydd ap Gwilym (fl.1320–1370), composed poetry on themes including nature, religion and especially love. He is widely regarded as one of the greatest European poets of his age.[403] Until the late 19th century the majority of Welsh literature was in Welsh and much of the prose was religious in character. Daniel Owen is credited as the first Welsh-language novelist, publishing Rhys Lewis in 1885. The best-known of the Anglo-Welsh poets are both Thomases. Dylan Thomas became famous on both sides of the Atlantic in the mid-20th century. He is remembered for his poetry – his "Do not go gentle into that good night; Rage, rage against the dying of the light." is one of the most quoted couplets of English language verse – and for his 'play for voices', Under Milk Wood. The influential Church in Wales 'poet-priest' and Welsh nationalist R. S. Thomas was nominated for the Nobel Prize in Literature in 1996. Leading Welsh novelists of the twentieth century include Richard Llewellyn and Kate Roberts.[404][405] -Authors of other nationalities, particularly from Commonwealth countries, the Republic of Ireland and the United States, have lived and worked in the UK. 
Significant examples through the centuries include Jonathan Swift, Oscar Wilde, Bram Stoker, George Bernard Shaw, Joseph Conrad, T.S. Eliot, Ezra Pound and more recently British authors born abroad such as Kazuo Ishiguro and Sir Salman Rushdie.[406][407] -Music -Main article: Music of the United Kingdom -See also: British rock - -The Beatles are the most commercially successful and critically acclaimed band in the history of music, selling over a billion records internationally.[408][409][410] -Various styles of music are popular in the UK from the indigenous folk music of England, Wales, Scotland and Northern Ireland to heavy metal. Notable composers of classical music from the United Kingdom and the countries that preceded it include William Byrd, Henry Purcell, Sir Edward Elgar, Gustav Holst, Sir Arthur Sullivan (most famous for working with the librettist Sir W. S. Gilbert), Ralph Vaughan Williams and Benjamin Britten, pioneer of modern British opera. Sir Peter Maxwell Davies is one of the foremost living composers and current Master of the Queen's Music. The UK is also home to world-renowned symphonic orchestras and choruses such as the BBC Symphony Orchestra and the London Symphony Chorus. Notable conductors include Sir Simon Rattle, John Barbirolli and Sir Malcolm Sargent. Some of the notable film score composers include John Barry, Clint Mansell, Mike Oldfield, John Powell, Craig Armstrong, David Arnold, John Murphy, Monty Norman and Harry Gregson-Williams. 
George Frideric Handel, although born German, was a naturalised British citizen[411] and some of his best works, such as Messiah, were written in the English language.[412] Andrew Lloyd Webber has achieved enormous worldwide commercial success and is a prolific composer of musical theatre, works which have dominated London's West End for a number of years and have travelled to Broadway in New York.[413] -The Beatles have international sales of over one billion units and are the biggest-selling and most influential band in the history of popular music.[408][409][410][414] Other prominent British contributors to have influenced popular music over the last 50 years include; The Rolling Stones, Led Zeppelin, Pink Floyd, Queen, the Bee Gees, and Elton John, all of whom have world wide record sales of 200 million or more.[415][416][417][418][419][420] The Brit Awards are the BPI's annual music awards, and some of the British recipients of the Outstanding Contribution to Music award include; The Who, David Bowie, Eric Clapton, Rod Stewart and The Police.[421] More recent UK music acts that have had international success include Coldplay, Radiohead, Oasis, Spice Girls, Robbie Williams, Amy Winehouse and Adele.[422] -A number of UK cities are known for their music. Acts from Liverpool have had more UK chart number one hit singles per capita (54) than any other city worldwide.[423] Glasgow's contribution to music was recognised in 2008 when it was named a UNESCO City of Music, one of only three cities in the world to have this honour.[424] -Visual art -Main article: Art of the United Kingdom - -J. M. W. Turner self-portrait, oil on canvas, c. 1799 -The history of British visual art forms part of western art history. Major British artists include: the Romantics William Blake, John Constable, Samuel Palmer and J.M.W. Turner; the portrait painters Sir Joshua Reynolds and Lucian Freud; the landscape artists Thomas Gainsborough and L. S. 
Lowry; the pioneer of the Arts and Crafts Movement William Morris; the figurative painter Francis Bacon; the Pop artists Peter Blake, Richard Hamilton and David Hockney; the collaborative duo Gilbert and George; the abstract artist Howard Hodgkin; and the sculptors Antony Gormley, Anish Kapoor and Henry Moore. During the late 1980s and 1990s the Saatchi Gallery in London helped to bring to public attention a group of multi-genre artists who would become known as the "Young British Artists": Damien Hirst, Chris Ofili, Rachel Whiteread, Tracey Emin, Mark Wallinger, Steve McQueen, Sam Taylor-Wood and the Chapman Brothers are among the better-known members of this loosely affiliated movement. -The Royal Academy in London is a key organisation for the promotion of the visual arts in the United Kingdom. Major schools of art in the UK include: the six-school University of the Arts London, which includes the Central Saint Martins College of Art and Design and Chelsea College of Art and Design; Goldsmiths, University of London; the Slade School of Fine Art (part of University College London); the Glasgow School of Art; the Royal College of Art; and The Ruskin School of Drawing and Fine Art (part of the University of Oxford). The Courtauld Institute of Art is a leading centre for the teaching of the history of art. Important art galleries in the United Kingdom include the National Gallery, National Portrait Gallery, Tate Britain and Tate Modern (the most-visited modern art gallery in the world, with around 4.7 million visitors per year).[425] -Cinema -Main article: Cinema of the United Kingdom - -Film director Alfred Hitchcock -The United Kingdom has had a considerable influence on the history of the cinema. 
The British directors Alfred Hitchcock, whose film Vertigo is considered by some critics as the best film of all time,[426] and David Lean are among the most critically acclaimed of all time.[427] Other important directors include Charlie Chaplin,[428] Michael Powell,[429] Carol Reed[430] and Ridley Scott.[431] Many British actors have achieved international fame and critical success, including: Julie Andrews,[432] Richard Burton,[433] Michael Caine,[434] Charlie Chaplin,[435] Sean Connery,[436] Vivien Leigh,[437] David Niven,[438] Laurence Olivier,[439] Peter Sellers,[440] Kate Winslet,[441] and Daniel Day-Lewis, the only person to win an Oscar in the best actor category three times.[442] Some of the most commercially successful films of all time have been produced in the United Kingdom, including the two highest-grossing film franchises (Harry Potter and James Bond).[443] Ealing Studios has a claim to being the oldest continuously working film studio in the world.[444] -Despite a history of important and successful productions, the industry has often been characterised by a debate about its identity and the level of American and European influence. British producers are active in international co-productions and British actors, directors and crew feature regularly in American films. Many successful Hollywood films have been based on British people, stories or events, including Titanic, The Lord of the Rings and Pirates of the Caribbean. 
-In 2009, British films grossed around $2 billion worldwide and achieved a market share of around 7% globally and 17% in the United Kingdom.[445] UK box-office takings totalled £944 million in 2009, with around 173 million admissions.[445] The British Film Institute has produced a poll ranking of what it considers to be the 100 greatest British films of all time, the BFI Top 100 British films.[446] The annual British Academy Film Awards, hosted by the British Academy of Film and Television Arts, are the British equivalent of the Oscars.[447] -Media -Main article: Media of the United Kingdom - -Broadcasting House in London, headquarters of the BBC, the oldest and largest broadcaster in the world.[448][449][450] -The BBC, founded in 1922, is the UK's publicly funded radio, television and Internet broadcasting corporation, and is the oldest and largest broadcaster in the world.[448][449][450] It operates numerous television and radio stations in the UK and abroad and its domestic services are funded by the television licence.[451][452] Other major players in the UK media include ITV plc, which operates 11 of the 15 regional television broadcasters that make up the ITV Network,[453] and News Corporation, which owns a number of national newspapers through News International such as the most popular tabloid The Sun and the longest-established daily "broadsheet" The Times,[454] as well as holding a large stake in satellite broadcaster British Sky Broadcasting.[455] London dominates the media sector in the UK: national newspapers and television and radio are largely based there, although Manchester is also a significant national media centre. 
Edinburgh and Glasgow, and Cardiff, are important centres of newspaper and broadcasting production in Scotland and Wales respectively.[456] The UK publishing sector, including books, directories and databases, journals, magazines and business media, newspapers and news agencies, has a combined turnover of around £20 billion and employs around 167,000 people.[457] -In 2009, it was estimated that individuals viewed a mean of 3.75 hours of television per day and 2.81 hours of radio. In that year the main BBC public service broadcasting channels accounted for an estimated 28.4% of all television viewing; the three main independent channels accounted for 29.5% and the increasingly important other satellite and digital channels for the remaining 42.1%.[458] Sales of newspapers have fallen since the 1970s and in 2009 42% of people reported reading a daily national newspaper.[459] In 2010 82.5% of the UK population were Internet users, the highest proportion amongst the 20 countries with the largest total number of users in that year.[460] -Philosophy -Main article: British philosophy -The United Kingdom is famous for the tradition of 'British Empiricism', a branch of the philosophy of knowledge that states that only knowledge verified by experience is valid, and 'Scottish Philosophy', sometimes referred to as the 'Scottish School of Common Sense'.[461] The most famous philosophers of British Empiricism are John Locke, George Berkeley and David Hume; while Dugald Stewart, Thomas Reid and William Hamilton were major exponents of the Scottish "common sense" school. Two Britons are also notable for a theory of moral philosophy utilitarianism, first used by Jeremy Bentham and later by John Stuart Mill in his short work Utilitarianism.[462][463] Other eminent philosophers from the UK and the unions and countries that preceded it include Duns Scotus, John Lilburne, Mary Wollstonecraft, Sir Francis Bacon, Adam Smith, Thomas Hobbes, William of Ockham, Bertrand Russell and A.J. 
"Freddie" Ayer. Foreign-born philosophers who settled in the UK include Isaiah Berlin, Karl Marx, Karl Popper and Ludwig Wittgenstein. -Sport -Main article: Sport in the United Kingdom - -Wembley Stadium, London, home of the England national football team, is one of the most expensive stadia ever built.[464] -Major sports, including association football, tennis, rugby union, rugby league, golf, boxing, rowing and cricket, originated or were substantially developed in the UK and the states that preceded it. With the rules and codes of many modern sports invented and codified in late 19th-century Victorian Britain, in 2012, the President of the IOC, Jacques Rogge, stated: "This great, sports-loving country is widely recognized as the birthplace of modern sport. It was here that the concepts of sportsmanship and fair play were first codified into clear rules and regulations. It was here that sport was included as an educational tool in the school curriculum".[465][466] -In most international competitions, separate teams represent England, Scotland and Wales. Northern Ireland and the Republic of Ireland usually field a single team representing all of Ireland, with notable exceptions being association football and the Commonwealth Games. In sporting contexts, the English, Scottish, Welsh and Irish / Northern Irish teams are often referred to collectively as the Home Nations. There are some sports in which a single team represents the whole of the United Kingdom, including the Olympics, where the UK is represented by the Great Britain team. The 1908, 1948 and 2012 Summer Olympics were held in London, making it the first city to host the games three times. Britain has participated in every modern Olympic Games to date and is third in the medal count. -A 2003 poll found that football is the most popular sport in the United Kingdom.[467] Each of the Home Nations has its own football association, national team and league system. 
The English top division, the Premier League, is the most watched football league in the world.[468] The first-ever international football match was contested by England and Scotland on 30 November 1872.[469] England, Scotland, Wales and Northern Ireland compete as separate countries in international competitions.[470] A Great Britain Olympic football team was assembled for the first time to compete in the London 2012 Olympic Games. However, the Scottish, Welsh and Northern Irish football associations declined to participate, fearing that it would undermine their independent status – a fear confirmed by FIFA president Sepp Blatter.[471] - -The Millennium Stadium, Cardiff, opened for the 1999 Rugby World Cup. -Cricket was invented in England. The England cricket team, controlled by the England and Wales Cricket Board,[472] is the only national team in the UK with Test status. Team members are drawn from the main county sides, and include both English and Welsh players. Cricket is distinct from football and rugby where Wales and England field separate national teams, although Wales had fielded its own team in the past. Irish and Scottish players have played for England because neither Scotland nor Ireland have Test status and have only recently started to play in One Day Internationals.[473][474] Scotland, England (and Wales), and Ireland (including Northern Ireland) have competed at the Cricket World Cup, with England reaching the finals on three occasions. There is a professional league championship in which clubs representing 17 English counties and 1 Welsh county compete.[475] -Rugby league is a popular sport in some regions of the UK. 
It originated in Huddersfield and is generally played in Northern England.[476] A single 'Great Britain Lions' team had competed in the Rugby League World Cup and Test match games, but this changed in 2008 when England, Scotland and Ireland competed as separate nations.[477] Great Britain is still being retained as the full national team for Ashes tours against Australia, New Zealand and France. Super League is the highest level of professional rugby league in the UK and Europe. It consists of 11 teams from Northern England, 1 from London, 1 from Wales and 1 from France. -In rugby union, England, Scotland, Wales, Ireland, France and Italy compete in the Six Nations Championship; the premier international tournament in the northern hemisphere. Sport governing bodies in England, Scotland, Wales and Ireland organise and regulate the game separately.[478] If any of the British teams or the Irish team beat the other three in a tournament, then it is awarded the Triple Crown.[479] - -The Wimbledon Championships, a Grand Slam tennis tournament, is held in Wimbledon, London every June or July. -Thoroughbred racing, which originated under Charles II of England as the "sport of kings", is popular throughout the UK with world-famous races including the Grand National, the Epsom Derby, Royal Ascot and the Cheltenham National Hunt Festival (including the Cheltenham Gold Cup). The UK has proved successful in the international sporting arena in rowing. -The UK is closely associated with motorsport. Many teams and drivers in Formula One (F1) are based in the UK, and the country has won more drivers' and constructors' titles than any other. The UK hosted the very first F1 Grand Prix in 1950 at Silverstone, the current location of the British Grand Prix held each year in July. The country also hosts legs of the Grand Prix motorcycle racing, World Rally Championship and FIA World Endurance Championship. 
The premier national auto racing event is the British Touring Car Championship (BTCC). Motorcycle road racing has a long tradition with races such as the Isle of Man TT and the North West 200. -Golf is the sixth-most popular sport, by participation, in the UK. Although The Royal and Ancient Golf Club of St Andrews in Scotland is the sport's home course,[480] the world's oldest golf course is actually Musselburgh Links' Old Golf Course.[481] -Snooker is one of the UK's popular sporting exports, with the world championships held annually in Sheffield.[482] The modern game of lawn tennis first originated in the city of Birmingham between 1859 and 1865.[483] The Championships, Wimbledon are international tennis events held in Wimbledon in south London every summer and are regarded as the most prestigious event of the global tennis calendar. In Northern Ireland Gaelic football and hurling are popular team sports, both in terms of participation and spectating, and Irish expatriates in the UK and the US also play them.[484] Shinty (or camanachd) is popular in the Scottish Highlands.[485] -Symbols -Main article: Symbols of the United Kingdom, the Channel Islands and the Isle of Man - -The Statue of Britannia in Plymouth. Britannia is a national personification of the UK. -The flag of the United Kingdom is the Union Flag (also referred to as the Union Jack). It was created in 1606 by the superimposition of the Flag of England on the Flag of Scotland and updated in 1801 with the addition of Saint Patrick's Flag. Wales is not represented in the Union Flag, as Wales had been conquered and annexed to England prior to the formation of the United Kingdom. The possibility of redesigning the Union Flag to include representation of Wales has not been completely ruled out.[486] The national anthem of the United Kingdom is "God Save the King", with "King" replaced with "Queen" in the lyrics whenever the monarch is a woman. 
-Britannia is a national personification of the United Kingdom, originating from Roman Britain.[487] Britannia is symbolised as a young woman with brown or golden hair, wearing a Corinthian helmet and white robes. She holds Poseidon's three-pronged trident and a shield, bearing the Union Flag. Sometimes she is depicted as riding on the back of a lion. Since the height of the British Empire in the late 19th century, Britannia has often been associated with British maritime dominance, as in the patriotic song "Rule, Britannia!". Up until 2008, the lion symbol was depicted behind Britannia on the British fifty pence coin and on the back of the British ten pence coin. It is also used as a symbol on the non-ceremonial flag of the British Army. The bulldog is sometimes used as a symbol of the United Kingdom and has been associated with Winston Churchill's defiance of Nazi Germany.[488] -See also -Outline of the United Kingdom - United Kingdom – Wikipedia book -Walking in the United Kingdom -Flag of the United Kingdom.svgUnited Kingdom portal Flag of Europe.svgEuropean Union portal Europe green light.pngEurope portal -Notes -Jump up ^ The Royal coat of arms used in Scotland: - Royal Coat of Arms of the United Kingdom (Scotland).svg -Jump up ^ There is no authorised version of the national anthem as the words are a matter of tradition; only the first verse is usually sung.[1] No law was passed making "God Save the Queen" the official anthem. In the English tradition, such laws are not necessary; proclamation and usage are sufficient to make it the national anthem. "God Save the Queen" also serves as the Royal anthem for several other countries, namely certain Commonwealth realms. -Jump up ^ Under the Council of Europe's European Charter for Regional or Minority Languages, Scots, Ulster-Scots, Welsh, Cornish, Irish and Scottish Gaelic, are officially recognised as regional or minority languages by the British government for the purposes of the Charter. 
See also Languages of the United Kingdom.[2] -Jump up ^ Although Northern Ireland is the only part of the UK that shares a land border with another state, two of its Overseas Territories also share land borders with other states. Gibraltar shares a border with Spain, while the Sovereign Base Areas of Akrotiri and Dhekelia share borders with the Republic of Cyprus, Turkish Republic of Northern Cyprus and UN buffer zone separating the two Cypriot polities. -Jump up ^ The Anglo-Irish Treaty was signed on 6 December 1921 to resolve the Irish War of Independence. Effective one year later, it established the Irish Free State as a separate dominion within the Commonwealth. The UK's current name was adopted in 1927 to reflect the change. -Jump up ^ Compare to section 1 of both of the 1800 Acts of Union which reads: the Kingdoms of Great Britain and Ireland shall...be united into one Kingdom, by the Name of "The United Kingdom of Great Britain and Ireland" -Jump up ^ New Zealand, Israel and San Marino are the other countries with uncodified constitutions. -Jump up ^ Since the early twentieth century the prime minister has held the office of First Lord of the Treasury, and in recent decades has also held the office of Minister for the Civil Service. -Jump up ^ Sinn Féin, an Irish republican party, also contests elections in the Republic of Ireland. -Jump up ^ In 2007–2008, this was calculated to be £115 per week for single adults with no dependent children; £199 per week for couples with no dependent children; £195 per week for single adults with two dependent children under 14; and £279 per week for couples with two dependent children under 14. -References -Jump up ^ National Anthem, British Monarchy official website. Retrieved 16 November 2013. -^ Jump up to: a b c "List of declarations made with respect to treaty No. 148". Council of Europe. Retrieved 12 December 2013. 
-^ Jump up to: a b "Population Estimates for UK, England and Wales, Scotland and Northern Ireland, Mid-2013". Office for National Statistics. Retrieved 26 June 2014. -Jump up ^ "2011 UK censuses". Office for National Statistics. Retrieved 17 December 2012. -^ Jump up to: a b c d "United Kingdom". International Monetary Fund. Retrieved 1 November 2014. -Jump up ^ "Gini coefficient of equivalised disposable income (source: SILC)". Eurostat Data Explorer. Retrieved 13 August 2013. -Jump up ^ "2014 Human Development Report". 14 March 2013. pp. 22–25. Retrieved 27 July 2014. -Jump up ^ "Definition of Great Britain in English". Oxford University Press. Retrieved 29 October 2014. Great Britain is the name for the island that comprises England, Scotland, and Wales, although the term is also used loosely to refer to the United Kingdom. -Jump up ^ The British Monarchy, What is constitutional monarchy?. Retrieved 17 July 2013 -Jump up ^ CIA, The World Factbook. Retrieved 17 July 2013 -Jump up ^ "The World Factbook". Central Intelligence Agency. 1 February 2014. Retrieved 23 February 2014. -^ Jump up to: a b "Countries within a country". Prime Minister's Office. 10 January 2003. -^ Jump up to: a b "Devolution of powers to Scotland, Wales, and Northern Ireland". United Kingdom Government. Retrieved 17 April 2013. In a similar way to how the government is formed from members from the two Houses of Parliament, members of the devolved legislatures nominate ministers from among themselves to comprise an executive, known as the devolved administrations... -Jump up ^ "Fall in UK university students". BBC News. 29 January 2009. -Jump up ^ "Country Overviews: United Kingdom". Transport Research Knowledge Centre. Retrieved 28 March 2010. -Jump up ^ "Key facts about the United Kingdom". Directgov. Retrieved 3 May 2011. The full title of this country is 'the United Kingdom of Great Britain and Northern Ireland'. 'The UK' is made up of England, Scotland, Wales and Northern Ireland. 
'Britain' is used informally, usually meaning the United Kingdom. 'Great Britain' is made up of England, Scotland and Wales. The Channel Islands and the Isle of Man are not part of the UK.[dead link] -Jump up ^ "Working with Overseas Territories". Foreign and Commonwealth Office. Retrieved 3 May 2011. -Jump up ^ Mathias, P. (2001). The First Industrial Nation: the Economic History of Britain, 1700–1914. London: Routledge. ISBN 0-415-26672-6. -Jump up ^ Ferguson, Niall (2004). Empire: The rise and demise of the British world order and the lessons for global power. New York: Basic Books. ISBN 0-465-02328-2. -Jump up ^ Sheridan, Greg (15 May 2010). "Cameron has chance to make UK great again". The Australian (Sydney). Retrieved 23 May 2011. -Jump up ^ Dugan, Emily (18 November 2012). "Britain is now most powerful nation on earth". The Independent (London). Retrieved 18 November 2012. -^ Jump up to: a b "The 15 countries with the highest military expenditure in 2013 (table)" (PDF). Stockholm International Peace Research Institute. Retrieved 4 May 2014. -^ Jump up to: a b The Military Balance 2014: Top 15 Defence Budgets 2013 (IISS) -Jump up ^ "Treaty of Union, 1706". Scots History Online. Retrieved 23 August 2011. -Jump up ^ Barnett, Hilaire; Jago, Robert (2011). Constitutional & Administrative Law (8th ed.). Abingdon: Routledge. p. 165. ISBN 978-0-415-56301-7. -Jump up ^ Gascoigne, Bamber. "History of Great Britain (from 1707)". History World. Retrieved 18 July 2011. -Jump up ^ Cottrell, P. (2008). The Irish Civil War 1922–23. p. 85. ISBN 1-84603-270-9. -^ Jump up to: a b S. Dunn; H. 
Dawson (2000), An Alphabetical Listing of Word, Name and Place in Northern Ireland and the Living Language of Conflict, Lampeter: Edwin Mellen Press, One specific problem - in both general and particular senses - is to know what to call Northern Ireland itself: in the general sense, it is not a country, or a province, or a state - although some refer to it contemptuously as a statelet: the least controversial word appears to be jurisdiction, but this might change. -Jump up ^ "Changes in the list of subdivision names and code elements". ISO 3166-2. International Organization for Standardization. 15 December 2011. Retrieved 28 May 2012. -Jump up ^ Population Trends, Issues 75–82, p.38, 1994, UK Office of Population Censuses and Surveys -Jump up ^ Life in the United Kingdom: a journey to citizenship, p. 7, United Kingdom Home Office, 2007, ISBN 978-0-11-341313-3. -Jump up ^ "Statistical bulletin: Regional Labour Market Statistics". Retrieved 5 March 2014. -Jump up ^ "13.4% Fall In Earnings Value During Recession". Retrieved 5 March 2014. -Jump up ^ Murphy, Dervla (1979). A Place Apart. London: Penguin. ISBN 978-0-14-005030-1. -Jump up ^ Whyte, John; FitzGerald, Garret (1991). Interpreting Northern Ireland. Oxford: Clarendon Press. ISBN 978-0-19-827380-6. -Jump up ^ "Guardian Unlimited Style Guide". London: Guardian News and Media Limited. 19 December 2008. Retrieved 23 August 2011. -Jump up ^ "BBC style guide (Great Britain)". BBC News. 19 August 2002. Retrieved 23 August 2011. -Jump up ^ "Key facts about the United Kingdom". Government, citizens and rights. HM Government. Retrieved 24 August 2011.[dead link] -Jump up ^ "Merriam-Webster Dictionary Online Definition of ''Great Britain''". Merriam Webster. 31 August 2012. Retrieved 9 April 2013. -Jump up ^ New Oxford American Dictionary: "Great Britain: England, Wales, and Scotland considered as a unit. The name is also often used loosely to refer to the United Kingdom." -Jump up ^ "Great Britain". 
International Olympic Committee. Retrieved 10 May 2011. -Jump up ^ "Team GB – Our Greatest Team". British Olympic Association. Retrieved 10 May 2011.[dead link] -Jump up ^ Bradley, Anthony Wilfred; Ewing, Keith D. (2007). Constitutional and administrative law 1 (14th ed.). Harlow: Pearson Longman. p. 36. ISBN 978-1-4058-1207-8. -Jump up ^ "Which of these best describes the way you think of yourself?". Northern Ireland Life and Times Survey 2010. ARK – Access Research Knowledge. 2010. Retrieved 1 July 2010. -Jump up ^ Schrijver, Frans (2006). Regionalism after regionalisation: Spain, France and the United Kingdom. Amsterdam University Press. pp. 275–277. ISBN 978-90-5629-428-1. -Jump up ^ Jack, Ian (11 December 2010). "Why I'm saddened by Scotland going Gaelic". The Guardian (London). -Jump up ^ Ffeithiau allweddol am y Deyrnas Unedig : Directgov – Llywodraeth, dinasyddion a hawliau[dead link] -Jump up ^ "Ancient skeleton was 'even older'". BBC News. 30 October 2007. Retrieved 27 April 2011. -Jump up ^ Koch, John T. (2006). Celtic culture: A historical encyclopedia. Santa Barbara, CA: ABC-CLIO. p. 973. ISBN 978-1-85109-440-0. -Jump up ^ Davies, John; Jenkins, Nigel; Baines, Menna; Lynch, Peredur I., eds. (2008). The Welsh Academy Encyclopaedia of Wales. Cardiff: University of Wales Press. p. 915. ISBN 978-0-7083-1953-6. -Jump up ^ "Short Athelstan biography". BBC History. Retrieved 9 April 2013. -Jump up ^ Mackie, J.D. (1991). A History of Scotland. London: Penguin. pp. 18–19. ISBN 978-0-14-013649-4. -Jump up ^ Campbell, Ewan (1999). Saints and Sea-kings: The First Kingdom of the Scots. Edinburgh: Canongate. pp. 8–15. ISBN 0-86241-874-7. -Jump up ^ Haigh, Christopher (1990). The Cambridge Historical Encyclopedia of Great Britain and Ireland. Cambridge University Press. p. 30. ISBN 978-0-521-39552-6. -Jump up ^ Ganshof, F.L. (1996). Feudalism. University of Toronto. p. 165. ISBN 978-0-8020-7158-3. -Jump up ^ Chibnall, Marjorie (1999). 
The debate on the Norman Conquest. Manchester University Press. pp. 115–122. ISBN 978-0-7190-4913-2. -Jump up ^ Keen, Maurice. "The Hundred Years War". BBC History. -Jump up ^ The Reformation in England and Scotland and Ireland: The Reformation Period & Ireland under Elizabeth I, Encyclopædia Britannica Online. -Jump up ^ "British History in Depth – Wales under the Tudors". BBC History. 5 November 2009. Retrieved 21 September 2010. -Jump up ^ Nicholls, Mark (1999). A history of the modern British Isles, 1529–1603: The two kingdoms. Oxford: Blackwell. pp. 171–172. ISBN 978-0-631-19334-0. -Jump up ^ Canny, Nicholas P. (2003). Making Ireland British, 1580–1650. Oxford University Press. pp. 189–200. ISBN 978-0-19-925905-2. -Jump up ^ Ross, D. (2002). Chronology of Scottish History. Glasgow: Geddes & Grosset. p. 56. ISBN 1-85534-380-0 -Jump up ^ Hearn, J. (2002). Claiming Scotland: National Identity and Liberal Culture. Edinburgh University Press. p. 104. ISBN 1-902930-16-9 -Jump up ^ "English Civil Wars". Encyclopaedia Britannica. Retrieved 28 April 2013. -Jump up ^ "Scotland and the Commonwealth: 1651–1660". Archontology.org. 14 March 2010. Retrieved 20 April 2010. -Jump up ^ Lodge, Richard (2007) [1910]. The History of England – From the Restoration to the Death of William III (1660–1702). Read Books. p. 8. ISBN 978-1-4067-0897-4. -Jump up ^ "Tudor Period and the Birth of a Regular Navy". Royal Navy History. Institute of Naval History. Retrieved 24 December 2010.[dead link] -Jump up ^ Canny, Nicholas (1998). The Origins of Empire, The Oxford History of the British Empire Volume I. Oxford University Press. ISBN 0-19-924676-9. -Jump up ^ "Articles of Union with Scotland 1707". UK Parliament. Retrieved 19 October 2008. -Jump up ^ "Acts of Union 1707". UK Parliament. Retrieved 6 January 2011. -Jump up ^ "Treaty (act) of Union 1706". Scottish History online. Retrieved 3 February 2011. -Jump up ^ Library of Congress, The Impact of the American Revolution Abroad, p. 73. 
-Jump up ^ Loosemore, Jo (2007). Sailing against slavery. BBC Devon. 2007. -Jump up ^ "The Act of Union". Act of Union Virtual Library. Retrieved 15 May 2006. -Jump up ^ Tellier, L.-N. (2009). Urban World History: an Economic and Geographical Perspective. Quebec: PUQ. p. 463. ISBN 2-7605-1588-5. -Jump up ^ Sondhaus, L. (2004). Navies in Modern World History. London: Reaktion Books. p. 9. ISBN 1-86189-202-0. -Jump up ^ Porter, Andrew (1998). The Nineteenth Century, The Oxford History of the British Empire Volume III. Oxford University Press. p. 332. ISBN 0-19-924678-5. -Jump up ^ "The Workshop of the World". BBC History. Retrieved 28 April 2013. -Jump up ^ Porter, Andrew (1998). The Nineteenth Century, The Oxford History of the British Empire Volume III. Oxford University Press. p. 8. ISBN 0-19-924678-5. -Jump up ^ Marshall, P.J. (1996). The Cambridge Illustrated History of the British Empire. Cambridge University Press. pp. 156–57. ISBN 0-521-00254-0. -Jump up ^ Tompson, Richard S. (2003). Great Britain: a reference guide from the Renaissance to the present. New York: Facts on File. p. 63. ISBN 978-0-8160-4474-0. -Jump up ^ Hosch, William L. (2009). World War I: People, Politics, and Power. America at War. New York: Britannica Educational Publishing. p. 21. ISBN 978-1-61530-048-8. -Jump up ^ Turner, John (1988). Britain and the First World War. London: Unwin Hyman. pp. 22–35. ISBN 978-0-04-445109-9. -^ Jump up to: a b Westwell, I.; Cove, D. (eds) (2002). History of World War I, Volume 3. London: Marshall Cavendish. pp. 698 and 705. ISBN 0-7614-7231-2. -Jump up ^ Turner, J. (1988). Britain and the First World War. Abingdon: Routledge. p. 41. ISBN 0-04-445109-1. -Jump up ^ SR&O 1921, No. 533 of 3 May 1921. -Jump up ^ "The Anglo-Irish Treaty, 6 December 1921". CAIN. Retrieved 15 May 2006. -Jump up ^ Rubinstein, W. D. (2004). Capitalism, Culture, and Decline in Britain, 1750–1990. Abingdon: Routledge. p. 11. ISBN 0-415-03719-0. 
-Jump up ^ "Britain to make its final payment on World War II loan from U.S.". The New York Times. 28 December 2006. Retrieved 25 August 2011. -Jump up ^ Francis, Martin (1997). Ideas and policies under Labour, 1945–1951: Building a new Britain. Manchester University Press. pp. 225–233. ISBN 978-0-7190-4833-3. -Jump up ^ Lee, Stephen J. (1996). Aspects of British political history, 1914–1995. London; New York: Routledge. pp. 173–199. ISBN 978-0-415-13103-2. -Jump up ^ Larres, Klaus (2009). A companion to Europe since 1945. Chichester: Wiley-Blackwell. p. 118. ISBN 978-1-4051-0612-2. -Jump up ^ "Country List". Commonwealth Secretariat. 19 March 2009. Retrieved 11 September 2012.[dead link] -Jump up ^ Julios, Christina (2008). Contemporary British identity: English language, migrants, and public discourse. Studies in migration and diaspora. Aldershot: Ashgate. p. 84. ISBN 978-0-7546-7158-9. -Jump up ^ Aughey, Arthur (2005). The Politics of Northern Ireland: Beyond the Belfast Agreement. London: Routledge. p. 7. ISBN 978-0-415-32788-6. -Jump up ^ "The troubles were over, but the killing continued. Some of the heirs to Ireland's violent traditions refused to give up their inheritance." Holland, Jack (1999). Hope against History: The Course of Conflict in Northern Ireland. New York: Henry Holt. p. 221. ISBN 978-0-8050-6087-4. -Jump up ^ Elliot, Marianne (2007). The Long Road to Peace in Northern Ireland: Peace Lectures from the Institute of Irish Studies at Liverpool University. University of Liverpool Institute of Irish Studies, Liverpool University Press. p. 2. ISBN 1-84631-065-2. -Jump up ^ Dorey, Peter (1995). British politics since 1945. Making contemporary Britain. Oxford: Blackwell. pp. 164–223. ISBN 978-0-631-19075-2. -Jump up ^ Griffiths, Alan; Wall, Stuart (2007). Applied Economics (11th ed.). Harlow: Financial Times Press. p. 6. ISBN 978-0-273-70822-3. Retrieved 26 December 2010. -Jump up ^ Keating, Michael (1 January 1998). 
"Reforging the Union: Devolution and Constitutional Change in the United Kingdom". Publius: the Journal of Federalism 28 (1): 217. doi:10.1093/oxfordjournals.pubjof.a029948. Retrieved 4 February 2009. -Jump up ^ Jackson, Mike (3 April 2011). "Military action alone will not save Libya". Financial Times (London). -Jump up ^ "United Kingdom country profile". BBC. 24 January 2013. Retrieved 9 April 2013. -Jump up ^ "Scotland to hold independence poll in 2014 – Salmond". BBC News. 10 January 2012. Retrieved 10 January 2012. -Jump up ^ Oxford English Dictionary: "British Isles: a geographical term for the islands comprising Great Britain and Ireland with all their offshore islands including the Isle of Man and the Channel Islands." -^ Jump up to: a b c d e f "United Kingdom". The World Factbook. Central Intelligence Agency. Retrieved 23 September 2008. -^ Jump up to: a b c d e Latimer Clarke Corporation Pty Ltd. "United Kingdom – Atlapedia Online". Atlapedia.com. Retrieved 26 October 2010. -Jump up ^ ROG Learing Team (23 August 2002). "The Prime Meridian at Greenwich". Royal Museums Greenwich. Royal Museums Greenwich. Retrieved 11 September 2012. -Jump up ^ Neal, Clare. "How long is the UK coastline?". British Cartographic Society. Retrieved 26 October 2010. -Jump up ^ "The Channel Tunnel". Eurotunnel. Retrieved 29 November 2010.[dead link] -Jump up ^ "England – Profile". BBC News. 11 February 2010. -Jump up ^ "Scotland Facts". Scotland Online Gateway. Archived from the original on 21 June 2008. Retrieved 16 July 2008. -Jump up ^ Winter, Jon (19 May 2001). "The complete guide to Scottish Islands". The Independent (London). -Jump up ^ "Overview of Highland Boundary Fault". Gazetteer for Scotland. University of Edinburgh. Retrieved 27 December 2010. -Jump up ^ "Ben Nevis Weather". Ben Nevis Weather. Retrieved 26 October 2008. -Jump up ^ "Profile: Wales". BBC News. 9 June 2010. Retrieved 7 November 2010. -Jump up ^ Giles Darkes (26 April 2014). 
"How long is the UK coastline?". The British Cartographic Society. -Jump up ^ "Geography of Northern Ireland". University of Ulster. Retrieved 22 May 2006. -Jump up ^ "UK climate summaries". Met Office. Retrieved 1 May 2011. -Jump up ^ United Nations Economic and Social Council (August 2007). "Ninth UN Conference on the standardization of Geographical Names". UN Statistics Division. Archived from the original on 1 December 2009. Retrieved 21 October 2008. -Jump up ^ Barlow, I.M. (1991). Metropolitan Government. London: Routledge. ISBN 978-0-415-02099-2. -Jump up ^ "Welcome to the national site of the Government Office Network". Government Offices. Archived from the original on 15 June 2009. Retrieved 3 July 2008. -Jump up ^ "A short history of London government". Greater London Authority. Archived from the original on 21 April 2008. Retrieved 4 October 2008. -Jump up ^ Sherman, Jill; Norfolk, Andrew (5 November 2004). "Prescott's dream in tatters as North East rejects assembly". The Times (London). Retrieved 15 February 2008. The Government is now expected to tear up its twelve-year-old plan to create eight or nine regional assemblies in England to mirror devolution in Scotland and Wales. (subscription required) -Jump up ^ "Local Authority Elections". Local Government Association. Retrieved 3 October 2008.[dead link] -Jump up ^ "STV in Scotland: Local Government Elections 2007". Political Studies Association. Archived from the original on 20 March 2011. Retrieved 2 August 2008. -Jump up ^ Ethical Standards in Public Life framework: "Ethical Standards in Public Life". The Scottish Government. Retrieved 3 October 2008. -Jump up ^ "Who we are". Convention of Scottish Local Authorities. Retrieved 5 July 2011. -Jump up ^ "Local Authorities". The Welsh Assembly Government. Retrieved 31 July 2008. -Jump up ^ "Local government elections in Wales". The Electoral Commission. 2008. Retrieved 8 April 2011. -Jump up ^ "Welsh Local Government Association". 
Welsh Local Government Association. Retrieved 20 March 2008. -Jump up ^ Devenport, Mark (18 November 2005). "NI local government set for shake-up". BBC News. Retrieved 15 November 2008. -Jump up ^ "Foster announces the future shape of local government" (Press release). Northern Ireland Executive. 13 March 2008. Retrieved 20 October 2008. -Jump up ^ "Local Government elections to be aligned with review of public administration" (Press release). Northern Ireland Office. 25 April 2008. Retrieved 2 August 2008.[dead link] -Jump up ^ "CIBC PWM Global – Introduction to The Cayman Islands". Cibc.com. 11 July 2012. Retrieved 17 August 2012. -Jump up ^ Rappeport, Laurie. "Cayman Islands Tourism". Washington DC: USA Today Travel Tips. Retrieved 9 April 2013. -Jump up ^ "Working with Overseas Territories". Foreign & Commonwealth Office. 6 October 2010. Retrieved 5 November 2010. -Jump up ^ http://www.justice.gov.uk/downloads/about/moj/our-responsibilities/Background_Briefing_on_the_Crown_Dependencies2.pdf -Jump up ^ "Overseas Territories". Foreign & Commonwealth Office. Retrieved 6 September 2010. -Jump up ^ "The World Factbook". CIA. Retrieved 26 December 2010. -Jump up ^ "Country profiles". Foreign & Commonwealth Office. 21 February 2008. Retrieved 6 September 2010.[dead link] -Jump up ^ Davison, Phil (18 August 1995). "Bermudians vote to stay British". The Independent (London). Retrieved 11 September 2012. -Jump up ^ The Committee Office, House of Commons. "House of Commons – Crown Dependencies – Justice Committee". Publications.parliament.uk. Retrieved 7 November 2010. -Jump up ^ Fact sheet on the UK's relationship with the Crown Dependencies – gov.uk, Ministry of Justice. Retrieved 25 August 2014. -Jump up ^ "Profile of Jersey". States of Jersey. Retrieved 31 July 2008. 
The legislature passes primary legislation, which requires approval by The Queen in Council, and enacts subordinate legislation in many areas without any requirement for Royal Sanction and under powers conferred by primary legislation. -Jump up ^ "Chief Minister to meet Channel Islands counterparts – Isle of Man Public Services" (Press release). Isle of Man Government. 29 May 2012. Retrieved 9 April 2013.[dead link] -Jump up ^ Bagehot, Walter (1867). The English Constitution. London: Chapman and Hall. p. 103. -Jump up ^ Carter, Sarah. "A Guide To the UK Legal System". University of Kent at Canterbury. Retrieved 16 May 2006. -Jump up ^ "Parliamentary sovereignty". UK Parliament. n.d. Archived from the original on 27 May 2012. -Jump up ^ "The Government, Prime Minister and Cabinet". Public services all in one place. Directgov. Retrieved 12 February 2010. -Jump up ^ "Brown is UK's new prime minister". BBC News. 27 June 2007. Retrieved 23 January 2008. -Jump up ^ "David Cameron is UK's new prime minister". BBC News. 11 May 2010. Retrieved 11 May 2010. -Jump up ^ November 2010 "Elections and voting". UK Parliament. Archived from the original on 14 November 2010. Retrieved 14 November 2010. -Jump up ^ November 2010 "The Parliament Acts". UK Parliament. Archived from the original on 14 November 2010. -Jump up ^ "United Kingdom". European Election Database. Norwegian Social Science Data Services. Retrieved 3 July 2010. -Jump up ^ Wainwright, Martin (28 May 2010). "Thirsk and Malton: Conservatives take final seat in parliament". The Guardian (London). Retrieved 3 July 2010. -Jump up ^ "Scots MPs attacked over fees vote". BBC News. 27 January 2004. Retrieved 21 October 2008. -Jump up ^ Taylor, Brian (1 June 1998). "Talking Politics: The West Lothian Question". BBC News. Retrieved 21 October 2008. -Jump up ^ "England-only laws 'need majority from English MPs'". BBC News. 25 March 2013. Retrieved 28 April 2013. -Jump up ^ "Scotland's Parliament – powers and structures". 
BBC News. 8 April 1999. Retrieved 21 October 2008. -Jump up ^ "Salmond elected as first minister". BBC News. 16 May 2007. Retrieved 21 October 2008. -Jump up ^ "Scottish election: SNP wins election". BBC News. 6 May 2011. -Jump up ^ "Structure and powers of the Assembly". BBC News. 9 April 1999. Retrieved 21 October 2008. -Jump up ^ "Carwyn Jones clinches leadership in Wales". WalesOnline (Media Wales). 1 December 2009. Retrieved 1 December 2009. -Jump up ^ "Devolved Government – Ministers and their departments". Northern Ireland Executive. Archived from the original on 22 August 2007. -Jump up ^ Burrows, N. (1999). "Unfinished Business: The Scotland Act 1998". The Modern Law Review 62 (2): 241–60 [p. 249]. doi:10.1111/1468-2230.00203. The UK Parliament is sovereign and the Scottish Parliament is subordinate. The White Paper had indicated that this was to be the approach taken in the legislation. The Scottish Parliament is not to be seen as a reflection of the settled will of the people of Scotland or of popular sovereignty but as a reflection of its subordination to a higher legal authority. Following the logic of this argument, the power of the Scottish Parliament to legislate can be withdrawn or overridden... -Jump up ^ Elliot, M. (2004). "United Kingdom: Parliamentary sovereignty under pressure". International Journal of Constitutional Law 2 (3): 545–627 [pp. 553–554]. doi:10.1093/icon/2.3.545. Notwithstanding substantial differences among the schemes, an important common factor is that the U.K. Parliament has not renounced legislative sovereignty in relation to the three nations concerned. For example, the Scottish Parliament is empowered to enact primary legislation on all matters, save those in relation to which competence is explicitly denied ... but this power to legislate on what may be termed "devolved matters" is concurrent with the Westminster Parliament's general power to legislate for Scotland on any matter at all, including devolved matters ... 
In theory, therefore, Westminster may legislate on Scottish devolved matters whenever it chooses... -Jump up ^ Walker, G. (2010). "Scotland, Northern Ireland, and Devolution, 1945–1979". Journal of British Studies 39 (1): 124 & 133. doi:10.1086/644536. -Jump up ^ Gamble, A. "The Constitutional Revolution in the United Kingdom". Publius 36 (1): 19–35 [p. 29]. doi:10.1093/publius/pjj011. The British parliament has the power to abolish the Scottish parliament and the Welsh assembly by a simple majority vote in both houses, but since both were sanctioned by referenda, it would be politically difficult to abolish them without the sanction of a further vote by the people. In this way several of the constitutional measures introduced by the Blair government appear to be entrenched and not subject to a simple exercise of parliamentary sovereignty at Westminster. -Jump up ^ Meehan, E. (1999). "The Belfast Agreement—Its Distinctiveness and Points of Cross-Fertilization in the UK's Devolution Programme". Parliamentary Affairs 52 (1): 19–31 [p. 23]. doi:10.1093/pa/52.1.19. [T]he distinctive involvement of two governments in the Northern Irish problem means that Northern Ireland's new arrangements rest upon an intergovernmental agreement. If this can be equated with a treaty, it could be argued that the forthcoming distribution of power between Westminster and Belfast has similarities with divisions specified in the written constitutions of federal states... Although the Agreement makes the general proviso that Westminster's 'powers to make legislation for Northern Ireland' remains 'unaffected', without an explicit categorical reference to reserved matters, it may be more difficult than in Scotland or Wales for devolved powers to be repatriated. The retraction of devolved powers would not merely entail consultation in Northern Ireland backed implicitly by the absolute power of parliamentary sovereignty but also the renegotiation of an intergovernmental agreement. 
-Jump up ^ "The Treaty (act) of the Union of Parliament 1706". Scottish History Online. Retrieved 5 October 2008. -Jump up ^ "UK Supreme Court judges sworn in". BBC News. 1 October 2009. -Jump up ^ "Constitutional reform: A Supreme Court for the United Kingdom". Department for Constitutional Affairs. July 2003. Retrieved 13 May 2013. -Jump up ^ "Role of the JCPC". Judicial Committee of the Privy Council. Retrieved 28 April 2013. -Jump up ^ Bainham, Andrew (1998). The international survey of family law: 1996. The Hague: Martinus Nijhoff. p. 298. ISBN 978-90-411-0573-8. -Jump up ^ Adeleye, Gabriel; Acquah-Dadzie, Kofi; Sienkewicz, Thomas; McDonough, James (1999). World dictionary of foreign expressions. Wauconda, IL: Bolchazy-Carducci. p. 371. ISBN 978-0-86516-423-9. -Jump up ^ "The Australian courts and comparative law". Australian Law Postgraduate Network. Retrieved 28 December 2010. -Jump up ^ "Court of Session – Introduction". Scottish Courts. Retrieved 5 October 2008.[dead link] -Jump up ^ "High Court of Justiciary – Introduction". Scottish Courts. Retrieved 5 October 2008.[dead link] -Jump up ^ "House of Lords – Practice Directions on Permission to Appeal". UK Parliament. Retrieved 22 June 2009. -Jump up ^ "Introduction". Scottish Courts. Retrieved 5 October 2008.[dead link] -Jump up ^ Samuel Bray (2005). "Not proven: introducing a third verdict". The University of Chicago Law Review 72 (4): 1299. Retrieved 30 November 2013. -Jump up ^ "Police-recorded crime down by 9%". BBC News. 17 July 2008. Retrieved 21 October 2008. -Jump up ^ "New record high prison population". BBC News. 8 February 2008. Retrieved 21 October 2008. -Jump up ^ "Crime falls to 32 year low" (Press release). Scottish Government. 7 September 2010. Retrieved 21 April 2011. -Jump up ^ "Prisoner Population at Friday 22 August 2008". Scottish Prison Service. Retrieved 28 August 2008. -Jump up ^ "Scots jail numbers at record high". BBC News. 29 August 2008. Retrieved 21 October 2008. 
-Jump up ^ Swaine, Jon (13 January 2009). "Barack Obama presidency will strengthen special relationship, says Gordon Brown". The Daily Telegraph (London). Retrieved 3 May 2011. -Jump up ^ Kirchner, E. J.; Sperling, J. (2007). Global Security Governance: Competing Perceptions of Security in the 21st Century. London: Taylor & Francis. p. 100. ISBN 0-415-39162-8. -Jump up ^ The Committee Office, House of Commons (19 February 2009). "DFID's expenditure on development assistance". UK Parliament. Retrieved 28 April 2013. -Jump up ^ "Ministry of Defence". Ministry of Defence. Retrieved 21 February 2012. -Jump up ^ "Speaker addresses Her Majesty Queen Elizabeth II". UK Parliament. 30 March 2012. Retrieved 28 April 2013. -Jump up ^ "House of Commons Hansard". UK Parliament. Retrieved 23 October 2008. -Jump up ^ UK 2005: The Official Yearbook of the United Kingdom of Great Britain and Northern Ireland. Office for National Statistics. p. 89. -Jump up ^ "Principles for Economic Regulation". Department for Business, Innovation & Skills. April 2011. Retrieved 1 May 2011. -Jump up ^ "United Kingdom". International Monetary Fund. Retrieved 1 October 2009. -Jump up ^ Chavez-Dreyfuss, Gertrude (1 April 2008). "Global reserves, dollar share up at end of 2007-IMF". Reuters. Retrieved 21 December 2009. -Jump up ^ "More About the Bank". Bank of England. n.d. Archived from the original on 12 March 2008. -Jump up ^ "Index of Services (experimental)". Office for National Statistics. 7 May 2006. Archived from the original on 7 May 2006. -Jump up ^ Sassen, Saskia (2001). The Global City: New York, London, Tokyo (2nd ed.). Princeton University Press. ISBN 0-691-07866-1. -^ Jump up to: a b "Global Financial Centres 7". Z/Yen. 2010. Retrieved 21 April 2010. -^ Jump up to: a b "Worldwide Centres of Commerce Index 2008". Mastercard. Retrieved 5 July 2011. -^ Jump up to: a b Zumbrun, Joshua (15 July 2008). "World's Most Economically Powerful Cities". Forbes (New York). 
Archived from the original on 19 May 2011. Retrieved 3 October 2010. -Jump up ^ "Global city GDP rankings 2008–2025". PricewaterhouseCoopers. Archived from the original on 19 May 2011. Retrieved 16 November 2010. -Jump up ^ Lazarowicz, Mark (Labour MP) (30 April 2003). "Financial Services Industry". UK Parliament. Retrieved 17 October 2008. -Jump up ^ International Tourism Receipts[dead link]. UNWTO Tourism Highlights, Edition 2005. page 12. World Tourism Organisation. Retrieved 24 May 2006. -Jump up ^ Bremner, Caroline (10 January 2010). "Euromonitor International's Top City Destination Ranking". Euromonitor International. Archived from the original on 19 May 2011. Retrieved 31 May 2011. -Jump up ^ "From the Margins to the Mainstream – Government unveils new action plan for the creative industries". DCMS. 9 March 2007. Retrieved 9 March 2007.[dead link] -^ Jump up to: a b "European Countries – United Kingdom". Europa (web portal). Retrieved 15 December 2010. -Jump up ^ Harrington, James W.; Warf, Barney (1995). Industrial location: Principles, practices, and policy. London: Routledge. p. 121. ISBN 978-0-415-10479-1. -Jump up ^ Spielvogel, Jackson J. (2008). Western Civilization: Alternative Volume: Since 1300. Belmont, CA: Thomson Wadsworth. ISBN 978-0-495-55528-5. -Jump up ^ Hewitt, Patricia (15 July 2004). "TUC Manufacturing Conference". Department of Trade and Industry. Retrieved 16 May 2006. -Jump up ^ "Industry topics". Society of Motor Manufacturers and Traders. 2011. Retrieved 5 July 2011. -Jump up ^ Robertson, David (9 January 2009). "The Aerospace industry has thousands of jobs in peril". The Times (London). Retrieved 9 June 2011. (subscription required) -Jump up ^ "Facts & Figures – 2009". Aerospace & Defence Association of Europe. Retrieved 9 June 2011.[dead link] -Jump up ^ "UK Aerospace Industry Survey – 2010". ADS Group. Retrieved 9 June 2011. 
-^ Jump up to: a b c d http://www.theengineer.co.uk/aerospace/in-depth/reasons-to-be-cheerful-about-the-uk-aerospace-sector/1017274.article -Jump up ^ "The Pharmaceutical sector in the UK". Department for Business, Innovation & Skills. Retrieved 9 June 2011. -Jump up ^ "Ministerial Industry Strategy Group – Pharmaceutical Industry: Competitiveness and Performance Indicators". Department of Health. Retrieved 9 June 2011.[dead link] -Jump up ^ [1][dead link] -Jump up ^ "UK in recession as economy slides". BBC News. 23 January 2009. Retrieved 23 January 2009. -Jump up ^ "UK youth unemployment at its highest in two decades: 22.5%". MercoPress. 15 April 2012. -Jump up ^ Groom, Brian (19 January 2011). "UK youth unemployment reaches record". Financial Times (London). -Jump up ^ "Release: EU Government Debt and Deficit returns". Office for National Statistics. March 2012. Retrieved 17 August 2012. -Jump up ^ "UK loses top AAA credit rating for first time since 1978". BBC News. 23 February 2013. Retrieved 23 February 2013. -Jump up ^ "Britain sees real wages fall 3.2%". Daily Express (London). 2 March 2013. -Jump up ^ Beckford, Martin (5 December 2011). "Gap between rich and poor growing fastest in Britain". The Daily Telegraph (London). -Jump up ^ "United Kingdom: Numbers in low income". The Poverty Site. Retrieved 25 September 2009. -Jump up ^ "United Kingdom: Children in low income households". The Poverty Site. Retrieved 25 September 2009. -Jump up ^ "Warning of food price hike crisis". BBC News. 4 April 2009. -Jump up ^ Andrews, J. (16 January 2013). "How poor is Britain now". Yahoo! Finance UK -Jump up ^ Glynn, S.; Booth, A. (1996). Modern Britain: An Economic and Social History. London: Routledge. -Jump up ^ "Report highlights 'bleak' poverty levels in the UK" Phys.org, 29 March 2013 -Jump up ^ Gascoin, J. "A reappraisal of the role of the universities in the Scientific Revolution", in Lindberg, David C. 
and Westman, Robert S., eds (1990), Reappraisals of the Scientific Revolution. Cambridge University Press. p. 248. ISBN 0-521-34804-8. -Jump up ^ Reynolds, E.E.; Brasher, N.H. (1966). Britain in the Twentieth Century, 1900–1964. Cambridge University Press. p. 336. OCLC 474197910. -Jump up ^ Burtt, E.A. (2003) [1924]. The Metaphysical Foundations of Modern Science. Mineola, NY: Courier Dover. p. 207. ISBN 0-486-42551-7. -Jump up ^ Hatt, C. (2006). Scientists and Their Discoveries. London: Evans Brothers. pp. 16, 30 and 46. ISBN 0-237-53195-X. -Jump up ^ Jungnickel, C.; McCormmach, R. (1996). Cavendish. American Philosophical Society. ISBN 0-87169-220-1. -Jump up ^ "The Nobel Prize in Physiology or Medicine 1945: Sir Alexander Fleming, Ernst B. Chain, Sir Howard Florey". The Nobel Foundation. Archived from the original on 21 June 2011. -Jump up ^ Hatt, C. (2006). Scientists and Their Discoveries. London: Evans Brothers. p. 56. ISBN 0-237-53195-X. -Jump up ^ James, I. (2010). Remarkable Engineers: From Riquet to Shannon. Cambridge University Press. pp. 33–6. ISBN 0-521-73165-8. -Jump up ^ Bova, Ben (2002) [1932]. The Story of Light. Naperville, IL: Sourcebooks. p. 238. ISBN 978-1-4022-0009-0. -Jump up ^ "Alexander Graham Bell (1847–1922)". Scottish Science Hall of Fame. Archived from the original on 21 June 2011. -Jump up ^ "John Logie Baird (1888–1946)". BBC History. Archived from the original on 21 June 2011. -Jump up ^ Cole, Jeffrey (2011). Ethnic Groups of Europe: An Encyclopedia. Santa Barbara, CA: ABC-CLIO. p. 121. ISBN 1-59884-302-8. -Jump up ^ Castells, M.; Hall, P.; Hall, P.G. (2004). Technopoles of the World: the Making of Twenty-First-Century Industrial Complexes. London: Routledge. pp. 98–100. ISBN 0-415-10015-1. -Jump up ^ "Knowledge, networks and nations: scientific collaborations in the twenty-first century". Royal Society. 2011. Archived from the original on 22 June 2011. -Jump up ^ McCook, Alison. "Is peer review broken?". 
Reprinted from the Scientist 20(2) 26, 2006. Archived from the original on 21 June 2011. -^ Jump up to: a b "Heathrow 'needs a third runway'". BBC News. 25 June 2008. Retrieved 17 October 2008. -^ Jump up to: a b "Statistics: Top 30 World airports" (Press release). Airports Council International. July 2008. Retrieved 15 October 2008. -Jump up ^ "Transport Statistics Great Britain: 2010". Department for Transport. Archived from the original on 16 December 2010. -Jump up ^ "Major new rail lines considered". BBC News. 21 June 2008. Archived from the original on 9 October 2010. -Jump up ^ "Crossrail's giant tunnelling machines unveiled". BBC News. 2 January 2012. -Jump up ^ Leftly, Mark (29 August 2010). "Crossrail delayed to save £1bn". The Independent on Sunday (London). -^ Jump up to: a b "Size of Reporting Airports October 2009 – September 2010". Civil Aviation Authority. Retrieved 5 December 2010. -Jump up ^ "BMI being taken over by Lufthansa". BBC News. 29 October 2008. Retrieved 23 December 2009. -Jump up ^ "United Kingdom Energy Profile". U.S. Energy Information Administration. Retrieved 4 November 2010. -Jump up ^ Mason, Rowena (24 October 2009). "Let the battle begin over black gold". The Daily Telegraph (London). Retrieved 26 November 2010. -Jump up ^ Heath, Michael (26 November 2010). "RBA Says Currency Containing Prices, Rate Level 'Appropriate' in Near Term". Bloomberg (New York). Retrieved 26 November 2010. -^ Jump up to: a b c "Nuclear Power in the United Kingdom". World Nuclear Association. April 2013. Retrieved 9 April 2013. -^ Jump up to: a b c "United Kingdom – Oil". U.S. Energy Information Administration. Retrieved 4 November 2010.[dead link] -Jump up ^ "Diminishing domestic reserves, escalating imports". EDF Energy. Retrieved 9 April 2013. -^ Jump up to: a b "United Kingdom – Natural Gas". U.S. Energy Information Administration. Retrieved 4 November 2010.[dead link] -^ Jump up to: a b "United Kingdom – Quick Facts Energy Overview". U.S. 
Energy Information Administration. Retrieved 4 November 2010.[dead link] -Jump up ^ The Coal Authority (10 April 2006). "Coal Reserves in the United Kingdom". The Coal Authority. Archived from the original on 4 January 2009. Retrieved 5 July 2011. -Jump up ^ "England Expert predicts 'coal revolution'". BBC News. 16 October 2007. Retrieved 23 September 2008. -Jump up ^ Watts, Susan (20 March 2012). "Fracking: Concerns over gas extraction regulations". BBC News. Retrieved 9 April 2013. -Jump up ^ "Quit fracking aboot". Friends of the Earth Scotland. Retrieved 9 April 2013. -Jump up ^ "Census Geography". Office for National Statistics. 30 October 2007. Archived from the original on 4 June 2011. Retrieved 14 April 2012. -Jump up ^ "Welcome to the 2011 Census for England and Wales". Office for National Statistics. n.d. Retrieved 11 October 2008. -^ Jump up to: a b c "2011 Census: Population Estimates for the United Kingdom". Office for National Statistics. 27 March 2011. Retrieved 18 December 2012. -^ Jump up to: a b c "Annual Mid-year Population Estimates, 2010". Office for National Statistics. 2011. Retrieved 14 April 2012. -Jump up ^ Batty, David (30 December 2010). "One in six people in the UK today will live to 100, study says". The Guardian (London). -^ Jump up to: a b "2011 UK censuses". Office for National Statistics. Retrieved 18 December 2012. -Jump up ^ "Population: UK population grows to 59.6 million" (Press release). Office for National Statistics. 24 June 2004. Archived from the original on 22 July 2004. Retrieved 14 April 2012. -Jump up ^ Khan, Urmee (16 September 2008). "England is most crowded country in Europe". The Daily Telegraph (London). Retrieved 5 September 2009. -Jump up ^ Carrell, Severin (17 December 2012). "Scotland's population at record high". The Guardian. London. Retrieved 18 December 2012. -^ Jump up to: a b c "Vital Statistics: Population and Health Reference Tables (February 2014 Update): Annual Time Series Data". ONS. 
Retrieved 27 April 2014. -Jump up ^ Boseley, Sarah (14 July 2008). "The question: What's behind the baby boom?". The Guardian (London). p. 3. Retrieved 28 August 2009. -Jump up ^ Tables, Graphs and Maps Interface (TGM) table. Eurostat (26 February 2013). Retrieved 12 July 2013. -Jump up ^ Campbell, Denis (11 December 2005). "3.6m people in Britain are gay – official". The Observer (London). Retrieved 28 April 2013. -Jump up ^ "2011 Census - Built-up areas". ONS. Retrieved 1 July 2013. -Jump up ^ Mid-2012 Population Estimates for Settlements and Localities in Scotland General Register Office for Scotland -Jump up ^ "Belfast Metropolitan Urban Area NISRA 2005". Retrieved 28 April 2013. -Jump up ^ 2011 Census: KS201UK Ethnic group, local authorities in the United Kingdom, Accessed 21 February 2014 -Jump up ^ "Welsh people could be most ancient in UK, DNA suggests". BBC News. 19 June 2012. Retrieved 28 April 2013. -Jump up ^ Thomas, Mark G. et al. "Evidence for a segregated social structure in early Anglo-Saxon England". Proceedings of the Royal Society B: Biological Sciences 273(1601): 2651–2657. -Jump up ^ Owen, James (19 July 2005). "Review of 'The Tribes of Britain'". National Geographic (Washington DC). -Jump up ^ Oppenheimer, Stephen (October 2006). "Myths of British ancestry" at the Wayback Machine (archived 26 September 2006). Prospect (London). Retrieved 5 November 2010. -Jump up ^ Henderson, Mark (23 October 2009). "Scientist – Griffin hijacked my work to make race claim about 'British aborigines'". The Times (London). Retrieved 26 October 2009. (subscription required) -Jump up ^ Costello, Ray (2001). Black Liverpool: The Early History of Britain's Oldest Black Community 1730–1918. Liverpool: Picton Press. ISBN 1-873245-07-6. -Jump up ^ "Culture and Ethnicity Differences in Liverpool – Chinese Community". Chambré Hardman Trust. Retrieved 26 October 2009. -Jump up ^ Coleman, David; Compton, Paul; Salt, John (2002). 
"The demographic characteristics of immigrant populations", Council of Europe, p.505. ISBN 92-871-4974-7. -Jump up ^ Mason, Chris (30 April 2008). "'Why I left UK to return to Poland'". BBC News. -Jump up ^ "Resident population estimates by ethnic group (percentages): London". Office for National Statistics. Retrieved 23 April 2008. -Jump up ^ "Resident population estimates by ethnic group (percentages): Leicester". Office for National Statistics. Retrieved 23 April 2008. -Jump up ^ "Census 2001 – Ethnicity and religion in England and Wales". Office for National Statistics. Retrieved 23 April 2008. -Jump up ^ Loveys, Kate (22 June 2011). "One in four primary school pupils are from an ethnic minority and almost a million schoolchildren do not speak English as their first language". Daily Mail (London). Retrieved 28 June 2011. -Jump up ^ Rogers, Simon (19 May 2011). "Non-white British population reaches 9.1 million". The Guardian (London). -Jump up ^ Wallop, Harry (18 May 2011). "Population growth of last decade driven by non-white British". The Daily Telegraph (London). -Jump up ^ "Official EU languages". European Commission. 8 May 2009. Retrieved 16 October 2009. -Jump up ^ "Language Courses in New York". United Nations. 2006. Retrieved 29 November 2010. -Jump up ^ "English language – Government, citizens and rights". Directgov. Retrieved 23 August 2011. -Jump up ^ "Commonwealth Secretariat – UK". Commonwealth Secretariat. Retrieved 23 August 2011. -^ Jump up to: a b c "Languages across Europe: United Kingdom". BBC. Retrieved 4 February 2013. -Jump up ^ Booth, Robert (30 January 2013). "Polish becomes England's second language". The Guardian (London). Retrieved 4 February 2012. 
-Jump up ^ European Charter for Regional or Minority Languages, Strasbourg, 5.XI.1992 - http://conventions.coe.int/treaty/en/Treaties/Html/148.htm -Jump up ^ Framework Convention for the Protection of National Minorities, Strasbourg, 1.II.1995 - http://conventions.coe.int/Treaty/en/Treaties/Html/157.htm -Jump up ^ National Statistics Online – Welsh Language[dead link]. National Statistics Office. -Jump up ^ "Differences in estimates of Welsh Language Skills". Office for National Statistics. Archived from the original on 12 January 2010. Retrieved 30 December 2008. -Jump up ^ Wynn Thomas, Peter (March 2007). "Welsh today". Voices. BBC. Retrieved 5 July 2011. -Jump up ^ "Scotland's Census 2001 – Gaelic Report". General Register Office for Scotland. Retrieved 28 April 2013. -Jump up ^ "Local UK languages 'taking off'". BBC News. 12 February 2009. -Jump up ^ Edwards, John R. (2010). Minority languages and group identity: cases and categories. John Benjamins. pp. 150–158. ISBN 978-90-272-1866-7. Retrieved 12 March 2011. -Jump up ^ Koch, John T. (2006). Celtic culture: a historical encyclopedia. ABC-CLIO. p. 696. ISBN 978-1-85109-440-0. -Jump up ^ "Language Data – Scots". European Bureau for Lesser-Used Languages. Archived from the original on 23 June 2007. Retrieved 2 November 2008. -Jump up ^ "Fall in compulsory language lessons". BBC News. 4 November 2004. -Jump up ^ "The School Gate for parents in Wales". BBC. Retrieved 28 April 2013. -Jump up ^ Cannon, John, ed. (2nd edn., 2009). A Dictionary of British History. Oxford University Press. p. 144. ISBN 0-19-955037-9. -Jump up ^ Field, Clive D. (November 2009). "British religion in numbers"[dead link]. BRIN Discussion Series on Religious Statistics, Discussion Paper 001. Retrieved 3 June 2011. -Jump up ^ Yilmaz, Ihsan (2005). Muslim Laws, Politics and Society in Modern Nation States: Dynamic Legal Pluralisms in England, Turkey, and Pakistan. Aldershot: Ashgate Publishing. pp. 55–6. ISBN 0-7546-4389-1. 
-Jump up ^ Brown, Callum G. (2006). Religion and Society in Twentieth-Century Britain. Harlow: Pearson Education. p. 291. ISBN 0-582-47289-X. -Jump up ^ Norris, Pippa; Inglehart, Ronald (2004). Sacred and Secular: Religion and Politics Worldwide. Cambridge University Press. p. 84. ISBN 0-521-83984-X. -Jump up ^ Fergusson, David (2004). Church, State and Civil Society. Cambridge University Press. p. 94. ISBN 0-521-52959-X. -Jump up ^ "UK Census 2001". National Office for Statistics. Archived from the original on 12 March 2007. Retrieved 22 April 2007. -Jump up ^ "Religious Populations". Office for National Statistics. 11 October 2004. Archived from the original on 6 June 2011. -Jump up ^ "United Kingdom: New Report Finds Only One in 10 Attend Church". News.adventist.org. 4 April 2007. Retrieved 12 September 2010. -Jump up ^ Philby, Charlotte (12 December 2012). "Less religious and more ethnically diverse: Census reveals a picture of Britain today". The Independent (London). -Jump up ^ The History of the Church of England. The Church of England. Retrieved 23 November 2008. -Jump up ^ "Queen and Church of England". British Monarchy Media Centre. Archived from the original on 8 October 2006. Retrieved 5 June 2010. -Jump up ^ "Queen and the Church". The British Monarchy (Official Website). Archived from the original on 7 June 2011. -Jump up ^ "How we are organised". Church of Scotland. Archived from the original on 7 June 2011. -Jump up ^ Weller, Paul (2005). Time for a Change: Reconfiguring Religion, State, and Society. London: Continuum. pp. 79–80. ISBN 0567084876. -Jump up ^ Peach, Ceri, "United Kingdom, a major transformation of the religious landscape", in H. Knippenberg. ed. (2005). The Changing Religious Landscape of Europe. Amsterdam: Het Spinhuis. pp. 44–58. ISBN 90-5589-248-3. -Jump up ^ Richards, Eric (2004). Britannia's children: Emigration from England, Scotland, Wales and Ireland since 1600. London: Hambledon, p. 143. ISBN 978-1-85285-441-6. 
-Jump up ^ Gibney, Matthew J.; Hansen, Randall (2005). Immigration and asylum: from 1900 to the present, ABC-CLIO, p. 630. ISBN 1-57607-796-9 -Jump up ^ "Short history of immigration". BBC. 2005. Retrieved 28 August 2010. -Jump up ^ Rogers, Simon (11 December 2012). "Census 2011 mapped and charted: England & Wales in religion, immigration and race". London: Guardian. Retrieved 11 December 2012. -Jump up ^ 6.5% of the EU population are foreigners and 9.4% are born abroad, Eurostat, Katya Vasileva, 34/2011. -Jump up ^ Muenz, Rainer (June 2006). "Europe: Population and Migration in 2005". Migration Policy Institute. Retrieved 2 April 2007. -Jump up ^ "Immigration and births to non-British mothers pushes British population to record high". London Evening Standard. 21 August 2008. -Jump up ^ Doughty, Steve; Slack, James (3 June 2008). "Third World migrants behind our 2.3m population boom". Daily Mail (London). -Jump up ^ Bentham, Martin (20 October 2008). "Tories call for tougher control of immigration". London Evening Standard. -Jump up ^ "Minister rejects migrant cap plan". BBC News. 8 September 2008. Retrieved 26 April 2011. -Jump up ^ Johnston, Philip (5 January 2007). "Immigration 'far higher' than figures say". The Daily Telegraph (London). Retrieved 20 April 2007. -Jump up ^ Travis, Alan (25 August 2011). "UK net migration rises 21%". The Guardian (London). -^ Jump up to: a b "Migration Statistics Quarterly Report May 2012". Office for National Statistics. 24 May 2012. -Jump up ^ "Migration to UK more than double government target". BBC News. 24 May 2012. -^ Jump up to: a b "Citizenship". Home Office. August 2011. Retrieved 24 October 2011.[dead link] -Jump up ^ Bamber, David (20 December 2000). "Migrant squad to operate in France". The Daily Telegraph (London). -Jump up ^ "Settlement". Home Office. August 2011. Retrieved 24 October 2011.[dead link] -Jump up ^ "Births in England and Wales by parents' country of birth, 2011". Office for National Statistics. 
30 August 2012. Retrieved 28 April 2013. -Jump up ^ "Right of Union citizens and their family members to move and reside freely within the territory of the Member States". European Commission. Retrieved 28 April 2013. -Jump up ^ Doward, Jamie; Temko, Ned (23 September 2007). "Home Office shuts the door on Bulgaria and Romania". The Observer (London). p. 2. Retrieved 23 August 2008. -Jump up ^ Sumption, Madeleine; Somerville, Will (January 2010). The UK's new Europeans: Progress and challenges five years after accession. Policy Report (London: Equality and Human Rights Commission). p. 13. ISBN 978-1-84206-252-4. Retrieved 19 January 2010. -Jump up ^ Doward, Jamie; Rogers, Sam (17 January 2010). "Young, self-reliant, educated: portrait of UK's eastern European migrants". The Observer (London). Retrieved 19 January 2010. -Jump up ^ Hopkirk, Elizabeth (20 October 2008). "Packing up for home: Poles hit by UK's economic downturn". London Evening Standard. -Jump up ^ "Migrants to UK 'returning home'". BBC News. 8 September 2009. Retrieved 8 September 2009. -Jump up ^ "UK sees shift in migration trend". BBC News. 27 May 2010. Retrieved 28 May 2010. -Jump up ^ "Fresh Talent: Working in Scotland". London: UK Border Agency. Retrieved 30 October 2010. -Jump up ^ Boxell, James (28 June 2010). "Tories begin consultation on cap for migrants". Financial Times (London). Retrieved 17 September 2010. -Jump up ^ "Vince Cable: Migrant cap is hurting economy". The Guardian (London). Press Association. 17 September 2010. Retrieved 17 September 2010. -Jump up ^ Richards (2004), pp. 6–7. -^ Jump up to: a b Sriskandarajah, Dhananjayan; Drew, Catherine (11 December 2006). "Brits Abroad: Mapping the scale and nature of British emigration". Institute for Public Policy Research. Retrieved 20 January 2007. -Jump up ^ "Brits Abroad: world overview". BBC. n.d. Retrieved 20 April 2007. -Jump up ^ Casciani, Dominic (11 December 2006). "5.5 m Britons 'opt to live abroad'". BBC News. 
Retrieved 20 April 2007. -Jump up ^ "Brits Abroad: Country-by-country". BBC News. 11 December 2006. -Jump up ^ "Local Authorities". Department for Children, Schools and Families. Retrieved 21 December 2008. -Jump up ^ Gordon, J.C.B. (1981). Verbal Deficit: A Critique. London: Croom Helm. p. 44 note 18. ISBN 978-0-85664-990-5. -Jump up ^ Section 8 ('Duty of local education authorities to secure provision of primary and secondary schools'), Sections 35–40 ('Compulsory attendance at Primary and Secondary Schools') and Section 61 ('Prohibition of fees in schools maintained by local education authorities ...'), Education Act 1944. -Jump up ^ "England's pupils in global top 10". BBC News. 10 December 2008. -Jump up ^ "More state pupils in universities". BBC News. 19 July 2007. -Jump up ^ MacLeod, Donald (9 November 2007). "Private school pupil numbers in decline". The Guardian (London). Retrieved 31 March 2010. -Jump up ^ Frankel, Hannah (3 September 2010). "Is Oxbridge still a preserve of the posh?". TES (London). Retrieved 9 April 2013. -Jump up ^ "World's top 100 universities 2013: their reputations ranked by Times Higher Education". The Guardian (London). 2013. Retrieved 23 October 2014. -Jump up ^ Davenport, F.; Beech, C.; Downs, T.; Hannigan, D. (2006). Ireland. Lonely Planet, 7th edn. ISBN 1-74059-968-3. p. 564. -Jump up ^ "About SQA". Scottish Qualifications Authority. 10 April 2013. Retrieved 28 April 2013. -Jump up ^ "About Learning and Teaching Scotland". Learning and Teaching Scotland. Retrieved 28 April 2013. -Jump up ^ "Brain drain in reverse". Scotland Online Gateway. July 2002. Archived from the original on 4 December 2007. -Jump up ^ "Increase in private school intake". BBC News. 17 April 2007. -Jump up ^ "MSPs vote to scrap endowment fee". BBC News. 28 February 2008. -Jump up ^ What will your child learn?[dead link] The Welsh Assembly Government. Retrieved 22 January 2010. -Jump up ^ CCEA. "About Us – What we do". 
Council for the Curriculum Examinations & Assessment. Retrieved 28 April 2013. -Jump up ^ Elitist Britain?, Social Mobility and Child Poverty Commission, 28 August 2014 -Jump up ^ Arnett, George (28 August 2014). "Elitism in Britain - breakdown by profession". The Guardian: Datablog. -Jump up ^ Haden, Angela; Campanini, Barbara, eds. (2000). The world health report 2000 – Health systems: improving performance. Geneva: World Health Organisation. ISBN 92-4-156198-X. Retrieved 5 July 2011. -Jump up ^ World Health Organization. "Measuring overall health system performance for 191 countries". New York University. Retrieved 5 July 2011. -Jump up ^ "'Huge contrasts' in devolved NHS". BBC News. 28 August 2008. -Jump up ^ Triggle, Nick (2 January 2008). "NHS now four different systems". BBC News. -Jump up ^ Fisher, Peter. "The NHS from Thatcher to Blair". NHS Consultants Association (International Association of Health Policy). The Budget ... was even more generous to the NHS than had been expected amounting to an annual rise of 7.4% above the rate of inflation for the next 5 years. This would take us to 9.4% of GDP spent on health ie around EU average. -Jump up ^ "OECD Health Data 2009 – How Does the United Kingdom Compare". Paris: Organisation for Economic Co-operation and Development. Retrieved 28 April 2013.[dead link] -Jump up ^ "The cultural superpower: British cultural projection abroad". Journal of the British Politics Society, Norway. Volume 6. No. 1. Winter 2011 -Jump up ^ Sheridan, Greg (15 May 2010). "Cameron has chance to make UK great again". The Australian (Sydney). Retrieved 20 May 2012. -Jump up ^ Goldfarb, Jeffrey (10 May 2006). "Bookish Britain overtakes America as top publisher". RedOrbit (Texas). Reuters. -Jump up ^ "William Shakespeare (English author)". Britannica Online encyclopedia. Retrieved 26 February 2006. -Jump up ^ MSN Encarta Encyclopedia article on Shakespeare. Archived from the original on 9 February 2006. Retrieved 26 February 2006. 
-Jump up ^ William Shakespeare. Columbia Electronic Encyclopedia. Retrieved 26 February 2006. -Jump up ^ "Mystery of Christie's success is solved". The Daily Telegraph (London). 19 December 2005. Retrieved 14 November 2010. -Jump up ^ "All-Time Essential Comics". IGN. Retrieved 15 August 2013. -Jump up ^ Johnston, Rich."Before Watchmen To Double Up For Hardcover Collections". Bleeding Cool. 10 December 2012. Retrieved 15 August 2013. -Jump up ^ "Edinburgh, UK appointed first UNESCO City of Literature". Unesco. 2004. Retrieved 28 April 2013.[dead link] -Jump up ^ "Early Welsh poetry". BBC Wales. Retrieved 29 December 2010. -Jump up ^ Lang, Andrew (2003) [1913]. History of English Literature from Beowulf to Swinburne. Holicong, PA: Wildside Press. p. 42. ISBN 978-0-8095-3229-2. -Jump up ^ "Dafydd ap Gwilym". Academi website. Academi. 2011. Retrieved 3 January 2011. Dafydd ap Gwilym is widely regarded as one of the greatest Welsh poets of all time, and amongst the leading European poets of the Middle Ages. -Jump up ^ True birthplace of Wales's literary hero. BBC News. Retrieved 28 April 2012 -Jump up ^ Kate Roberts: Biography at the Wayback Machine. BBC Wales. Retrieved 28 April 2012 -Jump up ^ Swift, Jonathan; Fox, Christopher (1995). Gulliver's travels: complete, authoritative text with biographical and historical contexts, critical history, and essays from five contemporary critical perspectives. Basingstoke: Macmillan. p. 10. ISBN 978-0-333-63438-7. -Jump up ^ "Bram Stoker." (PDF). The New York Times. 23 April 1912. Retrieved 1 January 2011. -^ Jump up to: a b "1960–1969". EMI Group. Retrieved 31 May 2008. -^ Jump up to: a b "Paul At Fifty". Time (New York). 8 June 1992. -^ Jump up to: a b Most Successful Group The Guinness Book of Records 1999, p. 230. Retrieved 19 March 2011. -Jump up ^ "British Citizen by Act of Parliament: George Frideric Handel". UK Parliament. 20 July 2009. Retrieved 11 September 2009.[dead link] -Jump up ^ Andrews, John (14 April 2006). 
"Handel all'inglese". Playbill (New York). Retrieved 11 September 2009. -Jump up ^ Citron, Stephen (2001). Sondheim and Lloyd-Webber: The new musical. London: Chatto & Windus. ISBN 978-1-85619-273-6. -Jump up ^ "Beatles a big hit with downloads". Belfast Telegraph. 25 November 2010. Retrieved 16 May 2011. -Jump up ^ "British rock legends get their own music title for PlayStation3 and PlayStation2" (Press release). EMI. 2 February 2009. -Jump up ^ Khan, Urmee (17 July 2008). "Sir Elton John honoured in Ben and Jerry ice cream". The Daily Telegraph (London). -Jump up ^ Alleyne, Richard (19 April 2008). "Rock group Led Zeppelin to reunite". The Daily Telegraph (London). Retrieved 31 March 2010. -Jump up ^ Fresco, Adam (11 July 2006). "Pink Floyd founder Syd Barrett dies at home". The Times (London). Retrieved 31 March 2010. (subscription required) -Jump up ^ Holton, Kate (17 January 2008). "Rolling Stones sign Universal album deal". Reuters. Retrieved 26 October 2008. -Jump up ^ Walker, Tim (12 May 2008). "Jive talkin': Why Robin Gibb wants more respect for the Bee Gees". The Independent (London). Retrieved 26 October 2008. -Jump up ^ "Brit awards winners list 2012: every winner since 1977". The Guardian (London). Retrieved 28 February 2012. -Jump up ^ Corner, Lewis (16 February 2012). "Adele, Coldplay biggest-selling UK artists worldwide in 2011". Digital Spy. Retrieved 22 March 2012. -Jump up ^ Hughes, Mark (14 January 2008). "A tale of two cities of culture: Liverpool vs Stavanger". The Independent (London). Retrieved 2 August 2009. -Jump up ^ "Glasgow gets city of music honour". BBC News. 20 August 2008. Retrieved 2 August 2009. -Jump up ^ Bayley, Stephen (24 April 2010). "The startling success of Tate Modern". The Times (London). Retrieved 19 January 2011. (subscription required) -Jump up ^ "Vertigo is named 'greatest film of all time'". BBC News. 2 August 2012. Retrieved 18 August 2012. -Jump up ^ "The Directors' Top Ten Directors". British Film Institute. 
Archived from the original on 27 May 2012. -Jump up ^ "Chaplin, Charles (1889–1977)". British Film Institute. Retrieved 25 January 2011. -Jump up ^ "Powell, Michael (1905–1990)". British Film Institute. Retrieved 25 January 2011. -Jump up ^ "Reed, Carol (1906–1976)". British Film Institute. Retrieved 25 January 2011. -Jump up ^ "Scott, Sir Ridley (1937–)". British Film Institute. Retrieved 25 January 2011. -Jump up ^ "Andrews, Julie (1935–)". British Film Institute. Retrieved 11 December 2010. -Jump up ^ "Burton, Richard (1925–1984)". British Film Institute. Retrieved 11 December 2010. -Jump up ^ "Caine, Michael (1933–)". British Film Institute. Retrieved 11 December 2010. -Jump up ^ "Chaplin, Charles (1889–1977)". British Film Institute. Retrieved 11 December 2010. -Jump up ^ "Connery, Sean (1930–)". British Film Institute. Retrieved 11 December 2010. -Jump up ^ "Leigh, Vivien (1913–1967)". British Film Institute. Retrieved 11 December 2010. -Jump up ^ "Niven, David (1910–1983)". British Film Institute. Retrieved 11 December 2010. -Jump up ^ "Olivier, Laurence (1907–1989)". British Film Institute. Retrieved 11 December 2010. -Jump up ^ "Sellers, Peter (1925–1980)". British Film Institute. Retrieved 11 December 2010. -Jump up ^ "Winslet, Kate (1975–)". British Film Institute. Retrieved 11 December 2010. -Jump up ^ "Daniel Day-Lewis makes Oscar history with third award". BBC News. Retrieved 15 August 2013. -Jump up ^ "Harry Potter becomes highest-grossing film franchise". The Guardian (London). 11 September 2007. Retrieved 2 November 2010. -Jump up ^ "History of Ealing Studios". Ealing Studios. Retrieved 5 June 2010. -^ Jump up to: a b "UK film – the vital statistics". UK Film Council. Retrieved 22 October 2010.[dead link] -Jump up ^ "The BFI 100". British Film Institute. 6 September 2006. Archived from the original on 1 April 2011. -Jump up ^ "Baftas fuel Oscars race". BBC News. 26 February 2001. Retrieved 14 February 2011. 
-^ Jump up to: a b "BBC: World's largest broadcaster & Most trusted media brand". Media Newsline. Archived from the original on 5 October 2010. Retrieved 23 September 2010. -^ Jump up to: a b "Digital licence". Prospect. Retrieved 23 September 2010. -^ Jump up to: a b "About the BBC – What is the BBC". BBC Online. Retrieved 23 September 2010. -Jump up ^ Newswire7 (13 August 2009). "BBC: World's largest broadcaster & Most trusted media brand". Media Newsline. Archived from the original on 17 June 2011. -Jump up ^ "TV Licence Fee: facts & figures". BBC Press Office. April 2010. Archived from the original on 17 June 2011. -Jump up ^ "Publications & Policies: The History of ITV". ITV.com. Archived from the original on 17 June 2011. -Jump up ^ "Publishing". News Corporation. Archived from the original on 17 June 2011. -Jump up ^ "Direct Broadcast Satellite Television". News Corporation. Archived from the original on 17 June 2011. -Jump up ^ William, D. (2010). UK Cities: A Look at Life and Major Cities in England, Scotland, Wales and Northern Ireland. Eastbourne: Gardners Books. ISBN 978-9987-16-021-1, pp. 22, 46, 109 and 145. -Jump up ^ "Publishing". Department of Culture, Media and Sport. Archived from the original on 17 June 2011. -Jump up ^ Ofcom "Communication Market Report 2010", 19 August 2010, pp. 97, 164 and 191 -Jump up ^ "Social Trends: Lifestyles and social participation". Office for National Statistics. 16 February 2010. Archived from the original on 17 June 2011. -Jump up ^ "Top 20 countries with the highest number of Internet users". Internet World Stats. Archived from the original on 17 June 2011. -Jump up ^ Fieser, James, ed. (2000). A bibliography of Scottish common sense philosophy: Sources and origins. Bristol: Thoemmes Press. Retrieved 17 December 2010. -Jump up ^ Palmer, Michael (1999). Moral Problems in Medicine: A Practical Coursebook. Cambridge: Lutterworth Press. p. 66. ISBN 978-0-7188-2978-0. -Jump up ^ Scarre, Geoffrey (1995). Utilitarianism. 
London: Routledge. p. 82. ISBN 978-0-415-12197-2. -Jump up ^ Gysin, Christian (9 March 2007). "Wembley kick-off: Stadium is ready and England play first game in fortnight". Daily Mail (London). Retrieved 19 March 2007. -Jump up ^ "Opening ceremony of the games of the XXX Olympiad". Olympic.org. Retrieved 30 November 2013. -Jump up ^ "Unparalleled Sporting History". Reuters. Retrieved 30 November 2013. -Jump up ^ "Rugby Union 'Britain's Second Most Popular Sport'". Ipsos-Mori. 22 December 2003. Retrieved 28 April 2013. -Jump up ^ Ebner, Sarah (2 July 2013). "History and time are key to power of football, says Premier League chief". The Times (London). Retrieved 30 November 2013. -Jump up ^ Mitchell, Paul (November 2005). "The first international football match". BBC Sport Scotland. Retrieved 15 December 2013. -Jump up ^ "Why is there no GB Olympics football team?". BBC Sport. 5 August 2008. Retrieved 31 December 2010. -Jump up ^ "Blatter against British 2012 team". BBC News. 9 March 2008. Retrieved 2 April 2008. -Jump up ^ "About ECB". England and Wales Cricket Board. n.d. Retrieved 28 April 2013. -Jump up ^ McLaughlin, Martyn (4 August 2009). "Howzat happen? England fields a Gaelic-speaking Scotsman in Ashes". The Scotsman (Edinburgh). Retrieved 30 December 2010. -Jump up ^ "Uncapped Joyce wins Ashes call up". BBC Sport. 15 November 2006. Retrieved 30 December 2010. -Jump up ^ "Glamorgan". BBC South East Wales. August 2009. Retrieved 30 December 2010. -Jump up ^ Ardener, Shirley (2007). Professional identities: policy and practice in business and bureaucracy. New York: Berghahn. p. 27. ISBN 978-1-84545-054-0. -Jump up ^ "Official Website of Rugby League World Cup 2008". Archived from the original on 16 October 2007. -Jump up ^ Louw, Jaco; Nesbit, Derrick (2008). The Girlfriends Guide to Rugby. Johannesburg: South Publishers. ISBN 978-0-620-39541-0. -Jump up ^ "Triple Crown". RBS 6 Nations. Retrieved 6 March 2011. -Jump up ^ "Tracking the Field". Ipsos MORI. 
Archived from the original on 5 February 2009. Retrieved 17 October 2008. -Jump up ^ "Links plays into the record books". BBC News. 17 March 2009. -Jump up ^ Chowdhury, Saj (22 January 2007). "China in Ding's hands". BBC Sport. Retrieved 2 January 2011. -Jump up ^ "Lawn Tennis and Major T.Gem". The Birmingham Civic Society. Archived from the original on 18 August 2011. Retrieved 31 December 2010. -Jump up ^ Gould, Joe (10 April 2007). "The ancient Irish sport of hurling catches on in America". Columbia News Service (Columbia Journalism School). Retrieved 17 May 2011. -Jump up ^ "Shinty". Scottishsport.co.uk. Retrieved 28 April 2013. -Jump up ^ "Welsh dragon call for Union flag". BBC News. 27 November 2007. Retrieved 17 October 2008. -Jump up ^ "Britannia on British Coins". Chard. Retrieved 25 June 2006. -Jump up ^ Baker, Steve (2001). Picturing the Beast. University of Illinois Press. p. 52. ISBN 0-252-07030-5. -Further reading -Hitchens, Peter (2000). The Abolition of Britain: from Winston Churchill to Princess Diana. Second ed. San Francisco, Calif.: Encounter Books. xi, 332 p. ISBN 1-893554-18-X. -Lambert, Richard S. (1964). The Great Heritage: a History of Britain for Canadians. House of Grant, 1964 (and earlier editions and/or printings). 
-External links -Find more about -United Kingdom -at Wikipedia's sister projects -Search Wiktionary Definitions from Wiktionary -Search Commons Media from Commons -Search Wikinews News stories from Wikinews -Search Wikiquote Quotations from Wikiquote -Search Wikisource Source texts from Wikisource -Search Wikibooks Textbooks from Wikibooks -Search Wikivoyage Travel guide from Wikivoyage -Search Wikiversity Learning resources from Wikiversity -Government -Official website of HM Government -Official website of the British Monarchy -Official Yearbook of the United Kingdom statistics -The official site of the British Prime Minister's Office -General information -United Kingdom from the BBC News -United Kingdom entry at The World Factbook -United Kingdom from UCB Libraries GovPubs -United Kingdom at DMOZ -United Kingdom Encyclopædia Britannica entry -United Kingdom from the OECD -United Kingdom at the EU - Wikimedia Atlas of United Kingdom - Geographic data related to United Kingdom at OpenStreetMap -Key Development Forecasts for the United Kingdom from International Futures -Travel -Official tourist guide to Britain -[hide] v t e -United Kingdom topics -History -Chronology -Formation Georgian era Victorian era Edwardian era World War I Interwar World War II UK since 1945 (Postwar Britain) -By topic -Economic Empire Maritime Military -Geography -Administrative -Countries of the United Kingdom Crown dependencies Overseas territories City status Towns Former colonies -Physical -British Isles terminology Great Britain Geology Northern Ireland Lakes and lochs Mountains Rivers Volcanoes -Resources -Energy/Renewable energy Biodiesel Coal Geothermal Hydraulic frac. 
Hydroelectricity Marine North Sea oil Solar Wind Food Agriculture Fishing English Scottish Hunting Materials Flora Forestry Mining -Politics -Constitution Courts Elections Foreign relations Judiciary Law Law enforcement Legislation Monarchy monarchs Nationality Parliament House of Commons House of Lords Political parties -Government -Cabinet list Civil service Departments Prime Minister list -Military -Royal Navy Army Royal Air Force Weapons of mass destruction -Economy -Banks Bank of England Budget Economic geography Pound (currency) Stock Exchange Taxation Telecommunications Tourism Transport -Society -Affordability of housing Crime Demography Drug policy Education Ethnic groups Health care Immigration Languages Poverty Food banks Prostitution Public holidays Social care Social structure -Culture -Art Cinema Cuisine Identity Literature Media television Music Religion Sport Symbols Theatre -[show] -Countries of the United Kingdom -Outline Index -Book Category Portal WikiProject -[show] -Gnome-globe.svg Geographic locale -[show] v t e -Member states of the European Union -[show] -International organisations -[show] v t e -English-speaking world -[show] v t e -National personifications -Coordinates: 55°N 3°W -Categories: United KingdomBritish IslandsConstitutional monarchiesCountries in EuropeEnglish-speaking countries and territoriesG20 nationsG7 nationsG8 nationsIsland countriesLiberal democraciesMember states of NATOMember states of the Commonwealth of NationsMember states of the Council of EuropeMember states of the European UnionMember states of the Union for the MediterraneanMember states of the United NationsNorthern EuropeWestern Europe -Navigation menu -Create accountLog inArticleTalkReadView sourceView history - -Main page -Contents -Featured content -Current events -Random article -Donate to Wikipedia -Wikimedia Shop -Interaction -Help -About Wikipedia -Community portal -Recent changes -Contact page -Tools -What links here -Related changes -Upload file 
-Special pages -Permanent link -Page information -Wikidata item -Cite this page -Print/export -Create a book -Download as PDF -Printable version -Languages -Адыгэбзэ -Afrikaans -Akan -Alemannisch -አማርኛ -Ænglisc -Аҧсшәа -العربية -Aragonés -ܐܪܡܝܐ -Armãneashti -Arpetan -Asturianu -Avañe'ẽ -Авар -Azərbaycanca -বাংলা -Bahasa Banjar -Bân-lâm-gú -Башҡортса -Беларуская -Беларуская (тарашкевіца)‎ -भोजपुरी -Bikol Central -Bislama -Български -Boarisch -བོད་ཡིག -Bosanski -Brezhoneg -Буряад -Català -Чӑвашла -Cebuano -Čeština -Chavacano de Zamboanga -ChiShona -Corsu -Cymraeg -Dansk -Deutsch -ދިވެހިބަސް -Diné bizaad -Dolnoserbski -ཇོང་ཁ -Eesti -Ελληνικά -Emiliàn e rumagnòl -Español -Esperanto -Estremeñu -Euskara -فارسی -Fiji Hindi -Føroyskt -Français -Frysk -Furlan -Gaeilge -Gaelg -Gagauz -Gàidhlig -Galego -贛語 -ગુજરાતી -客家語/Hak-kâ-ngî -Хальмг -한국어 -Hausa -Hawaii -Հայերեն -हिन्दी -Hornjoserbsce -Hrvatski -Ido -Igbo -Ilokano -বিষ্ণুপ্রিয়া মণিপুরী -Bahasa Indonesia -Interlingua -Interlingue -Ирон -IsiZulu -Íslenska -Italiano -עברית -Basa Jawa -Kalaallisut -ಕನ್ನಡ -Kapampangan -Къарачай-малкъар -ქართული -Kaszëbsczi -Қазақша -Kernowek -Kinyarwanda -Kiswahili -Коми -Kongo -Kreyòl ayisyen -Kurdî -Кыргызча -Кырык мары -Ladino -Лезги -ລາວ -Latgaļu -Latina -Latviešu -Lëtzebuergesch -Lietuvių -Ligure -Limburgs -Lingála -Lojban -Lumbaart -Magyar -Македонски -Malagasy -മലയാളം -Malti -Māori -मराठी -მარგალური -مصرى -مازِرونی -Bahasa Melayu -Mìng-dĕ̤ng-ngṳ̄ -Mirandés -Монгол -မြန်မာဘာသာ -Nāhuatl -Dorerin Naoero -Nederlands -Nedersaksies -नेपाली -नेपाल भाषा -日本語 -Napulitano -Нохчийн -Nordfriisk -Norfuk / Pitkern -Norsk bokmål -Norsk nynorsk -Nouormand -Novial -Occitan -Олык марий -ଓଡ଼ିଆ -Oromoo -Oʻzbekcha -ਪੰਜਾਬੀ -Pangasinan -پنجابی -Papiamentu -پښتو -Перем Коми -ភាសាខ្មែរ -Picard -Piemontèis -Tok Pisin -Plattdüütsch -Polski -Ποντιακά -Português -Qırımtatarca -Reo tahiti -Ripoarisch -Română -Romani -Rumantsch -Runa Simi -Русиньскый -Русский -Саха тыла -Sámegiella -संस्कृतम् -Sardu -Scots 
-Seeltersk -Shqip -Sicilianu -සිංහල -Simple English -SiSwati -Slovenčina -Slovenščina -Словѣньскъ / ⰔⰎⰑⰂⰡⰐⰠⰔⰍⰟ -Ślůnski -Soomaaliga -کوردی -Sranantongo -Српски / srpski -Srpskohrvatski / српскохрватски -Basa Sunda -Suomi -Svenska -Tagalog -தமிழ் -Taqbaylit -Tarandíne -Татарча/tatarça -తెలుగు -Tetun -ไทย -Тоҷикӣ -ᏣᎳᎩ -Tsetsêhestâhese -Türkçe -Twi -Удмурт -ᨅᨔ ᨕᨘᨁᨗ -Українська -اردو -ئۇيغۇرچە / Uyghurche -Vahcuengh -Vèneto -Vepsän kel’ -Tiếng Việt -Volapük -Võro -Walon -文言 -West-Vlams -Winaray -Wolof -吴语 -ייִדיש -Yorùbá -粵語 -Zazaki -Zeêuws -Žemaitėška -中文 -Edit links -This page was last modified on 22 November 2014 at 11:19. -Text is available under the Creative Commons Attribution-ShareAlike License; additional terms may apply. By using this site, you agree to the Terms of Use and Privacy Policy. Wikipedia® is a registered trademark of the Wikimedia Foundation, Inc., a non-profit organization. -Privacy policyAbout WikipediaDisclaimersContact WikipediaDevelopersMobile viewWikimedia Foundation Powered by MediaWiki - - -World Trade Organization -From Wikipedia, the free encyclopedia -"WTO" redirects here. For other uses, see WTO (disambiguation). -World Trade Organization (English) -Organisation mondiale du commerce (French) -Organización Mundial del Comercio (Spanish) -World Trade Organization (logo and wordmark).svg -Official logo of WTO -WTO members and observers.svg - Members - Members, dually represented by the EU - Observers - Non-members -Abbreviation WTO -Formation 1 January 1995; 19 years ago -Type International trade organization -Purpose Liberalize international trade -Headquarters Centre William Rappard, Geneva, Switzerland -Coordinates 46.12°N 6.09°ECoordinates: 46.12°N 6.09°E -Region served Worldwide -Membership 160 member states[1] -Official language English, French, Spanish[2] -Director-General Roberto Azevêdo -Budget 196 million Swiss francs (approx. 
209 million US$) in 2011.[3] -Staff 640[4] -Website www.wto.org -The World Trade Organization (WTO) is an organization that intends to supervise and liberalize international trade. The organization officially commenced on 1 January 1995 under the Marrakech Agreement, replacing the General Agreement on Tariffs and Trade (GATT), which commenced in 1948.[5] The organization deals with regulation of trade between participating countries by providing a framework for negotiating and formalizing trade agreements and a dispute resolution process aimed at enforcing participants' adherence to WTO agreements, which are signed by representatives of member governments[6]:fol.9–10 and ratified by their parliaments.[7] Most of the issues that the WTO focuses on derive from previous trade negotiations, especially from the Uruguay Round (1986–1994). -The organization is attempting to complete negotiations on the Doha Development Round, which was launched in 2001 with an explicit focus on addressing the needs of developing countries. As of June 2012, the future of the Doha Round remained uncertain: the work programme lists 21 subjects in which the original deadline of 1 January 2005 was missed, and the round is still incomplete.[8] The conflict between free trade on industrial goods and services but retention of protectionism on farm subsidies to domestic agricultural sector (requested by developed countries) and the substantiation of the international liberalization of fair trade on agricultural products (requested by developing countries) remain the major obstacles. These points of contention have hindered any progress to launch new WTO negotiations beyond the Doha Development Round. 
As a result of this impasse, there has been an increasing number of bilateral free trade agreements signed.[9] As of July 2012, there were various negotiation groups in the WTO system for the current agricultural trade negotiation, which is at a stalemate.[10] -WTO's current Director-General is Roberto Azevêdo,[11][12] who leads a staff of over 600 people in Geneva, Switzerland.[13] A trade facilitation agreement known as the Bali Package was reached by all members on 7 December 2013, the first comprehensive agreement in the organization's history.[14][15] -Contents [hide] -1 History -1.1 GATT rounds of negotiations -1.1.1 From Geneva to Tokyo -1.1.2 Uruguay Round -1.2 Ministerial conferences -1.3 Doha Round (Doha Agenda) -2 Functions -3 Principles of the trading system -4 Organizational structure -5 Decision-making -6 Dispute settlement -7 Accession and membership -7.1 Accession process -7.2 Members and observers -8 Agreements -9 Office of director-general -9.1 List of directors-general -10 See also -11 Notes and references -12 External links -History - -The economists Harry White (left) and John Maynard Keynes at the Bretton Woods Conference. Both had been strong advocates of a centrally controlled international trade environment and recommended the establishment of three institutions: the IMF (for fiscal and monetary issues); the World Bank (for financial and structural issues); and the ITO (for international economic cooperation).[16] -The WTO's predecessor, the General Agreement on Tariffs and Trade (GATT), was established after World War II in the wake of other new multilateral institutions dedicated to international economic cooperation – notably the Bretton Woods institutions known as the World Bank and the International Monetary Fund. A comparable international institution for trade, named the International Trade Organization, was successfully negotiated. 
The ITO was to be a United Nations specialized agency and would address not only trade barriers but other issues indirectly related to trade, including employment, investment, restrictive business practices, and commodity agreements. But the ITO treaty was not approved by the U.S. and a few other signatories and never went into effect.[17][18][19] -In the absence of an international organization for trade, the GATT would over the years "transform itself" into a de facto international organization.[20] -GATT rounds of negotiations -See also: General Agreement on Tariffs and Trade -The GATT was the only multilateral instrument governing international trade from 1946 until the WTO was established on 1 January 1995.[21] Despite attempts in the mid-1950s and 1960s to create some form of institutional mechanism for international trade, the GATT continued to operate for almost half a century as a semi-institutionalized multilateral treaty regime on a provisional basis.[22] -From Geneva to Tokyo -Seven rounds of negotiations occurred under GATT. The first real GATT trade rounds concentrated on further reducing tariffs. Then, the Kennedy Round in the mid-sixties brought about a GATT anti-dumping Agreement and a section on development. The Tokyo Round during the seventies was the first major attempt to tackle trade barriers that do not take the form of tariffs, and to improve the system, adopting a series of agreements on non-tariff barriers, which in some cases interpreted existing GATT rules, and in others broke entirely new ground. Because these plurilateral agreements were not accepted by the full GATT membership, they were often informally called "codes". Several of these codes were amended in the Uruguay Round, and turned into multilateral commitments accepted by all WTO members. 
Only four remained plurilateral (those on government procurement, bovine meat, civil aircraft and dairy products), but in 1997 WTO members agreed to terminate the bovine meat and dairy agreements, leaving only two.[21] -Uruguay Round -Main article: Uruguay Round - -During the Doha Round, the US government blamed Brazil and India for being inflexible and the EU for impeding agricultural imports.[23] The then-President of Brazil, Luiz Inácio Lula da Silva (above right), responded to the criticisms by arguing that progress would only be achieved if the richest countries (especially the US and countries in the EU) made deeper cuts in agricultural subsidies and further opened their markets for agricultural goods.[24] -Well before GATT's 40th anniversary, its members concluded that the GATT system was straining to adapt to a new globalizing world economy.[25][26] In response to the problems identified in the 1982 Ministerial Declaration (structural deficiencies, spill-over impacts of certain countries' policies on world trade GATT could not manage etc.), the eighth GATT round – known as the Uruguay Round – was launched in September 1986, in Punta del Este, Uruguay.[25] -It was the biggest negotiating mandate on trade ever agreed: the talks were going to extend the trading system into several new areas, notably trade in services and intellectual property, and to reform trade in the sensitive sectors of agriculture and textiles; all the original GATT articles were up for review.[26] The Final Act concluding the Uruguay Round and officially establishing the WTO regime was signed 15 April 1994, during the ministerial meeting at Marrakesh, Morocco, and hence is known as the Marrakesh Agreement.[27] -The GATT still exists as the WTO's umbrella treaty for trade in goods, updated as a result of the Uruguay Round negotiations (a distinction is made between GATT 1994, the updated parts of GATT, and GATT 1947, the original agreement which is still the heart of GATT 1994).[25] GATT 
1994 is not however the only legally binding agreement included via the Final Act at Marrakesh; a long list of about 60 agreements, annexes, decisions and understandings was adopted. The agreements fall into a structure with six main parts: -The Agreement Establishing the WTO -Goods and investment – the Multilateral Agreements on Trade in Goods including the GATT 1994 and the Trade Related Investment Measures (TRIMS) -Services — the General Agreement on Trade in Services -Intellectual property – the Agreement on Trade-Related Aspects of Intellectual Property Rights (TRIPS) -Dispute settlement (DSU) -Reviews of governments' trade policies (TPRM)[28] -In terms of the WTO's principle relating to tariff "ceiling-binding" (No. 3), the Uruguay Round has been successful in increasing binding commitments by both developed and developing countries, as may be seen in the percentages of tariffs bound before and after the 1986–1994 talks.[29] -Ministerial conferences - -The World Trade Organization Ministerial Conference of 1998, in the Palace of Nations (Geneva, Switzerland). -The highest decision-making body of the WTO is the Ministerial Conference, which usually meets every two years. It brings together all members of the WTO, all of which are countries or customs unions. The Ministerial Conference can take decisions on all matters under any of the multilateral trade agreements. The inaugural ministerial conference was held in Singapore in 1996. Disagreements between largely developed and developing economies emerged during this conference over four issues initiated by this conference, which led to them being collectively referred to as the "Singapore issues". The second ministerial conference was held in Geneva in Switzerland. The third conference in Seattle, Washington ended in failure, with massive demonstrations and police and National Guard crowd-control efforts drawing worldwide attention. 
The fourth ministerial conference was held in Doha in the Persian Gulf nation of Qatar. The Doha Development Round was launched at the conference. The conference also approved the joining of China, which became the 143rd member to join. The fifth ministerial conference was held in Cancún, Mexico, aiming at forging agreement on the Doha round. An alliance of 22 southern states, the G20 developing nations (led by India, China,[30] Brazil, ASEAN led by the Philippines), resisted demands from the North for agreements on the so-called "Singapore issues" and called for an end to agricultural subsidies within the EU and the US. The talks broke down without progress. -The sixth WTO ministerial conference was held in Hong Kong from 13–18 December 2005. It was considered vital if the four-year-old Doha Development Round negotiations were to move forward sufficiently to conclude the round in 2006. In this meeting, countries agreed to phase out all their agricultural export subsidies by the end of 2013, and terminate any cotton export subsidies by the end of 2006. Further concessions to developing countries included an agreement to introduce duty-free, tariff-free access for goods from the Least Developed Countries, following the Everything but Arms initiative of the European Union — but with up to 3% of tariff lines exempted. Other major issues were left for further negotiation to be completed by the end of 2010. The WTO General Council, on 26 May 2009, agreed to hold a seventh WTO ministerial conference session in Geneva from 30 November-3 December 2009. A statement by chairman Amb. Mario Matus acknowledged that the prime purpose was to remedy a breach of protocol requiring two-yearly "regular" meetings, which had lapsed with the Doha Round failure in 2005, and that the "scaled-down" meeting would not be a negotiating session, but "emphasis will be on transparency and open discussion rather than on small group processes and informal negotiating structures". 
The general theme for discussion was "The WTO, the Multilateral Trading System and the Current Global Economic Environment"[31] -Doha Round (Doha Agenda) -Main article: Doha Development Round - -The Doha Development Round, which started in 2001, is at an impasse. -The WTO launched the current round of negotiations, the Doha Development Round, at the fourth ministerial conference in Doha, Qatar in November 2001. This was to be an ambitious effort to make globalization more inclusive and help the world's poor, particularly by slashing barriers and subsidies in farming.[32] The initial agenda comprised both further trade liberalization and new rule-making, underpinned by commitments to strengthen substantial assistance to developing countries.[33] -The negotiations have been highly contentious. Disagreements still continue over several key areas including agriculture subsidies, which emerged as critical in July 2006.[34] According to a European Union statement, "The 2008 Ministerial meeting broke down over a disagreement between exporters of agricultural bulk commodities and countries with large numbers of subsistence farmers on the precise terms of a 'special safeguard measure' to protect farmers from surges in imports."[35] The position of the European Commission is that "The successful conclusion of the Doha negotiations would confirm the central role of multilateral liberalisation and rule-making. It would confirm the WTO as a powerful shield against protectionist backsliding."[33] An impasse remains and, as of August 2013, agreement has not been reached, despite intense negotiations at several ministerial conferences and at other sessions. On 27 March 2013, the chairman of agriculture talks announced "a proposal to loosen price support disciplines for developing countries’ public stocks and domestic food aid." 
He added: “...we are not yet close to agreement—in fact, the substantive discussion of the proposal is only beginning.”[36] -[show]v · t · eGATT and WTO trade rounds[37] -Functions -Among the various functions of the WTO, these are regarded by analysts as the most important: -It oversees the implementation, administration and operation of the covered agreements.[38][39] -It provides a forum for negotiations and for settling disputes.[40][41] -Additionally, it is the WTO's duty to review and propagate the national trade policies, and to ensure the coherence and transparency of trade policies through surveillance in global economic policy-making.[39][41] Another priority of the WTO is the assistance of developing, least-developed and low-income countries in transition to adjust to WTO rules and disciplines through technical cooperation and training.[42] -(i) The WTO shall facilitate the implementation, administration and operation and further the objectives of this Agreement and of the Multilateral Trade Agreements, and shall also provide the framework for the implementation, administration and operation of the Plurilateral Trade Agreements. -(ii) The WTO shall provide the forum for negotiations among its members concerning their multilateral trade relations in matters dealt with under the Agreement in the Annexes to this Agreement. -(iii) The WTO shall administer the Understanding on Rules and Procedures Governing the Settlement of Disputes. -(iv) The WTO shall administer the Trade Policy Review Mechanism. -(v) With a view to achieving greater coherence in global economic policy making, the WTO shall cooperate, as appropriate, with the International Monetary Fund (IMF) and with the International Bank for Reconstruction and Development (IBRD) and its affiliated agencies. [43] -The above five listings are the additional functions of the World Trade Organization. 
As globalization proceeds in today's society, the necessity of an International Organization to manage the trading systems has been of vital importance. As the trade volume increases, issues such as protectionism, trade barriers, subsidies, violation of intellectual property arise due to the differences in the trading rules of every nation. The World Trade Organization serves as the mediator between the nations when such problems arise. WTO could be referred to as the product of globalization and also as one of the most important organizations in today's globalized society. -The WTO is also a center of economic research and analysis: regular assessments of the global trade picture in its annual publications and research reports on specific topics are produced by the organization.[44] Finally, the WTO cooperates closely with the two other components of the Bretton Woods system, the IMF and the World Bank.[40] -Principles of the trading system -The WTO establishes a framework for trade policies; it does not define or specify outcomes. That is, it is concerned with setting the rules of the trade policy games.[45] Five principles are of particular importance in understanding both the pre-1994 GATT and the WTO: -Non-discrimination. It has two major components: the most favoured nation (MFN) rule, and the national treatment policy. Both are embedded in the main WTO rules on goods, services, and intellectual property, but their precise scope and nature differ across these areas. The MFN rule requires that a WTO member must apply the same conditions on all trade with other WTO members, i.e. 
a WTO member has to grant the most favorable conditions under which it allows trade in a certain product type to all other WTO members.[45] "Grant someone a special favour and you have to do the same for all other WTO members."[29] National treatment means that imported goods should be treated no less favorably than domestically produced goods (at least after the foreign goods have entered the market) and was introduced to tackle non-tariff barriers to trade (e.g. technical standards, security standards et al. discriminating against imported goods).[45] -Reciprocity. It reflects both a desire to limit the scope of free-riding that may arise because of the MFN rule, and a desire to obtain better access to foreign markets. A related point is that for a nation to negotiate, it is necessary that the gain from doing so be greater than the gain available from unilateral liberalization; reciprocal concessions intend to ensure that such gains will materialise.[46] -Binding and enforceable commitments. The tariff commitments made by WTO members in a multilateral trade negotiation and on accession are enumerated in a schedule (list) of concessions. These schedules establish "ceiling bindings": a country can change its bindings, but only after negotiating with its trading partners, which could mean compensating them for loss of trade. If satisfaction is not obtained, the complaining country may invoke the WTO dispute settlement procedures.[29][46] -Transparency. The WTO members are required to publish their trade regulations, to maintain institutions allowing for the review of administrative decisions affecting trade, to respond to requests for information by other members, and to notify changes in trade policies to the WTO. 
These internal transparency requirements are supplemented and facilitated by periodic country-specific reports (trade policy reviews) through the Trade Policy Review Mechanism (TPRM).[47] The WTO system tries also to improve predictability and stability, discouraging the use of quotas and other measures used to set limits on quantities of imports.[29] -Safety valves. In specific circumstances, governments are able to restrict trade. The WTO's agreements permit members to take measures to protect not only the environment but also public health, animal health and plant health.[48] -There are three types of provision in this direction: -articles allowing for the use of trade measures to attain non-economic objectives; -articles aimed at ensuring "fair competition"; members must not use environmental protection measures as a means of disguising protectionist policies.[48] -provisions permitting intervention in trade for economic reasons.[47] -Exceptions to the MFN principle also allow for preferential treatment of developing countries, regional free trade areas and customs unions.[6]:fol.93 -Organizational structure -The General Council has the following subsidiary bodies which oversee committees in different areas: -Council for Trade in Goods -There are 11 committees under the jurisdiction of the Goods Council each with a specific task. All members of the WTO participate in the committees. The Textiles Monitoring Body is separate from the other committees but still under the jurisdiction of Goods Council. The body has its own chairman and only 10 members. 
The body also has several groups relating to textiles.[49] -Council for Trade-Related Aspects of Intellectual Property Rights -Information on intellectual property in the WTO, news and official records of the activities of the TRIPS Council, and details of the WTO's work with other international organizations in the field.[50] -Council for Trade in Services -The Council for Trade in Services operates under the guidance of the General Council and is responsible for overseeing the functioning of the General Agreement on Trade in Services (GATS). It is open to all WTO members, and can create subsidiary bodies as required.[51] -Trade Negotiations Committee -The Trade Negotiations Committee (TNC) is the committee that deals with the current trade talks round. The chair is WTO's director-general. As of June 2012 the committee was tasked with the Doha Development Round.[52] -The Service Council has three subsidiary bodies: financial services, domestic regulations, GATS rules and specific commitments.[49] The council has several different committees, working groups, and working parties.[53] There are committees on the following: Trade and Environment; Trade and Development (Subcommittee on Least-Developed Countries); Regional Trade Agreements; Balance of Payments Restrictions; and Budget, Finance and Administration. There are working parties on the following: Accession. There are working groups on the following: Trade, debt and finance; and Trade and technology transfer. 
-Decision-making -The WTO describes itself as "a rules-based, member-driven organization — all decisions are made by the member governments, and the rules are the outcome of negotiations among members".[54] The WTO Agreement foresees votes where consensus cannot be reached, but the practice of consensus dominates the process of decision-making.[55] -Richard Harold Steinberg (2002) argues that although the WTO's consensus governance model provides law-based initial bargaining, trading rounds close through power-based bargaining favouring Europe and the U.S., and may not lead to Pareto improvement.[56] -Dispute settlement -Main article: Dispute settlement in the WTO -In 1994, the WTO members agreed on the Understanding on Rules and Procedures Governing the Settlement of Disputes (DSU) annexed to the "Final Act" signed in Marrakesh in 1994.[57] Dispute settlement is regarded by the WTO as the central pillar of the multilateral trading system, and as a "unique contribution to the stability of the global economy".[58] WTO members have agreed that, if they believe fellow-members are violating trade rules, they will use the multilateral system of settling disputes instead of taking action unilaterally.[59] -The operation of the WTO dispute settlement process involves the DSB panels, the Appellate Body, the WTO Secretariat, arbitrators, independent experts and several specialized institutions.[60] Bodies involved in the dispute settlement process, World Trade Organization. -Accession and membership -Main article: World Trade Organization accession and membership -The process of becoming a WTO member is unique to each applicant country, and the terms of accession are dependent upon the country's stage of economic development and current trade regime.[61] The process takes about five years, on average, but it can last more if the country is less than fully committed to the process or if political issues interfere. 
The shortest accession negotiation was that of the Kyrgyz Republic, while the longest was that of Russia, which, having first applied to join GATT in 1993, was approved for membership in December 2011 and became a WTO member on 22 August 2012.[62] The second longest was that of Vanuatu, whose Working Party on the Accession of Vanuatu was established on 11 July 1995. After a final meeting of the Working Party in October 2001, Vanuatu requested more time to consider its accession terms. In 2008, it indicated its interest to resume and conclude its WTO accession. The Working Party on the Accession of Vanuatu was reconvened informally on 4 April 2011 to discuss Vanuatu's future WTO membership. The re-convened Working Party completed its mandate on 2 May 2011. The General Council formally approved the Accession Package of Vanuatu on 26 October 2011. On 24 August 2012, the WTO welcomed Vanuatu as its 157th member.[63] An offer of accession is only given once consensus is reached among interested parties.[64] -Accession process - -WTO accession progress: - Members (including dual-representation with the European Union) - Draft Working Party Report or Factual Summary adopted - Goods and/or Services offers submitted - Memorandum on Foreign Trade Regime (FTR) submitted - Observer, negotiations to start later or no Memorandum on FTR submitted - Frozen procedures or no negotiations in the last 3 years - No official interaction with the WTO -A country wishing to accede to the WTO submits an application to the General Council, and has to describe all aspects of its trade and economic policies that have a bearing on WTO agreements.[65] The application is submitted to the WTO in a memorandum which is examined by a working party open to all interested WTO Members.[66] -After all necessary background information has been acquired, the working party focuses on issues of discrepancy between the WTO rules and the applicant's international and domestic trade policies and laws. 
The working party determines the terms and conditions of entry into the WTO for the applicant nation, and may consider transitional periods to allow countries some leeway in complying with the WTO rules.[61] -The final phase of accession involves bilateral negotiations between the applicant nation and other working party members regarding the concessions and commitments on tariff levels and market access for goods and services. The new member's commitments are to apply equally to all WTO members under normal non-discrimination rules, even though they are negotiated bilaterally.[65] -When the bilateral talks conclude, the working party sends to the general council or ministerial conference an accession package, which includes a summary of all the working party meetings, the Protocol of Accession (a draft membership treaty), and lists ("schedules") of the member-to-be's commitments. Once the general council or ministerial conference approves of the terms of accession, the applicant's parliament must ratify the Protocol of Accession before it can become a member.[67] Some countries may have faced tougher and a much longer accession process due to challenges during negotiations with other WTO members, such as Vietnam, whose negotiations took more than 11 years before it became official member in January 2007.[68] -Members and observers -The WTO has 160 members and 24 observer governments.[69] In addition to states, the European Union is a member. WTO members do not have to be full sovereign nation-members. Instead, they must be a customs territory with full autonomy in the conduct of their external commercial relations. Thus Hong Kong has been a member since 1995 (as "Hong Kong, China" since 1997) predating the People's Republic of China, which joined in 2001 after 15 years of negotiations. 
The Republic of China (Taiwan) acceded to the WTO in 2002 as "Separate Customs Territory of Taiwan, Penghu, Kinmen and Matsu" (Chinese Taipei) despite its disputed status.[70] The WTO Secretariat omits the official titles (such as Counselor, First Secretary, Second Secretary and Third Secretary) of the members of Chinese Taipei's Permanent Mission to the WTO, except for the titles of the Permanent Representative and the Deputy Permanent Representative.[71] -As of 2007, WTO member states represented 96.4% of global trade and 96.7% of global GDP.[72] Iran, followed by Algeria, are the economies with the largest GDP and trade outside the WTO, using 2005 data.[73][74] With the exception of the Holy See, observers must start accession negotiations within five years of becoming observers. A number of international intergovernmental organizations have also been granted observer status to WTO bodies.[75] 14 UN member states have no official affiliation with the WTO. -Agreements -Further information: Uruguay Round -The WTO oversees about 60 different agreements which have the status of international legal texts. Member countries must sign and ratify all WTO agreements on accession.[76] A discussion of some of the most important agreements follows. The Agreement on Agriculture came into effect with the establishment of the WTO at the beginning of 1995. The AoA has three central concepts, or "pillars": domestic support, market access and export subsidies. The General Agreement on Trade in Services was created to extend the multilateral trading system to the service sector, in the same way as the General Agreement on Tariffs and Trade (GATT) provided such a system for merchandise trade. The agreement entered into force in January 1995. The Agreement on Trade-Related Aspects of Intellectual Property Rights sets down minimum standards for many forms of intellectual property (IP) regulation. 
It was negotiated at the end of the Uruguay Round of the General Agreement on Tariffs and Trade (GATT) in 1994.[77] -The Agreement on the Application of Sanitary and Phytosanitary Measures—also known as the SPS Agreement—was negotiated during the Uruguay Round of GATT, and entered into force with the establishment of the WTO at the beginning of 1995. Under the SPS agreement, the WTO sets constraints on members' policies relating to food safety (bacterial contaminants, pesticides, inspection and labelling) as well as animal and plant health (imported pests and diseases). The Agreement on Technical Barriers to Trade is an international treaty of the World Trade Organization. It was negotiated during the Uruguay Round of the General Agreement on Tariffs and Trade, and entered into force with the establishment of the WTO at the end of 1994. Its objective is to "ensure that technical regulations and standards, as well as testing and certification procedures, do not create unnecessary obstacles to trade".[78] The Agreement on Customs Valuation, formally known as the Agreement on Implementation of Article VII of GATT, prescribes methods of customs valuation that Members are to follow. Chiefly, it adopts the "transaction value" approach. -In December 2013, the biggest agreement within the WTO was signed and known as the Bali Package.[79] -Office of director-general - -The headquarters of the World Trade Organization, in Geneva, Switzerland. -The procedures for the appointment of the WTO director-general were published in January 2003.[80] Additionally, there are four deputy directors-general. 
As of 1 October 2013, under director-general Roberto Azevêdo, the four deputy directors-general are Yi Xiaozhun of China, Karl-Ernst Brauner of Germany, Yonov Frederick Agah of Nigeria and David Shark of the United States.[81] -List of directors-general -Source: Official website[82] -Brazil Roberto Azevedo, 2013– -France Pascal Lamy, 2005–2013 -Thailand Supachai Panitchpakdi, 2002–2005 -New Zealand Mike Moore, 1999–2002 -Italy Renato Ruggiero, 1995–1999 -Republic of Ireland Peter Sutherland, 1995 -(Heads of the precursor organization, GATT): -Republic of Ireland Peter Sutherland, 1993–1995 -Switzerland Arthur Dunkel, 1980–1993 -Switzerland Olivier Long, 1968–1980 -United Kingdom Eric Wyndham White, 1948–1968 -See also -Agreement on Trade Related Investment Measures (TRIMS) -Agreement on Trade-Related Aspects of Intellectual Property Rights (TRIPS) -Aide-mémoire non-paper -Anti-globalization movement -Criticism of the World Trade Organization -Foreign Affiliate Trade Statistics -Global administrative law -Globality -Information Technology Agreement -International Trade Centre -Labour Standards in the World Trade Organisation -List of member states of the World Trade Organization -North American Free Trade Agreement (NAFTA) -Subsidy -Swiss Formula -Trade bloc -Washington Consensus -World Trade Report -World Trade Organization Ministerial Conference of 1999 protest activity -China and the World Trade Organization -Notes and references -Jump up ^ Members and Observers at WTO official website -Jump up ^ Languages, Documentation and Information Management Division at WTO official site -Jump up ^ "WTO Secretariat budget for 2011". WTO official site. Retrieved 25 August 2008. -Jump up ^ Understanding the WTO: What We Stand For_ Fact File -Jump up ^ World Trade Organization - UNDERSTANDING THE WTO: BASICS -^ Jump up to: a b Understanding the WTO Handbook at WTO official website. (Note that the document's printed folio numbers do not match the pdf page numbers.) 
-Jump up ^ Malanczuk, P. (1999). "International Organisations and Space Law: World Trade Organization". Encyclopaedia Britannica 442. p. 305. Bibcode:1999ESASP.442..305M. -Jump up ^ Understanding the WTO: The Doha Agenda -Jump up ^ The Challenges to the World Trade Organization: It’s All About Legitimacy THE BROOKINGS INSTITUTION, Policy Paper 2011-04 -Jump up ^ GROUPS IN THE WTO Updated 1 July 2013 -Jump up ^ Bourcier, Nicolas (21 May 2013). "Roberto Azevedo's WTO appointment gives Brazil a seat at the top table". Guardian Weekly. Retrieved 2 September 2013. -Jump up ^ "Roberto Azevêdo takes over". WTO official website. 1 September 2013. Retrieved 2 September 2013. -Jump up ^ "Overview of the WTO Secretariat". WTO official website. Retrieved 2 September 2013. -Jump up ^ Ninth WTO Ministerial Conference | WTO - MC9 -Jump up ^ BBC News - WTO agrees global trade deal worth $1tn -Jump up ^ A.E. Eckes Jr., US Trade History, 73 -* A. Smithies, Reflections on the Work of Keynes, 578–601 -* N. Warren, Internet and Globalization, 193 -Jump up ^ P. van den Bossche, The Law and Policy of the World Trade Organization, 80 -Jump up ^ Palmeter-Mavroidis, Dispute Settlement, 2 -Jump up ^ Fergusson, Ian F. (9 May 2007). "The World Trade Organization: Background and Issues" (PDF). Congressional Research Service. p. 4. Retrieved 15 August 2008. -Jump up ^ It was contemplated that the GATT would be applied for several years until the ITO came into force. However, since the ITO was never brought into being, the GATT gradually became the focus for international governmental cooperation on trade matters with economist Nicholas Halford overseeing the implementation of GATT in members policies. (P. van den Bossche, The Law and Policy of the World Trade Organization, 81; J.H. Jackson, Managing the Trading System, 134). -^ Jump up to: a b The GATT Years: from Havana to Marrakesh, WTO official site -Jump up ^ Footer, M. E. Analysis of the World Trade Organization, 17 -Jump up ^ B.S. 
Klapper, With a "Short Window" -Jump up ^ Lula, Time to Get Serious about Agricultural Subsidies -^ Jump up to: a b c P. Gallagher, The First Ten Years of the WTO, 4 -^ Jump up to: a b The Uruguay Round, WTO official site -Jump up ^ "Legal texts – Marrakesh agreement". WTO. Retrieved 30 May 2010. -Jump up ^ Overview: a Navigational Guide, WTO official site. For the complete list of "The Uruguay Round Agreements", see WTO legal texts, WTO official site, and Uruguay Round Agreements, Understandings, Decisions and Declarations, WorldTradeLaw.net -^ Jump up to: a b c d Principles of the Trading System, WTO official site -Jump up ^ "Five Years of China WTO Membership. EU and US Perspectives about China's Compliance with Transparency Commitments and the Transitional Review Mechanism". Papers.ssrn.com. Retrieved 30 May 2010. -Jump up ^ WTO to hold 7th Ministerial Conference on 30 November-2 December 2009 WTO official website -Jump up ^ "In the twilight of Doha". The Economist (The Economist): 65. 27 July 2006. -^ Jump up to: a b European Commission The Doha Round -Jump up ^ Fergusson, Ian F. (18 January 2008). "World Trade Organization Negotiations: The Doha Development Agenda" (PDF). Congressional Research Service. Retrieved 13 April 2012. Page 9 (folio CRS-6) -Jump up ^ WTO trade negotiations: Doha Development Agenda Europa press release, 31 October 2011 -Jump up ^ "Members start negotiating proposal on poor countries’ food stockholding". WTO official website. 27 March 2013. Retrieved 2 September 2013. -Jump up ^ a)The GATT years: from Havana to Marrakesh, World Trade Organization -b)Timeline: World Trade Organization – A chronology of key events, BBC News -c)Brakman-Garretsen-Marrewijk-Witteloostuijn, Nations and Firms in the Global Economy, Chapter 10: Trade and Capital Restriction -Jump up ^ Functions of the WTO, IISD -^ Jump up to: a b Main Functions, WTO official site -^ Jump up to: a b A Bredimas, International Economic Law, II, 17 -^ Jump up to: a b C. 
Deere, Decision-making in the WTO: Medieval or Up-to-Date? -Jump up ^ WTO Assistance for Developing Countries[dead link], WTO official site -Jump up ^ Sinha, Aparijita. [1]. "What are the functions and objectives of the WTO?". Retrieved on 13 April, 2014. -Jump up ^ Economic research and analysis, WTO official site -^ Jump up to: a b c B. Hoekman, The WTO: Functions and Basic Principles, 42 -^ Jump up to: a b B. Hoekman, The WTO: Functions and Basic Principles, 43 -^ Jump up to: a b B. Hoekman, The WTO: Functions and Basic Principles, 44 -^ Jump up to: a b Understanding the WTO: What we stand for -^ Jump up to: a b "Fourth level: down to the nitty-gritty". WTO official site. Retrieved 18 August 2008. -Jump up ^ "Intellectual property – overview of TRIPS Agreement". Wto.org. 15 April 1994. Retrieved 30 May 2010. -Jump up ^ "The Services Council, its Committees and other subsidiary bodies". WTO official site. Retrieved 14 August 2008. -Jump up ^ "The Trade Negotiations Committee". WTO official site. Retrieved 14 August 2008. -Jump up ^ "WTO organization chart". WTO official site. Retrieved 14 August 2008. -Jump up ^ Decision-making at WTO official site -Jump up ^ Decision-Making in the World Trade Organization Abstract from Journal of International Economic Law at Oxford Journals -Jump up ^ Steinberg, Richard H. "In the Shadow of Law or Power? Consensus-based Bargaining and Outcomes in the GATT/WTO." International Organization. Spring 2002. pp. 339–374. -Jump up ^ Stewart-Dawyer, The WTO Dispute Settlement System, 7 -Jump up ^ S. Panitchpakdi, The WTO at ten, 8. -Jump up ^ Settling Disputes:a Unique Contribution, WTO official site -Jump up ^ "Disputes – Dispute Settlement CBT – WTO Bodies involved in the dispute settlement process – The Dispute Settlement Body (DSB) – Page 1". WTO. 25 July 1996. Retrieved 21 May 2011. 
-^ Jump up to: a b Accessions Summary, Center for International Development -Jump up ^ Ministerial Conference approves Russia's WTO membership WTO News Item, 16 December 2011 -Jump up ^ Accession status: Vanuatu. WTO. Retrieved on 12 July 2013. -Jump up ^ C. Michalopoulos, WTO Accession, 64 -^ Jump up to: a b Membership, Alliances and Bureaucracy, WTO official site -Jump up ^ C. Michalopoulos, WTO Accession, 62–63 -Jump up ^ How to Become a Member of the WTO, WTO official site -Jump up ^ Napier, Nancy K.; Vuong, Quan Hoang (2013). What we see, why we worry, why we hope: Vietnam going forward. Boise, ID, USA: Boise State University CCI Press. p. 140. ISBN 978-0985530587. -Jump up ^ "Members and Observers". World Trade Organization. 24 August 2012. -Jump up ^ Jackson, J. H. Sovereignty, 109 -Jump up ^ ROC Government Publication -Jump up ^ "Accession in perspective". World Trade Organization. Retrieved 22 December 2013. -Jump up ^ "ANNEX 1. STATISTICAL SURVEY". World Trade Organization. 2005. Retrieved 22 December 2013. -Jump up ^ Arjomandy, Danial (21 November 2013). "Iranian Membership in the World Trade Organization: An Unclear Future". Iranian Studies. Retrieved 22 December 2013. -Jump up ^ International intergovernmental organizations granted observer status to WTO bodies at WTO official website -Jump up ^ "Legal texts – the WTO agreements". WTO. Retrieved 30 May 2010. -Jump up ^ Understanding the WTO - Intellectual property: protection and enforcement. WTO. Retrieved on 29 July 2013. -Jump up ^ "A Summary of the Final Act of the Uruguay Round". Wto.org. Retrieved 30 May 2010. -Jump up ^ Zarocostas, John (7 December 2013). "Global Trade Deal Reached". WWD. Retrieved 8 December 2013. -Jump up ^ "WT/L/509". WTO. Retrieved 18 February 2013. -Jump up ^ "Director-General Elect Azevêdo announces his four Deputy Directors-General". 17 August 2013. Retrieved 2 September 2013. -Jump up ^ "Previous GATT and WTO Directors-General". WTO. Retrieved 21 May 2011. 
-External links - Wikiquote has quotations related to: World Trade Organization - Wikimedia Commons has media related to World Trade Organization. -Official pages -Official WTO homepage -WTO 10th Anniversary PDF (1.40 MB) — Highlights of the first decade, Annual Report 2005 pages 116–166 -Glossary of terms—a guide to 'WTO-speak' -International Trade Centre — joint UN/WTO agency -Government pages on the WTO -European Union position on the WTO -Media pages on the WTO -World Trade Organization -BBC News — Profile: WTO -Guardian Unlimited — Special Report: The World Trade Organisation ongoing coverage -Non-governmental organization pages on the WTO -Gatt.org — Parody of official WTO page by The Yes Men -Public Citizen -Transnational Institute: Beyond the WTO -[show] v t e -World Trade Organization -[show] v t e -International trade -[show] v t e -International organizations -Authority control -WorldCat VIAF: 149937768 LCCN: no94018277 ISNI: 0000 0001 2296 2735 GND: 2145784-0 SELIBR: 135910 ULAN: 500292980 NDL: 00577475 NKC: kn20010711437 BNE: XX4574846 -Categories: World Trade OrganizationInternational tradeInternational trade organizationsOrganisations based in GenevaOrganizations established in 1995World government -Navigation menu -Create accountLog inArticleTalkReadView sourceView history - -Main page -Contents -Featured content -Current events -Random article -Donate to Wikipedia -Wikimedia Shop -Interaction -Help -About Wikipedia -Community portal -Recent changes -Contact page -Tools -What links here -Related changes -Upload file -Special pages -Permanent link -Page information -Wikidata item -Cite this page -Print/export -Create a book -Download as PDF -Printable version -Languages -Afrikaans -العربية -Aragonés -Asturianu -Azərbaycanca -বাংলা -Bân-lâm-gú -Беларуская -Беларуская (тарашкевіца)‎ -Български -Bosanski -Brezhoneg -Català -Čeština -Cymraeg -Dansk -Deutsch -Eesti -Ελληνικά -Español -Esperanto -Euskara -فارسی -Fiji Hindi -Føroyskt -Français -Frysk 
-Galego -ગુજરાતી -客家語/Hak-kâ-ngî -한국어 -Հայերեն -हिन्दी -Hrvatski -Ido -Ilokano -Bahasa Indonesia -Íslenska -Italiano -עברית -Basa Jawa -ಕನ್ನಡ -Къарачай-малкъар -ქართული -Қазақша -Kiswahili -Latina -Latviešu -Lietuvių -Magyar -Македонски -മലയാളം -मराठी -مصرى -Bahasa Melayu -Baso Minangkabau -မြန်မာဘာသာ -Nederlands -नेपाली -नेपाल भाषा -日本語 -Нохчийн -Norsk bokmål -Norsk nynorsk -Occitan -Oʻzbekcha -ਪੰਜਾਬੀ -پنجابی -پښتو -ភាសាខ្មែរ -Piemontèis -Polski -Português -Română -Русиньскый -Русский -Саха тыла -Shqip -සිංහල -Simple English -Slovenčina -Slovenščina -کوردی -Српски / srpski -Srpskohrvatski / српскохрватски -Suomi -Svenska -Tagalog -தமிழ் -Татарча/tatarça -తెలుగు -ไทย -Тоҷикӣ -Türkçe -Türkmençe -Українська -اردو -ئۇيغۇرچە / Uyghurche -Tiếng Việt -Winaray -ייִדיש -Yorùbá -粵語 -Žemaitėška -中文 -Edit links -This page was last modified on 22 November 2014 at 14:33. -Text is available under the Creative Commons Attribution-ShareAlike License; additional terms may apply. By using this site, you agree to the Terms of Use and Privacy Policy. Wikipedia® is a registered trademark of the Wikimedia Foundation, Inc., a non-profit organization. -Privacy policyAbout WikipediaDisclaimersContact WikipediaDevelopersMobile viewWikimedia Foundation Powered by MediaWiki \ No newline at end of file diff --git a/vendor/github.com/blevesearch/bleve/v2/search/facet/facet_builder_datetime.go b/vendor/github.com/blevesearch/bleve/v2/search/facet/facet_builder_datetime.go deleted file mode 100644 index a316ee4da..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/search/facet/facet_builder_datetime.go +++ /dev/null @@ -1,163 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package facet - -import ( - "reflect" - "sort" - "time" - - "github.com/blevesearch/bleve/v2/numeric" - "github.com/blevesearch/bleve/v2/search" - "github.com/blevesearch/bleve/v2/size" -) - -var reflectStaticSizeDateTimeFacetBuilder int -var reflectStaticSizedateTimeRange int - -func init() { - var dtfb DateTimeFacetBuilder - reflectStaticSizeDateTimeFacetBuilder = int(reflect.TypeOf(dtfb).Size()) - var dtr dateTimeRange - reflectStaticSizedateTimeRange = int(reflect.TypeOf(dtr).Size()) -} - -type dateTimeRange struct { - start time.Time - end time.Time -} - -type DateTimeFacetBuilder struct { - size int - field string - termsCount map[string]int - total int - missing int - ranges map[string]*dateTimeRange - sawValue bool -} - -func NewDateTimeFacetBuilder(field string, size int) *DateTimeFacetBuilder { - return &DateTimeFacetBuilder{ - size: size, - field: field, - termsCount: make(map[string]int), - ranges: make(map[string]*dateTimeRange, 0), - } -} - -func (fb *DateTimeFacetBuilder) Size() int { - sizeInBytes := reflectStaticSizeDateTimeFacetBuilder + size.SizeOfPtr + - len(fb.field) - - for k, _ := range fb.termsCount { - sizeInBytes += size.SizeOfString + len(k) + - size.SizeOfInt - } - - for k, _ := range fb.ranges { - sizeInBytes += size.SizeOfString + len(k) + - size.SizeOfPtr + reflectStaticSizedateTimeRange - } - - return sizeInBytes -} - -func (fb *DateTimeFacetBuilder) AddRange(name string, start, end time.Time) { - r := dateTimeRange{ - start: start, - end: end, - } - fb.ranges[name] = &r -} - -func (fb *DateTimeFacetBuilder) 
Field() string { - return fb.field -} - -func (fb *DateTimeFacetBuilder) UpdateVisitor(field string, term []byte) { - if field == fb.field { - fb.sawValue = true - // only consider the values which are shifted 0 - prefixCoded := numeric.PrefixCoded(term) - shift, err := prefixCoded.Shift() - if err == nil && shift == 0 { - i64, err := prefixCoded.Int64() - if err == nil { - t := time.Unix(0, i64) - - // look at each of the ranges for a match - for rangeName, r := range fb.ranges { - if (r.start.IsZero() || t.After(r.start) || t.Equal(r.start)) && (r.end.IsZero() || t.Before(r.end)) { - fb.termsCount[rangeName] = fb.termsCount[rangeName] + 1 - fb.total++ - } - } - } - } - } -} - -func (fb *DateTimeFacetBuilder) StartDoc() { - fb.sawValue = false -} - -func (fb *DateTimeFacetBuilder) EndDoc() { - if !fb.sawValue { - fb.missing++ - } -} - -func (fb *DateTimeFacetBuilder) Result() *search.FacetResult { - rv := search.FacetResult{ - Field: fb.field, - Total: fb.total, - Missing: fb.missing, - } - - rv.DateRanges = make([]*search.DateRangeFacet, 0, len(fb.termsCount)) - - for term, count := range fb.termsCount { - dateRange := fb.ranges[term] - tf := &search.DateRangeFacet{ - Name: term, - Count: count, - } - if !dateRange.start.IsZero() { - start := dateRange.start.Format(time.RFC3339Nano) - tf.Start = &start - } - if !dateRange.end.IsZero() { - end := dateRange.end.Format(time.RFC3339Nano) - tf.End = &end - } - rv.DateRanges = append(rv.DateRanges, tf) - } - - sort.Sort(rv.DateRanges) - - // we now have the list of the top N facets - if fb.size < len(rv.DateRanges) { - rv.DateRanges = rv.DateRanges[:fb.size] - } - - notOther := 0 - for _, nr := range rv.DateRanges { - notOther += nr.Count - } - rv.Other = fb.total - notOther - - return &rv -} diff --git a/vendor/github.com/blevesearch/bleve/v2/search/facet/facet_builder_numeric.go b/vendor/github.com/blevesearch/bleve/v2/search/facet/facet_builder_numeric.go deleted file mode 100644 index 6d0c6c9d1..000000000 --- 
a/vendor/github.com/blevesearch/bleve/v2/search/facet/facet_builder_numeric.go +++ /dev/null @@ -1,157 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package facet - -import ( - "reflect" - "sort" - - "github.com/blevesearch/bleve/v2/numeric" - "github.com/blevesearch/bleve/v2/search" - "github.com/blevesearch/bleve/v2/size" -) - -var reflectStaticSizeNumericFacetBuilder int -var reflectStaticSizenumericRange int - -func init() { - var nfb NumericFacetBuilder - reflectStaticSizeNumericFacetBuilder = int(reflect.TypeOf(nfb).Size()) - var nr numericRange - reflectStaticSizenumericRange = int(reflect.TypeOf(nr).Size()) -} - -type numericRange struct { - min *float64 - max *float64 -} - -type NumericFacetBuilder struct { - size int - field string - termsCount map[string]int - total int - missing int - ranges map[string]*numericRange - sawValue bool -} - -func NewNumericFacetBuilder(field string, size int) *NumericFacetBuilder { - return &NumericFacetBuilder{ - size: size, - field: field, - termsCount: make(map[string]int), - ranges: make(map[string]*numericRange, 0), - } -} - -func (fb *NumericFacetBuilder) Size() int { - sizeInBytes := reflectStaticSizeNumericFacetBuilder + size.SizeOfPtr + - len(fb.field) - - for k, _ := range fb.termsCount { - sizeInBytes += size.SizeOfString + len(k) + - size.SizeOfInt - } - - for k, _ := range fb.ranges { - sizeInBytes += size.SizeOfString + len(k) + - size.SizeOfPtr + 
reflectStaticSizenumericRange - } - - return sizeInBytes -} - -func (fb *NumericFacetBuilder) AddRange(name string, min, max *float64) { - r := numericRange{ - min: min, - max: max, - } - fb.ranges[name] = &r -} - -func (fb *NumericFacetBuilder) Field() string { - return fb.field -} - -func (fb *NumericFacetBuilder) UpdateVisitor(field string, term []byte) { - if field == fb.field { - fb.sawValue = true - // only consider the values which are shifted 0 - prefixCoded := numeric.PrefixCoded(term) - shift, err := prefixCoded.Shift() - if err == nil && shift == 0 { - i64, err := prefixCoded.Int64() - if err == nil { - f64 := numeric.Int64ToFloat64(i64) - - // look at each of the ranges for a match - for rangeName, r := range fb.ranges { - if (r.min == nil || f64 >= *r.min) && (r.max == nil || f64 < *r.max) { - fb.termsCount[rangeName] = fb.termsCount[rangeName] + 1 - fb.total++ - } - } - } - } - } -} - -func (fb *NumericFacetBuilder) StartDoc() { - fb.sawValue = false -} - -func (fb *NumericFacetBuilder) EndDoc() { - if !fb.sawValue { - fb.missing++ - } -} - -func (fb *NumericFacetBuilder) Result() *search.FacetResult { - rv := search.FacetResult{ - Field: fb.field, - Total: fb.total, - Missing: fb.missing, - } - - rv.NumericRanges = make([]*search.NumericRangeFacet, 0, len(fb.termsCount)) - - for term, count := range fb.termsCount { - numericRange := fb.ranges[term] - tf := &search.NumericRangeFacet{ - Name: term, - Count: count, - Min: numericRange.min, - Max: numericRange.max, - } - - rv.NumericRanges = append(rv.NumericRanges, tf) - } - - sort.Sort(rv.NumericRanges) - - // we now have the list of the top N facets - if fb.size < len(rv.NumericRanges) { - rv.NumericRanges = rv.NumericRanges[:fb.size] - } - - notOther := 0 - for _, nr := range rv.NumericRanges { - notOther += nr.Count - } - rv.Other = fb.total - notOther - - return &rv -} diff --git a/vendor/github.com/blevesearch/bleve/v2/search/facet/facet_builder_terms.go 
b/vendor/github.com/blevesearch/bleve/v2/search/facet/facet_builder_terms.go deleted file mode 100644 index 1b378db7c..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/search/facet/facet_builder_terms.go +++ /dev/null @@ -1,117 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package facet - -import ( - "reflect" - "sort" - - "github.com/blevesearch/bleve/v2/search" - "github.com/blevesearch/bleve/v2/size" -) - -var reflectStaticSizeTermsFacetBuilder int - -func init() { - var tfb TermsFacetBuilder - reflectStaticSizeTermsFacetBuilder = int(reflect.TypeOf(tfb).Size()) -} - -type TermsFacetBuilder struct { - size int - field string - termsCount map[string]int - total int - missing int - sawValue bool -} - -func NewTermsFacetBuilder(field string, size int) *TermsFacetBuilder { - return &TermsFacetBuilder{ - size: size, - field: field, - termsCount: make(map[string]int), - } -} - -func (fb *TermsFacetBuilder) Size() int { - sizeInBytes := reflectStaticSizeTermsFacetBuilder + size.SizeOfPtr + - len(fb.field) - - for k, _ := range fb.termsCount { - sizeInBytes += size.SizeOfString + len(k) + - size.SizeOfInt - } - - return sizeInBytes -} - -func (fb *TermsFacetBuilder) Field() string { - return fb.field -} - -func (fb *TermsFacetBuilder) UpdateVisitor(field string, term []byte) { - if field == fb.field { - fb.sawValue = true - fb.termsCount[string(term)] = fb.termsCount[string(term)] + 1 - fb.total++ - } -} 
- -func (fb *TermsFacetBuilder) StartDoc() { - fb.sawValue = false -} - -func (fb *TermsFacetBuilder) EndDoc() { - if !fb.sawValue { - fb.missing++ - } -} - -func (fb *TermsFacetBuilder) Result() *search.FacetResult { - rv := search.FacetResult{ - Field: fb.field, - Total: fb.total, - Missing: fb.missing, - } - - rv.Terms = make([]*search.TermFacet, 0, len(fb.termsCount)) - - for term, count := range fb.termsCount { - tf := &search.TermFacet{ - Term: term, - Count: count, - } - - rv.Terms = append(rv.Terms, tf) - } - - sort.Sort(rv.Terms) - - // we now have the list of the top N facets - trimTopN := fb.size - if trimTopN > len(rv.Terms) { - trimTopN = len(rv.Terms) - } - rv.Terms = rv.Terms[:trimTopN] - - notOther := 0 - for _, tf := range rv.Terms { - notOther += tf.Count - } - rv.Other = fb.total - notOther - - return &rv -} diff --git a/vendor/github.com/blevesearch/bleve/v2/search/facets_builder.go b/vendor/github.com/blevesearch/bleve/v2/search/facets_builder.go deleted file mode 100644 index 9822257f7..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/search/facets_builder.go +++ /dev/null @@ -1,341 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package search - -import ( - "reflect" - "sort" - - "github.com/blevesearch/bleve/v2/size" - index "github.com/blevesearch/bleve_index_api" -) - -var reflectStaticSizeFacetsBuilder int -var reflectStaticSizeFacetResult int -var reflectStaticSizeTermFacet int -var reflectStaticSizeNumericRangeFacet int -var reflectStaticSizeDateRangeFacet int - -func init() { - var fb FacetsBuilder - reflectStaticSizeFacetsBuilder = int(reflect.TypeOf(fb).Size()) - var fr FacetResult - reflectStaticSizeFacetResult = int(reflect.TypeOf(fr).Size()) - var tf TermFacet - reflectStaticSizeTermFacet = int(reflect.TypeOf(tf).Size()) - var nrf NumericRangeFacet - reflectStaticSizeNumericRangeFacet = int(reflect.TypeOf(nrf).Size()) - var drf DateRangeFacet - reflectStaticSizeDateRangeFacet = int(reflect.TypeOf(drf).Size()) -} - -type FacetBuilder interface { - StartDoc() - UpdateVisitor(field string, term []byte) - EndDoc() - - Result() *FacetResult - Field() string - - Size() int -} - -type FacetsBuilder struct { - indexReader index.IndexReader - facetNames []string - facets []FacetBuilder - fields []string -} - -func NewFacetsBuilder(indexReader index.IndexReader) *FacetsBuilder { - return &FacetsBuilder{ - indexReader: indexReader, - } -} - -func (fb *FacetsBuilder) Size() int { - sizeInBytes := reflectStaticSizeFacetsBuilder + size.SizeOfPtr - - for k, v := range fb.facets { - sizeInBytes += size.SizeOfString + v.Size() + len(fb.facetNames[k]) - } - - for _, entry := range fb.fields { - sizeInBytes += size.SizeOfString + len(entry) - } - - return sizeInBytes -} - -func (fb *FacetsBuilder) Add(name string, facetBuilder FacetBuilder) { - fb.facetNames = append(fb.facetNames, name) - fb.facets = append(fb.facets, facetBuilder) - fb.fields = append(fb.fields, facetBuilder.Field()) -} - -func (fb *FacetsBuilder) RequiredFields() []string { - return fb.fields -} - -func (fb *FacetsBuilder) StartDoc() { - for _, facetBuilder := range fb.facets { - facetBuilder.StartDoc() - } -} - -func (fb 
*FacetsBuilder) EndDoc() { - for _, facetBuilder := range fb.facets { - facetBuilder.EndDoc() - } -} - -func (fb *FacetsBuilder) UpdateVisitor(field string, term []byte) { - for _, facetBuilder := range fb.facets { - facetBuilder.UpdateVisitor(field, term) - } -} - -type TermFacet struct { - Term string `json:"term"` - Count int `json:"count"` -} - -type TermFacets []*TermFacet - -func (tf TermFacets) Add(termFacet *TermFacet) TermFacets { - for _, existingTerm := range tf { - if termFacet.Term == existingTerm.Term { - existingTerm.Count += termFacet.Count - return tf - } - } - // if we got here it wasn't already in the existing terms - tf = append(tf, termFacet) - return tf -} - -func (tf TermFacets) Len() int { return len(tf) } -func (tf TermFacets) Swap(i, j int) { tf[i], tf[j] = tf[j], tf[i] } -func (tf TermFacets) Less(i, j int) bool { - if tf[i].Count == tf[j].Count { - return tf[i].Term < tf[j].Term - } - return tf[i].Count > tf[j].Count -} - -type NumericRangeFacet struct { - Name string `json:"name"` - Min *float64 `json:"min,omitempty"` - Max *float64 `json:"max,omitempty"` - Count int `json:"count"` -} - -func (nrf *NumericRangeFacet) Same(other *NumericRangeFacet) bool { - if nrf.Min == nil && other.Min != nil { - return false - } - if nrf.Min != nil && other.Min == nil { - return false - } - if nrf.Min != nil && other.Min != nil && *nrf.Min != *other.Min { - return false - } - if nrf.Max == nil && other.Max != nil { - return false - } - if nrf.Max != nil && other.Max == nil { - return false - } - if nrf.Max != nil && other.Max != nil && *nrf.Max != *other.Max { - return false - } - - return true -} - -type NumericRangeFacets []*NumericRangeFacet - -func (nrf NumericRangeFacets) Add(numericRangeFacet *NumericRangeFacet) NumericRangeFacets { - for _, existingNr := range nrf { - if numericRangeFacet.Same(existingNr) { - existingNr.Count += numericRangeFacet.Count - return nrf - } - } - // if we got here it wasn't already in the existing terms - nrf = 
append(nrf, numericRangeFacet) - return nrf -} - -func (nrf NumericRangeFacets) Len() int { return len(nrf) } -func (nrf NumericRangeFacets) Swap(i, j int) { nrf[i], nrf[j] = nrf[j], nrf[i] } -func (nrf NumericRangeFacets) Less(i, j int) bool { - if nrf[i].Count == nrf[j].Count { - return nrf[i].Name < nrf[j].Name - } - return nrf[i].Count > nrf[j].Count -} - -type DateRangeFacet struct { - Name string `json:"name"` - Start *string `json:"start,omitempty"` - End *string `json:"end,omitempty"` - Count int `json:"count"` -} - -func (drf *DateRangeFacet) Same(other *DateRangeFacet) bool { - if drf.Start == nil && other.Start != nil { - return false - } - if drf.Start != nil && other.Start == nil { - return false - } - if drf.Start != nil && other.Start != nil && *drf.Start != *other.Start { - return false - } - if drf.End == nil && other.End != nil { - return false - } - if drf.End != nil && other.End == nil { - return false - } - if drf.End != nil && other.End != nil && *drf.End != *other.End { - return false - } - - return true -} - -type DateRangeFacets []*DateRangeFacet - -func (drf DateRangeFacets) Add(dateRangeFacet *DateRangeFacet) DateRangeFacets { - for _, existingDr := range drf { - if dateRangeFacet.Same(existingDr) { - existingDr.Count += dateRangeFacet.Count - return drf - } - } - // if we got here it wasn't already in the existing terms - drf = append(drf, dateRangeFacet) - return drf -} - -func (drf DateRangeFacets) Len() int { return len(drf) } -func (drf DateRangeFacets) Swap(i, j int) { drf[i], drf[j] = drf[j], drf[i] } -func (drf DateRangeFacets) Less(i, j int) bool { - if drf[i].Count == drf[j].Count { - return drf[i].Name < drf[j].Name - } - return drf[i].Count > drf[j].Count -} - -type FacetResult struct { - Field string `json:"field"` - Total int `json:"total"` - Missing int `json:"missing"` - Other int `json:"other"` - Terms TermFacets `json:"terms,omitempty"` - NumericRanges NumericRangeFacets `json:"numeric_ranges,omitempty"` - DateRanges 
DateRangeFacets `json:"date_ranges,omitempty"` -} - -func (fr *FacetResult) Size() int { - return reflectStaticSizeFacetResult + size.SizeOfPtr + - len(fr.Field) + - len(fr.Terms)*(reflectStaticSizeTermFacet+size.SizeOfPtr) + - len(fr.NumericRanges)*(reflectStaticSizeNumericRangeFacet+size.SizeOfPtr) + - len(fr.DateRanges)*(reflectStaticSizeDateRangeFacet+size.SizeOfPtr) -} - -func (fr *FacetResult) Merge(other *FacetResult) { - fr.Total += other.Total - fr.Missing += other.Missing - fr.Other += other.Other - if fr.Terms != nil && other.Terms != nil { - for _, term := range other.Terms { - fr.Terms = fr.Terms.Add(term) - } - } - if fr.NumericRanges != nil && other.NumericRanges != nil { - for _, nr := range other.NumericRanges { - fr.NumericRanges = fr.NumericRanges.Add(nr) - } - } - if fr.DateRanges != nil && other.DateRanges != nil { - for _, dr := range other.DateRanges { - fr.DateRanges = fr.DateRanges.Add(dr) - } - } -} - -func (fr *FacetResult) Fixup(size int) { - if fr.Terms != nil { - sort.Sort(fr.Terms) - if len(fr.Terms) > size { - moveToOther := fr.Terms[size:] - for _, mto := range moveToOther { - fr.Other += mto.Count - } - fr.Terms = fr.Terms[0:size] - } - } else if fr.NumericRanges != nil { - sort.Sort(fr.NumericRanges) - if len(fr.NumericRanges) > size { - moveToOther := fr.NumericRanges[size:] - for _, mto := range moveToOther { - fr.Other += mto.Count - } - fr.NumericRanges = fr.NumericRanges[0:size] - } - } else if fr.DateRanges != nil { - sort.Sort(fr.DateRanges) - if len(fr.DateRanges) > size { - moveToOther := fr.DateRanges[size:] - for _, mto := range moveToOther { - fr.Other += mto.Count - } - fr.DateRanges = fr.DateRanges[0:size] - } - } -} - -type FacetResults map[string]*FacetResult - -func (fr FacetResults) Merge(other FacetResults) { - for name, oFacetResult := range other { - facetResult, ok := fr[name] - if ok { - facetResult.Merge(oFacetResult) - } else { - fr[name] = oFacetResult - } - } -} - -func (fr FacetResults) Fixup(name 
string, size int) { - facetResult, ok := fr[name] - if ok { - facetResult.Fixup(size) - } -} - -func (fb *FacetsBuilder) Results() FacetResults { - fr := make(FacetResults) - for i, facetBuilder := range fb.facets { - facetResult := facetBuilder.Result() - fr[fb.facetNames[i]] = facetResult - } - return fr -} diff --git a/vendor/github.com/blevesearch/bleve/v2/search/highlight/format/html/html.go b/vendor/github.com/blevesearch/bleve/v2/search/highlight/format/html/html.go deleted file mode 100644 index a0658d9c7..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/search/highlight/format/html/html.go +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package html - -import ( - "html" - - "github.com/blevesearch/bleve/v2/registry" - "github.com/blevesearch/bleve/v2/search/highlight" -) - -const Name = "html" - -const defaultHTMLHighlightBefore = "" -const defaultHTMLHighlightAfter = "" - -type FragmentFormatter struct { - before string - after string -} - -func NewFragmentFormatter(before, after string) *FragmentFormatter { - return &FragmentFormatter{ - before: before, - after: after, - } -} - -func (a *FragmentFormatter) Format(f *highlight.Fragment, orderedTermLocations highlight.TermLocations) string { - rv := "" - curr := f.Start - for _, termLocation := range orderedTermLocations { - if termLocation == nil { - continue - } - // make sure the array positions match - if !termLocation.ArrayPositions.Equals(f.ArrayPositions) { - continue - } - if termLocation.Start < curr { - continue - } - if termLocation.End > f.End { - break - } - // add the stuff before this location - rv += html.EscapeString(string(f.Orig[curr:termLocation.Start])) - // start the tag - rv += a.before - // add the term itself - rv += html.EscapeString(string(f.Orig[termLocation.Start:termLocation.End])) - // end the tag - rv += a.after - // update current - curr = termLocation.End - } - // add any remaining text after the last token - rv += html.EscapeString(string(f.Orig[curr:f.End])) - - return rv -} - -func Constructor(config map[string]interface{}, cache *registry.Cache) (highlight.FragmentFormatter, error) { - before := defaultHTMLHighlightBefore - beforeVal, ok := config["before"].(string) - if ok { - before = beforeVal - } - after := defaultHTMLHighlightAfter - afterVal, ok := config["after"].(string) - if ok { - after = afterVal - } - return NewFragmentFormatter(before, after), nil -} - -func init() { - registry.RegisterFragmentFormatter(Name, Constructor) -} diff --git a/vendor/github.com/blevesearch/bleve/v2/search/highlight/fragmenter/simple/simple.go 
b/vendor/github.com/blevesearch/bleve/v2/search/highlight/fragmenter/simple/simple.go deleted file mode 100644 index 348dc1ce3..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/search/highlight/fragmenter/simple/simple.go +++ /dev/null @@ -1,147 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package simple - -import ( - "unicode/utf8" - - "github.com/blevesearch/bleve/v2/registry" - "github.com/blevesearch/bleve/v2/search/highlight" -) - -const Name = "simple" - -const defaultFragmentSize = 200 - -type Fragmenter struct { - fragmentSize int -} - -func NewFragmenter(fragmentSize int) *Fragmenter { - return &Fragmenter{ - fragmentSize: fragmentSize, - } -} - -func (s *Fragmenter) Fragment(orig []byte, ot highlight.TermLocations) []*highlight.Fragment { - var rv []*highlight.Fragment - maxbegin := 0 -OUTER: - for currTermIndex, termLocation := range ot { - // start with this - // it should be the highest scoring fragment with this term first - start := termLocation.Start - end := start - used := 0 - for end < len(orig) && used < s.fragmentSize { - r, size := utf8.DecodeRune(orig[end:]) - if r == utf8.RuneError { - continue OUTER // bail - } - end += size - used++ - } - - // if we still have more characters available to us - // push back towards beginning - // without cross maxbegin - for start > 0 && used < s.fragmentSize { - if start > len(orig) { - // bail if out of bounds, possibly due to token 
replacement - // e.g with a regexp replacement - continue OUTER - } - r, size := utf8.DecodeLastRune(orig[0:start]) - if r == utf8.RuneError { - continue OUTER // bail - } - if start-size >= maxbegin { - start -= size - used++ - } else { - break - } - } - - // however, we'd rather have the tokens centered more in the frag - // lets try to do that as best we can, without affecting the score - // find the end of the last term in this fragment - minend := end - for _, innerTermLocation := range ot[currTermIndex:] { - if innerTermLocation.End > end { - break - } - minend = innerTermLocation.End - } - - // find the smaller of the two rooms to move - roomToMove := utf8.RuneCount(orig[minend:end]) - roomToMoveStart := 0 - if start >= maxbegin { - roomToMoveStart = utf8.RuneCount(orig[maxbegin:start]) - } - if roomToMoveStart < roomToMove { - roomToMove = roomToMoveStart - } - - offset := roomToMove / 2 - - for offset > 0 { - r, size := utf8.DecodeLastRune(orig[0:start]) - if r == utf8.RuneError { - continue OUTER // bail - } - start -= size - - r, size = utf8.DecodeLastRune(orig[0:end]) - if r == utf8.RuneError { - continue OUTER // bail - } - end -= size - offset-- - } - - rv = append(rv, &highlight.Fragment{Orig: orig, Start: start - offset, End: end - offset}) - // set maxbegin to the end of the current term location - // so that next one won't back up to include it - maxbegin = termLocation.End - - } - if len(ot) == 0 { - // if there were no terms to highlight - // produce a single fragment from the beginning - start := 0 - end := start + s.fragmentSize - if end > len(orig) { - end = len(orig) - } - rv = append(rv, &highlight.Fragment{Orig: orig, Start: start, End: end}) - } - - return rv -} - -func Constructor(config map[string]interface{}, cache *registry.Cache) (highlight.Fragmenter, error) { - size := defaultFragmentSize - sizeVal, ok := config["size"].(float64) - if ok { - size = int(sizeVal) - } - return NewFragmenter(size), nil -} - -func init() { - 
registry.RegisterFragmenter(Name, Constructor) -} diff --git a/vendor/github.com/blevesearch/bleve/v2/search/highlight/highlighter.go b/vendor/github.com/blevesearch/bleve/v2/search/highlight/highlighter.go deleted file mode 100644 index 3dd9ce053..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/search/highlight/highlighter.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package highlight - -import ( - "github.com/blevesearch/bleve/v2/search" - index "github.com/blevesearch/bleve_index_api" -) - -type Fragment struct { - Orig []byte - ArrayPositions []uint64 - Start int - End int - Score float64 - Index int // used by heap -} - -func (f *Fragment) Overlaps(other *Fragment) bool { - if other.Start >= f.Start && other.Start < f.End { - return true - } else if f.Start >= other.Start && f.Start < other.End { - return true - } - return false -} - -type Fragmenter interface { - Fragment([]byte, TermLocations) []*Fragment -} - -type FragmentFormatter interface { - Format(f *Fragment, orderedTermLocations TermLocations) string -} - -type FragmentScorer interface { - Score(f *Fragment) float64 -} - -type Highlighter interface { - Fragmenter() Fragmenter - SetFragmenter(Fragmenter) - - FragmentFormatter() FragmentFormatter - SetFragmentFormatter(FragmentFormatter) - - Separator() string - SetSeparator(string) - - BestFragmentInField(*search.DocumentMatch, index.Document, string) string 
- BestFragmentsInField(*search.DocumentMatch, index.Document, string, int) []string -} diff --git a/vendor/github.com/blevesearch/bleve/v2/search/highlight/highlighter/html/html.go b/vendor/github.com/blevesearch/bleve/v2/search/highlight/highlighter/html/html.go deleted file mode 100644 index ceb686dce..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/search/highlight/highlighter/html/html.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright (c) 2015 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package html - -import ( - "fmt" - - "github.com/blevesearch/bleve/v2/registry" - "github.com/blevesearch/bleve/v2/search/highlight" - htmlFormatter "github.com/blevesearch/bleve/v2/search/highlight/format/html" - simpleFragmenter "github.com/blevesearch/bleve/v2/search/highlight/fragmenter/simple" - simpleHighlighter "github.com/blevesearch/bleve/v2/search/highlight/highlighter/simple" -) - -const Name = "html" - -func Constructor(config map[string]interface{}, cache *registry.Cache) (highlight.Highlighter, error) { - - fragmenter, err := cache.FragmenterNamed(simpleFragmenter.Name) - if err != nil { - return nil, fmt.Errorf("error building fragmenter: %v", err) - } - - formatter, err := cache.FragmentFormatterNamed(htmlFormatter.Name) - if err != nil { - return nil, fmt.Errorf("error building fragment formatter: %v", err) - } - - return simpleHighlighter.NewHighlighter( - fragmenter, - formatter, - simpleHighlighter.DefaultSeparator), - nil -} - -func init() { - registry.RegisterHighlighter(Name, Constructor) -} diff --git a/vendor/github.com/blevesearch/bleve/v2/search/highlight/highlighter/simple/fragment_scorer_simple.go b/vendor/github.com/blevesearch/bleve/v2/search/highlight/highlighter/simple/fragment_scorer_simple.go deleted file mode 100644 index 786e33cb3..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/search/highlight/highlighter/simple/fragment_scorer_simple.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package simple - -import ( - "github.com/blevesearch/bleve/v2/search" - "github.com/blevesearch/bleve/v2/search/highlight" -) - -// FragmentScorer will score fragments by how many -// unique terms occur in the fragment with no regard for -// any boost values used in the original query -type FragmentScorer struct { - tlm search.TermLocationMap -} - -func NewFragmentScorer(tlm search.TermLocationMap) *FragmentScorer { - return &FragmentScorer{ - tlm: tlm, - } -} - -func (s *FragmentScorer) Score(f *highlight.Fragment) { - score := 0.0 -OUTER: - for _, locations := range s.tlm { - for _, location := range locations { - if location.ArrayPositions.Equals(f.ArrayPositions) && int(location.Start) >= f.Start && int(location.End) <= f.End { - score += 1.0 - // once we find a term in the fragment - // don't care about additional matches - continue OUTER - } - } - } - f.Score = score -} diff --git a/vendor/github.com/blevesearch/bleve/v2/search/highlight/highlighter/simple/highlighter_simple.go b/vendor/github.com/blevesearch/bleve/v2/search/highlight/highlighter/simple/highlighter_simple.go deleted file mode 100644 index 19949687d..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/search/highlight/highlighter/simple/highlighter_simple.go +++ /dev/null @@ -1,221 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package simple - -import ( - "container/heap" - "fmt" - index "github.com/blevesearch/bleve_index_api" - - "github.com/blevesearch/bleve/v2/registry" - "github.com/blevesearch/bleve/v2/search" - "github.com/blevesearch/bleve/v2/search/highlight" -) - -const Name = "simple" -const DefaultSeparator = "…" - -type Highlighter struct { - fragmenter highlight.Fragmenter - formatter highlight.FragmentFormatter - sep string -} - -func NewHighlighter(fragmenter highlight.Fragmenter, formatter highlight.FragmentFormatter, separator string) *Highlighter { - return &Highlighter{ - fragmenter: fragmenter, - formatter: formatter, - sep: separator, - } -} - -func (s *Highlighter) Fragmenter() highlight.Fragmenter { - return s.fragmenter -} - -func (s *Highlighter) SetFragmenter(f highlight.Fragmenter) { - s.fragmenter = f -} - -func (s *Highlighter) FragmentFormatter() highlight.FragmentFormatter { - return s.formatter -} - -func (s *Highlighter) SetFragmentFormatter(f highlight.FragmentFormatter) { - s.formatter = f -} - -func (s *Highlighter) Separator() string { - return s.sep -} - -func (s *Highlighter) SetSeparator(sep string) { - s.sep = sep -} - -func (s *Highlighter) BestFragmentInField(dm *search.DocumentMatch, doc index.Document, field string) string { - fragments := s.BestFragmentsInField(dm, doc, field, 1) - if len(fragments) > 0 { - return fragments[0] - } - return "" -} - -func (s *Highlighter) BestFragmentsInField(dm *search.DocumentMatch, doc index.Document, field string, num int) []string { - tlm := dm.Locations[field] - orderedTermLocations := highlight.OrderTermLocations(tlm) - scorer := NewFragmentScorer(tlm) - - // score the fragments and put them into a priority queue ordered by score - fq := make(FragmentQueue, 0) - heap.Init(&fq) - doc.VisitFields(func(f index.Field) { - if f.Name() == field { - _, ok := f.(index.TextField) - if ok { - termLocationsSameArrayPosition := make(highlight.TermLocations, 0) - for _, otl := range orderedTermLocations { - if 
otl.ArrayPositions.Equals(f.ArrayPositions()) { - termLocationsSameArrayPosition = append(termLocationsSameArrayPosition, otl) - } - } - - fieldData := f.Value() - fragments := s.fragmenter.Fragment(fieldData, termLocationsSameArrayPosition) - for _, fragment := range fragments { - fragment.ArrayPositions = f.ArrayPositions() - scorer.Score(fragment) - heap.Push(&fq, fragment) - } - } - } - }) - - // now find the N best non-overlapping fragments - var bestFragments []*highlight.Fragment - if len(fq) > 0 { - candidate := heap.Pop(&fq) - OUTER: - for candidate != nil && len(bestFragments) < num { - // see if this overlaps with any of the best already identified - if len(bestFragments) > 0 { - for _, frag := range bestFragments { - if candidate.(*highlight.Fragment).Overlaps(frag) { - if len(fq) < 1 { - break OUTER - } - candidate = heap.Pop(&fq) - continue OUTER - } - } - bestFragments = append(bestFragments, candidate.(*highlight.Fragment)) - } else { - bestFragments = append(bestFragments, candidate.(*highlight.Fragment)) - } - - if len(fq) < 1 { - break - } - candidate = heap.Pop(&fq) - } - } - - // now that we have the best fragments, we can format them - orderedTermLocations.MergeOverlapping() - formattedFragments := make([]string, len(bestFragments)) - for i, fragment := range bestFragments { - formattedFragments[i] = "" - if fragment.Start != 0 { - formattedFragments[i] += s.sep - } - formattedFragments[i] += s.formatter.Format(fragment, orderedTermLocations) - if fragment.End != len(fragment.Orig) { - formattedFragments[i] += s.sep - } - } - - if dm.Fragments == nil { - dm.Fragments = make(search.FieldFragmentMap, 0) - } - if len(formattedFragments) > 0 { - dm.Fragments[field] = formattedFragments - } - - return formattedFragments -} - -// FragmentQueue implements heap.Interface and holds Items. 
-type FragmentQueue []*highlight.Fragment - -func (fq FragmentQueue) Len() int { return len(fq) } - -func (fq FragmentQueue) Less(i, j int) bool { - // We want Pop to give us the highest, not lowest, priority so we use greater-than here. - return fq[i].Score > fq[j].Score -} - -func (fq FragmentQueue) Swap(i, j int) { - fq[i], fq[j] = fq[j], fq[i] - fq[i].Index = i - fq[j].Index = j -} - -func (fq *FragmentQueue) Push(x interface{}) { - n := len(*fq) - item := x.(*highlight.Fragment) - item.Index = n - *fq = append(*fq, item) -} - -func (fq *FragmentQueue) Pop() interface{} { - old := *fq - n := len(old) - item := old[n-1] - item.Index = -1 // for safety - *fq = old[0 : n-1] - return item -} - -func Constructor(config map[string]interface{}, cache *registry.Cache) (highlight.Highlighter, error) { - separator := DefaultSeparator - separatorVal, ok := config["separator"].(string) - if ok { - separator = separatorVal - } - - fragmenterName, ok := config["fragmenter"].(string) - if !ok { - return nil, fmt.Errorf("must specify fragmenter") - } - fragmenter, err := cache.FragmenterNamed(fragmenterName) - if err != nil { - return nil, fmt.Errorf("error building fragmenter: %v", err) - } - - formatterName, ok := config["formatter"].(string) - if !ok { - return nil, fmt.Errorf("must specify formatter") - } - formatter, err := cache.FragmentFormatterNamed(formatterName) - if err != nil { - return nil, fmt.Errorf("error building fragment formatter: %v", err) - } - - return NewHighlighter(fragmenter, formatter, separator), nil -} - -func init() { - registry.RegisterHighlighter(Name, Constructor) -} diff --git a/vendor/github.com/blevesearch/bleve/v2/search/highlight/term_locations.go b/vendor/github.com/blevesearch/bleve/v2/search/highlight/term_locations.go deleted file mode 100644 index 6bf385c05..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/search/highlight/term_locations.go +++ /dev/null @@ -1,105 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package highlight - -import ( - "reflect" - "sort" - - "github.com/blevesearch/bleve/v2/search" -) - -type TermLocation struct { - Term string - ArrayPositions search.ArrayPositions - Pos int - Start int - End int -} - -func (tl *TermLocation) Overlaps(other *TermLocation) bool { - if reflect.DeepEqual(tl.ArrayPositions, other.ArrayPositions) { - if other.Start >= tl.Start && other.Start < tl.End { - return true - } else if tl.Start >= other.Start && tl.Start < other.End { - return true - } - } - return false -} - -type TermLocations []*TermLocation - -func (t TermLocations) Len() int { return len(t) } -func (t TermLocations) Swap(i, j int) { t[i], t[j] = t[j], t[i] } -func (t TermLocations) Less(i, j int) bool { - - shortestArrayPositions := len(t[i].ArrayPositions) - if len(t[j].ArrayPositions) < shortestArrayPositions { - shortestArrayPositions = len(t[j].ArrayPositions) - } - - // compare all the common array positions - for api := 0; api < shortestArrayPositions; api++ { - if t[i].ArrayPositions[api] < t[j].ArrayPositions[api] { - return true - } - if t[i].ArrayPositions[api] > t[j].ArrayPositions[api] { - return false - } - } - // all the common array positions are the same - if len(t[i].ArrayPositions) < len(t[j].ArrayPositions) { - return true // j array positions, longer so greater - } else if len(t[i].ArrayPositions) > len(t[j].ArrayPositions) { - return false // j array positions, shorter so less 
- } - - // array positions the same, compare starts - return t[i].Start < t[j].Start -} - -func (t TermLocations) MergeOverlapping() { - var lastTl *TermLocation - for i, tl := range t { - if lastTl == nil && tl != nil { - lastTl = tl - } else if lastTl != nil && tl != nil { - if lastTl.Overlaps(tl) { - // ok merge this with previous - lastTl.End = tl.End - t[i] = nil - } - } - } -} - -func OrderTermLocations(tlm search.TermLocationMap) TermLocations { - rv := make(TermLocations, 0) - for term, locations := range tlm { - for _, location := range locations { - tl := TermLocation{ - Term: term, - ArrayPositions: location.ArrayPositions, - Pos: int(location.Pos), - Start: int(location.Start), - End: int(location.End), - } - rv = append(rv, &tl) - } - } - sort.Sort(rv) - return rv -} diff --git a/vendor/github.com/blevesearch/bleve/v2/search/levenshtein.go b/vendor/github.com/blevesearch/bleve/v2/search/levenshtein.go deleted file mode 100644 index 687608d3f..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/search/levenshtein.go +++ /dev/null @@ -1,114 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package search - -import ( - "math" -) - -func LevenshteinDistance(a, b string) int { - la := len(a) - lb := len(b) - d := make([]int, la+1) - var lastdiag, olddiag, temp int - - for i := 1; i <= la; i++ { - d[i] = i - } - for i := 1; i <= lb; i++ { - d[0] = i - lastdiag = i - 1 - for j := 1; j <= la; j++ { - olddiag = d[j] - min := d[j] + 1 - if (d[j-1] + 1) < min { - min = d[j-1] + 1 - } - if a[j-1] == b[i-1] { - temp = 0 - } else { - temp = 1 - } - if (lastdiag + temp) < min { - min = lastdiag + temp - } - d[j] = min - lastdiag = olddiag - } - } - return d[la] -} - -// LevenshteinDistanceMax same as LevenshteinDistance but -// attempts to bail early once we know the distance -// will be greater than max -// in which case the first return val will be the max -// and the second will be true, indicating max was exceeded -func LevenshteinDistanceMax(a, b string, max int) (int, bool) { - v, wasMax, _ := LevenshteinDistanceMaxReuseSlice(a, b, max, nil) - return v, wasMax -} - -func LevenshteinDistanceMaxReuseSlice(a, b string, max int, d []int) (int, bool, []int) { - la := len(a) - lb := len(b) - - ld := int(math.Abs(float64(la - lb))) - if ld > max { - return max, true, d - } - - if cap(d) < la+1 { - d = make([]int, la+1) - } - d = d[:la+1] - - var lastdiag, olddiag, temp int - - for i := 1; i <= la; i++ { - d[i] = i - } - for i := 1; i <= lb; i++ { - d[0] = i - lastdiag = i - 1 - rowmin := max + 1 - for j := 1; j <= la; j++ { - olddiag = d[j] - min := d[j] + 1 - if (d[j-1] + 1) < min { - min = d[j-1] + 1 - } - if a[j-1] == b[i-1] { - temp = 0 - } else { - temp = 1 - } - if (lastdiag + temp) < min { - min = lastdiag + temp - } - if min < rowmin { - rowmin = min - } - d[j] = min - - lastdiag = olddiag - } - // after each row if rowmin isn't less than max stop - if rowmin > max { - return max, true, d - } - } - return d[la], false, d -} diff --git a/vendor/github.com/blevesearch/bleve/v2/search/query/bool_field.go 
b/vendor/github.com/blevesearch/bleve/v2/search/query/bool_field.go deleted file mode 100644 index 0272a2feb..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/search/query/bool_field.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package query - -import ( - "github.com/blevesearch/bleve/v2/mapping" - "github.com/blevesearch/bleve/v2/search" - "github.com/blevesearch/bleve/v2/search/searcher" - index "github.com/blevesearch/bleve_index_api" -) - -type BoolFieldQuery struct { - Bool bool `json:"bool"` - FieldVal string `json:"field,omitempty"` - BoostVal *Boost `json:"boost,omitempty"` -} - -// NewBoolFieldQuery creates a new Query for boolean fields -func NewBoolFieldQuery(val bool) *BoolFieldQuery { - return &BoolFieldQuery{ - Bool: val, - } -} - -func (q *BoolFieldQuery) SetBoost(b float64) { - boost := Boost(b) - q.BoostVal = &boost -} - -func (q *BoolFieldQuery) Boost() float64 { - return q.BoostVal.Value() -} - -func (q *BoolFieldQuery) SetField(f string) { - q.FieldVal = f -} - -func (q *BoolFieldQuery) Field() string { - return q.FieldVal -} - -func (q *BoolFieldQuery) Searcher(i index.IndexReader, m mapping.IndexMapping, options search.SearcherOptions) (search.Searcher, error) { - field := q.FieldVal - if q.FieldVal == "" { - field = m.DefaultSearchField() - } - term := "F" - if q.Bool { - term = "T" - } - return searcher.NewTermSearcher(i, term, field, 
q.BoostVal.Value(), options) -} diff --git a/vendor/github.com/blevesearch/bleve/v2/search/query/boolean.go b/vendor/github.com/blevesearch/bleve/v2/search/query/boolean.go deleted file mode 100644 index b9c504f85..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/search/query/boolean.go +++ /dev/null @@ -1,248 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package query - -import ( - "encoding/json" - "fmt" - - "github.com/blevesearch/bleve/v2/mapping" - "github.com/blevesearch/bleve/v2/search" - "github.com/blevesearch/bleve/v2/search/searcher" - index "github.com/blevesearch/bleve_index_api" -) - -type BooleanQuery struct { - Must Query `json:"must,omitempty"` - Should Query `json:"should,omitempty"` - MustNot Query `json:"must_not,omitempty"` - BoostVal *Boost `json:"boost,omitempty"` - queryStringMode bool -} - -// NewBooleanQuery creates a compound Query composed -// of several other Query objects. -// Result documents must satisfy ALL of the -// must Queries. -// Result documents must satisfy NONE of the must not -// Queries. -// Result documents that ALSO satisfy any of the should -// Queries will score higher. 
-func NewBooleanQuery(must []Query, should []Query, mustNot []Query) *BooleanQuery { - - rv := BooleanQuery{} - if len(must) > 0 { - rv.Must = NewConjunctionQuery(must) - } - if len(should) > 0 { - rv.Should = NewDisjunctionQuery(should) - } - if len(mustNot) > 0 { - rv.MustNot = NewDisjunctionQuery(mustNot) - } - - return &rv -} - -func NewBooleanQueryForQueryString(must []Query, should []Query, mustNot []Query) *BooleanQuery { - rv := NewBooleanQuery(nil, nil, nil) - rv.queryStringMode = true - rv.AddMust(must...) - rv.AddShould(should...) - rv.AddMustNot(mustNot...) - return rv -} - -// SetMinShould requires that at least minShould of the -// should Queries must be satisfied. -func (q *BooleanQuery) SetMinShould(minShould float64) { - q.Should.(*DisjunctionQuery).SetMin(minShould) -} - -func (q *BooleanQuery) AddMust(m ...Query) { - if q.Must == nil { - tmp := NewConjunctionQuery([]Query{}) - tmp.queryStringMode = q.queryStringMode - q.Must = tmp - } - for _, mq := range m { - q.Must.(*ConjunctionQuery).AddQuery(mq) - } -} - -func (q *BooleanQuery) AddShould(m ...Query) { - if q.Should == nil { - tmp := NewDisjunctionQuery([]Query{}) - tmp.queryStringMode = q.queryStringMode - q.Should = tmp - } - for _, mq := range m { - q.Should.(*DisjunctionQuery).AddQuery(mq) - } -} - -func (q *BooleanQuery) AddMustNot(m ...Query) { - if q.MustNot == nil { - tmp := NewDisjunctionQuery([]Query{}) - tmp.queryStringMode = q.queryStringMode - q.MustNot = tmp - } - for _, mq := range m { - q.MustNot.(*DisjunctionQuery).AddQuery(mq) - } -} - -func (q *BooleanQuery) SetBoost(b float64) { - boost := Boost(b) - q.BoostVal = &boost -} - -func (q *BooleanQuery) Boost() float64 { - return q.BoostVal.Value() -} - -func (q *BooleanQuery) Searcher(i index.IndexReader, m mapping.IndexMapping, options search.SearcherOptions) (search.Searcher, error) { - var err error - var mustNotSearcher search.Searcher - if q.MustNot != nil { - mustNotSearcher, err = q.MustNot.Searcher(i, m, options) - if 
err != nil { - return nil, err - } - // if must not is MatchNone, reset it to nil - if _, ok := mustNotSearcher.(*searcher.MatchNoneSearcher); ok { - mustNotSearcher = nil - } - } - - var mustSearcher search.Searcher - if q.Must != nil { - mustSearcher, err = q.Must.Searcher(i, m, options) - if err != nil { - return nil, err - } - // if must searcher is MatchNone, reset it to nil - if _, ok := mustSearcher.(*searcher.MatchNoneSearcher); ok { - mustSearcher = nil - } - } - - var shouldSearcher search.Searcher - if q.Should != nil { - shouldSearcher, err = q.Should.Searcher(i, m, options) - if err != nil { - return nil, err - } - // if should searcher is MatchNone, reset it to nil - if _, ok := shouldSearcher.(*searcher.MatchNoneSearcher); ok { - shouldSearcher = nil - } - } - - // if all 3 are nil, return MatchNone - if mustSearcher == nil && shouldSearcher == nil && mustNotSearcher == nil { - return searcher.NewMatchNoneSearcher(i) - } - - // if only mustNotSearcher, start with MatchAll - if mustSearcher == nil && shouldSearcher == nil && mustNotSearcher != nil { - mustSearcher, err = searcher.NewMatchAllSearcher(i, 1.0, options) - if err != nil { - return nil, err - } - } - - // optimization, if only should searcher, just return it instead - if mustSearcher == nil && shouldSearcher != nil && mustNotSearcher == nil { - return shouldSearcher, nil - } - - return searcher.NewBooleanSearcher(i, mustSearcher, shouldSearcher, mustNotSearcher, options) -} - -func (q *BooleanQuery) Validate() error { - if qm, ok := q.Must.(ValidatableQuery); ok { - err := qm.Validate() - if err != nil { - return err - } - } - if qs, ok := q.Should.(ValidatableQuery); ok { - err := qs.Validate() - if err != nil { - return err - } - } - if qmn, ok := q.MustNot.(ValidatableQuery); ok { - err := qmn.Validate() - if err != nil { - return err - } - } - if q.Must == nil && q.Should == nil && q.MustNot == nil { - return fmt.Errorf("boolean query must contain at least one must or should or not must 
clause") - } - return nil -} - -func (q *BooleanQuery) UnmarshalJSON(data []byte) error { - tmp := struct { - Must json.RawMessage `json:"must,omitempty"` - Should json.RawMessage `json:"should,omitempty"` - MustNot json.RawMessage `json:"must_not,omitempty"` - Boost *Boost `json:"boost,omitempty"` - }{} - err := json.Unmarshal(data, &tmp) - if err != nil { - return err - } - - if tmp.Must != nil { - q.Must, err = ParseQuery(tmp.Must) - if err != nil { - return err - } - _, isConjunctionQuery := q.Must.(*ConjunctionQuery) - if !isConjunctionQuery { - return fmt.Errorf("must clause must be conjunction") - } - } - - if tmp.Should != nil { - q.Should, err = ParseQuery(tmp.Should) - if err != nil { - return err - } - _, isDisjunctionQuery := q.Should.(*DisjunctionQuery) - if !isDisjunctionQuery { - return fmt.Errorf("should clause must be disjunction") - } - } - - if tmp.MustNot != nil { - q.MustNot, err = ParseQuery(tmp.MustNot) - if err != nil { - return err - } - _, isDisjunctionQuery := q.MustNot.(*DisjunctionQuery) - if !isDisjunctionQuery { - return fmt.Errorf("must not clause must be disjunction") - } - } - - q.BoostVal = tmp.Boost - - return nil -} diff --git a/vendor/github.com/blevesearch/bleve/v2/search/query/conjunction.go b/vendor/github.com/blevesearch/bleve/v2/search/query/conjunction.go deleted file mode 100644 index 7d647646e..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/search/query/conjunction.go +++ /dev/null @@ -1,112 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package query - -import ( - "encoding/json" - - "github.com/blevesearch/bleve/v2/mapping" - "github.com/blevesearch/bleve/v2/search" - "github.com/blevesearch/bleve/v2/search/searcher" - index "github.com/blevesearch/bleve_index_api" -) - -type ConjunctionQuery struct { - Conjuncts []Query `json:"conjuncts"` - BoostVal *Boost `json:"boost,omitempty"` - queryStringMode bool -} - -// NewConjunctionQuery creates a new compound Query. -// Result documents must satisfy all of the queries. -func NewConjunctionQuery(conjuncts []Query) *ConjunctionQuery { - return &ConjunctionQuery{ - Conjuncts: conjuncts, - } -} - -func (q *ConjunctionQuery) SetBoost(b float64) { - boost := Boost(b) - q.BoostVal = &boost -} - -func (q *ConjunctionQuery) Boost() float64 { - return q.BoostVal.Value() -} - -func (q *ConjunctionQuery) AddQuery(aq ...Query) { - for _, aaq := range aq { - q.Conjuncts = append(q.Conjuncts, aaq) - } -} - -func (q *ConjunctionQuery) Searcher(i index.IndexReader, m mapping.IndexMapping, options search.SearcherOptions) (search.Searcher, error) { - ss := make([]search.Searcher, 0, len(q.Conjuncts)) - for _, conjunct := range q.Conjuncts { - sr, err := conjunct.Searcher(i, m, options) - if err != nil { - for _, searcher := range ss { - if searcher != nil { - _ = searcher.Close() - } - } - return nil, err - } - if _, ok := sr.(*searcher.MatchNoneSearcher); ok && q.queryStringMode { - // in query string mode, skip match none - continue - } - ss = append(ss, sr) - } - - if len(ss) < 1 { - return searcher.NewMatchNoneSearcher(i) - } - - return searcher.NewConjunctionSearcher(i, ss, options) -} - -func (q *ConjunctionQuery) Validate() error { - for _, q := range q.Conjuncts { - if q, ok := q.(ValidatableQuery); ok { - err := q.Validate() - if err != nil { - return err - } - } - } - return nil -} - -func (q *ConjunctionQuery) UnmarshalJSON(data []byte) error { - tmp 
:= struct { - Conjuncts []json.RawMessage `json:"conjuncts"` - Boost *Boost `json:"boost,omitempty"` - }{} - err := json.Unmarshal(data, &tmp) - if err != nil { - return err - } - q.Conjuncts = make([]Query, len(tmp.Conjuncts)) - for i, term := range tmp.Conjuncts { - query, err := ParseQuery(term) - if err != nil { - return err - } - q.Conjuncts[i] = query - } - q.BoostVal = tmp.Boost - return nil -} diff --git a/vendor/github.com/blevesearch/bleve/v2/search/query/date_range.go b/vendor/github.com/blevesearch/bleve/v2/search/query/date_range.go deleted file mode 100644 index 290786ddb..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/search/query/date_range.go +++ /dev/null @@ -1,191 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package query - -import ( - "encoding/json" - "fmt" - "math" - "time" - - "github.com/blevesearch/bleve/v2/analysis/datetime/optional" - "github.com/blevesearch/bleve/v2/mapping" - "github.com/blevesearch/bleve/v2/numeric" - "github.com/blevesearch/bleve/v2/registry" - "github.com/blevesearch/bleve/v2/search" - "github.com/blevesearch/bleve/v2/search/searcher" - index "github.com/blevesearch/bleve_index_api" -) - -// QueryDateTimeParser controls the default query date time parser -var QueryDateTimeParser = optional.Name - -// QueryDateTimeFormat controls the format when Marshaling to JSON -var QueryDateTimeFormat = time.RFC3339 - -var cache = registry.NewCache() - -type BleveQueryTime struct { - time.Time -} - -var MinRFC3339CompatibleTime time.Time -var MaxRFC3339CompatibleTime time.Time - -func init() { - MinRFC3339CompatibleTime, _ = time.Parse(time.RFC3339, "1677-12-01T00:00:00Z") - MaxRFC3339CompatibleTime, _ = time.Parse(time.RFC3339, "2262-04-11T11:59:59Z") -} - -func queryTimeFromString(t string) (time.Time, error) { - dateTimeParser, err := cache.DateTimeParserNamed(QueryDateTimeParser) - if err != nil { - return time.Time{}, err - } - rv, err := dateTimeParser.ParseDateTime(t) - if err != nil { - return time.Time{}, err - } - return rv, nil -} - -func (t *BleveQueryTime) MarshalJSON() ([]byte, error) { - tt := time.Time(t.Time) - return []byte("\"" + tt.Format(QueryDateTimeFormat) + "\""), nil -} - -func (t *BleveQueryTime) UnmarshalJSON(data []byte) error { - var timeString string - err := json.Unmarshal(data, &timeString) - if err != nil { - return err - } - dateTimeParser, err := cache.DateTimeParserNamed(QueryDateTimeParser) - if err != nil { - return err - } - t.Time, err = dateTimeParser.ParseDateTime(timeString) - if err != nil { - return err - } - return nil -} - -type DateRangeQuery struct { - Start BleveQueryTime `json:"start,omitempty"` - End BleveQueryTime `json:"end,omitempty"` - InclusiveStart *bool `json:"inclusive_start,omitempty"` - 
InclusiveEnd *bool `json:"inclusive_end,omitempty"` - FieldVal string `json:"field,omitempty"` - BoostVal *Boost `json:"boost,omitempty"` -} - -// NewDateRangeQuery creates a new Query for ranges -// of date values. -// Date strings are parsed using the DateTimeParser configured in the -// top-level config.QueryDateTimeParser -// Either, but not both endpoints can be nil. -func NewDateRangeQuery(start, end time.Time) *DateRangeQuery { - return NewDateRangeInclusiveQuery(start, end, nil, nil) -} - -// NewDateRangeInclusiveQuery creates a new Query for ranges -// of date values. -// Date strings are parsed using the DateTimeParser configured in the -// top-level config.QueryDateTimeParser -// Either, but not both endpoints can be nil. -// startInclusive and endInclusive control inclusion of the endpoints. -func NewDateRangeInclusiveQuery(start, end time.Time, startInclusive, endInclusive *bool) *DateRangeQuery { - return &DateRangeQuery{ - Start: BleveQueryTime{start}, - End: BleveQueryTime{end}, - InclusiveStart: startInclusive, - InclusiveEnd: endInclusive, - } -} - -func (q *DateRangeQuery) SetBoost(b float64) { - boost := Boost(b) - q.BoostVal = &boost -} - -func (q *DateRangeQuery) Boost() float64 { - return q.BoostVal.Value() -} - -func (q *DateRangeQuery) SetField(f string) { - q.FieldVal = f -} - -func (q *DateRangeQuery) Field() string { - return q.FieldVal -} - -func (q *DateRangeQuery) Searcher(i index.IndexReader, m mapping.IndexMapping, options search.SearcherOptions) (search.Searcher, error) { - min, max, err := q.parseEndpoints() - if err != nil { - return nil, err - } - - field := q.FieldVal - if q.FieldVal == "" { - field = m.DefaultSearchField() - } - - return searcher.NewNumericRangeSearcher(i, min, max, q.InclusiveStart, q.InclusiveEnd, field, q.BoostVal.Value(), options) -} - -func (q *DateRangeQuery) parseEndpoints() (*float64, *float64, error) { - min := math.Inf(-1) - max := math.Inf(1) - if !q.Start.IsZero() { - if 
!isDatetimeCompatible(q.Start) { - // overflow - return nil, nil, fmt.Errorf("invalid/unsupported date range, start: %v", q.Start) - } - startInt64 := q.Start.UnixNano() - min = numeric.Int64ToFloat64(startInt64) - } - if !q.End.IsZero() { - if !isDatetimeCompatible(q.End) { - // overflow - return nil, nil, fmt.Errorf("invalid/unsupported date range, end: %v", q.End) - } - endInt64 := q.End.UnixNano() - max = numeric.Int64ToFloat64(endInt64) - } - - return &min, &max, nil -} - -func (q *DateRangeQuery) Validate() error { - if q.Start.IsZero() && q.End.IsZero() { - return fmt.Errorf("must specify start or end") - } - _, _, err := q.parseEndpoints() - if err != nil { - return err - } - return nil -} - -func isDatetimeCompatible(t BleveQueryTime) bool { - if QueryDateTimeFormat == time.RFC3339 && - (t.Before(MinRFC3339CompatibleTime) || t.After(MaxRFC3339CompatibleTime)) { - return false - } - - return true -} diff --git a/vendor/github.com/blevesearch/bleve/v2/search/query/disjunction.go b/vendor/github.com/blevesearch/bleve/v2/search/query/disjunction.go deleted file mode 100644 index 50957fa67..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/search/query/disjunction.go +++ /dev/null @@ -1,124 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package query - -import ( - "encoding/json" - "fmt" - - "github.com/blevesearch/bleve/v2/mapping" - "github.com/blevesearch/bleve/v2/search" - "github.com/blevesearch/bleve/v2/search/searcher" - index "github.com/blevesearch/bleve_index_api" -) - -type DisjunctionQuery struct { - Disjuncts []Query `json:"disjuncts"` - BoostVal *Boost `json:"boost,omitempty"` - Min float64 `json:"min"` - queryStringMode bool -} - -// NewDisjunctionQuery creates a new compound Query. -// Result documents satisfy at least one Query. -func NewDisjunctionQuery(disjuncts []Query) *DisjunctionQuery { - return &DisjunctionQuery{ - Disjuncts: disjuncts, - } -} - -func (q *DisjunctionQuery) SetBoost(b float64) { - boost := Boost(b) - q.BoostVal = &boost -} - -func (q *DisjunctionQuery) Boost() float64 { - return q.BoostVal.Value() -} - -func (q *DisjunctionQuery) AddQuery(aq ...Query) { - for _, aaq := range aq { - q.Disjuncts = append(q.Disjuncts, aaq) - } -} - -func (q *DisjunctionQuery) SetMin(m float64) { - q.Min = m -} - -func (q *DisjunctionQuery) Searcher(i index.IndexReader, m mapping.IndexMapping, - options search.SearcherOptions) (search.Searcher, error) { - ss := make([]search.Searcher, 0, len(q.Disjuncts)) - for _, disjunct := range q.Disjuncts { - sr, err := disjunct.Searcher(i, m, options) - if err != nil { - for _, searcher := range ss { - if searcher != nil { - _ = searcher.Close() - } - } - return nil, err - } - if _, ok := sr.(*searcher.MatchNoneSearcher); ok && q.queryStringMode { - // in query string mode, skip match none - continue - } - ss = append(ss, sr) - } - - if len(ss) < 1 { - return searcher.NewMatchNoneSearcher(i) - } - - return searcher.NewDisjunctionSearcher(i, ss, q.Min, options) -} - -func (q *DisjunctionQuery) Validate() error { - if int(q.Min) > len(q.Disjuncts) { - return fmt.Errorf("disjunction query has fewer than the minimum number of clauses to satisfy") - } - for _, q := range q.Disjuncts { - if q, ok := q.(ValidatableQuery); ok { - err := 
q.Validate() - if err != nil { - return err - } - } - } - return nil -} - -func (q *DisjunctionQuery) UnmarshalJSON(data []byte) error { - tmp := struct { - Disjuncts []json.RawMessage `json:"disjuncts"` - Boost *Boost `json:"boost,omitempty"` - Min float64 `json:"min"` - }{} - err := json.Unmarshal(data, &tmp) - if err != nil { - return err - } - q.Disjuncts = make([]Query, len(tmp.Disjuncts)) - for i, term := range tmp.Disjuncts { - query, err := ParseQuery(term) - if err != nil { - return err - } - q.Disjuncts[i] = query - } - q.BoostVal = tmp.Boost - q.Min = tmp.Min - return nil -} diff --git a/vendor/github.com/blevesearch/bleve/v2/search/query/docid.go b/vendor/github.com/blevesearch/bleve/v2/search/query/docid.go deleted file mode 100644 index 1d273394d..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/search/query/docid.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright (c) 2015 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package query - -import ( - "github.com/blevesearch/bleve/v2/mapping" - "github.com/blevesearch/bleve/v2/search" - "github.com/blevesearch/bleve/v2/search/searcher" - index "github.com/blevesearch/bleve_index_api" -) - -type DocIDQuery struct { - IDs []string `json:"ids"` - BoostVal *Boost `json:"boost,omitempty"` -} - -// NewDocIDQuery creates a new Query object returning indexed documents among -// the specified set. Combine it with ConjunctionQuery to restrict the scope of -// other queries output. 
-func NewDocIDQuery(ids []string) *DocIDQuery { - return &DocIDQuery{ - IDs: ids, - } -} - -func (q *DocIDQuery) SetBoost(b float64) { - boost := Boost(b) - q.BoostVal = &boost -} - -func (q *DocIDQuery) Boost() float64 { - return q.BoostVal.Value() -} - -func (q *DocIDQuery) Searcher(i index.IndexReader, m mapping.IndexMapping, options search.SearcherOptions) (search.Searcher, error) { - return searcher.NewDocIDSearcher(i, q.IDs, q.BoostVal.Value(), options) -} diff --git a/vendor/github.com/blevesearch/bleve/v2/search/query/fuzzy.go b/vendor/github.com/blevesearch/bleve/v2/search/query/fuzzy.go deleted file mode 100644 index aceaa802d..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/search/query/fuzzy.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package query - -import ( - "github.com/blevesearch/bleve/v2/mapping" - "github.com/blevesearch/bleve/v2/search" - "github.com/blevesearch/bleve/v2/search/searcher" - index "github.com/blevesearch/bleve_index_api" -) - -type FuzzyQuery struct { - Term string `json:"term"` - Prefix int `json:"prefix_length"` - Fuzziness int `json:"fuzziness"` - FieldVal string `json:"field,omitempty"` - BoostVal *Boost `json:"boost,omitempty"` -} - -// NewFuzzyQuery creates a new Query which finds -// documents containing terms within a specific -// fuzziness of the specified term. -// The default fuzziness is 1. 
-// -// The current implementation uses Levenshtein edit -// distance as the fuzziness metric. -func NewFuzzyQuery(term string) *FuzzyQuery { - return &FuzzyQuery{ - Term: term, - Fuzziness: 1, - } -} - -func (q *FuzzyQuery) SetBoost(b float64) { - boost := Boost(b) - q.BoostVal = &boost -} - -func (q *FuzzyQuery) Boost() float64 { - return q.BoostVal.Value() -} - -func (q *FuzzyQuery) SetField(f string) { - q.FieldVal = f -} - -func (q *FuzzyQuery) Field() string { - return q.FieldVal -} - -func (q *FuzzyQuery) SetFuzziness(f int) { - q.Fuzziness = f -} - -func (q *FuzzyQuery) SetPrefix(p int) { - q.Prefix = p -} - -func (q *FuzzyQuery) Searcher(i index.IndexReader, m mapping.IndexMapping, options search.SearcherOptions) (search.Searcher, error) { - field := q.FieldVal - if q.FieldVal == "" { - field = m.DefaultSearchField() - } - return searcher.NewFuzzySearcher(i, q.Term, q.Prefix, q.Fuzziness, field, q.BoostVal.Value(), options) -} diff --git a/vendor/github.com/blevesearch/bleve/v2/search/query/geo_boundingbox.go b/vendor/github.com/blevesearch/bleve/v2/search/query/geo_boundingbox.go deleted file mode 100644 index be4b5a8b9..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/search/query/geo_boundingbox.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright (c) 2017 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package query - -import ( - "encoding/json" - "fmt" - - "github.com/blevesearch/bleve/v2/geo" - "github.com/blevesearch/bleve/v2/mapping" - "github.com/blevesearch/bleve/v2/search" - "github.com/blevesearch/bleve/v2/search/searcher" - index "github.com/blevesearch/bleve_index_api" -) - -type GeoBoundingBoxQuery struct { - TopLeft []float64 `json:"top_left,omitempty"` - BottomRight []float64 `json:"bottom_right,omitempty"` - FieldVal string `json:"field,omitempty"` - BoostVal *Boost `json:"boost,omitempty"` -} - -func NewGeoBoundingBoxQuery(topLeftLon, topLeftLat, bottomRightLon, bottomRightLat float64) *GeoBoundingBoxQuery { - return &GeoBoundingBoxQuery{ - TopLeft: []float64{topLeftLon, topLeftLat}, - BottomRight: []float64{bottomRightLon, bottomRightLat}, - } -} - -func (q *GeoBoundingBoxQuery) SetBoost(b float64) { - boost := Boost(b) - q.BoostVal = &boost -} - -func (q *GeoBoundingBoxQuery) Boost() float64 { - return q.BoostVal.Value() -} - -func (q *GeoBoundingBoxQuery) SetField(f string) { - q.FieldVal = f -} - -func (q *GeoBoundingBoxQuery) Field() string { - return q.FieldVal -} - -func (q *GeoBoundingBoxQuery) Searcher(i index.IndexReader, m mapping.IndexMapping, options search.SearcherOptions) (search.Searcher, error) { - field := q.FieldVal - if q.FieldVal == "" { - field = m.DefaultSearchField() - } - - if q.BottomRight[0] < q.TopLeft[0] { - // cross date line, rewrite as two parts - - leftSearcher, err := searcher.NewGeoBoundingBoxSearcher(i, -180, q.BottomRight[1], q.BottomRight[0], q.TopLeft[1], field, q.BoostVal.Value(), options, true) - if err != nil { - return nil, err - } - rightSearcher, err := searcher.NewGeoBoundingBoxSearcher(i, q.TopLeft[0], q.BottomRight[1], 180, q.TopLeft[1], field, q.BoostVal.Value(), options, true) - if err != nil { - _ = leftSearcher.Close() - return nil, err - } - - return searcher.NewDisjunctionSearcher(i, []search.Searcher{leftSearcher, rightSearcher}, 0, options) - } - - return 
searcher.NewGeoBoundingBoxSearcher(i, q.TopLeft[0], q.BottomRight[1], q.BottomRight[0], q.TopLeft[1], field, q.BoostVal.Value(), options, true) -} - -func (q *GeoBoundingBoxQuery) Validate() error { - return nil -} - -func (q *GeoBoundingBoxQuery) UnmarshalJSON(data []byte) error { - tmp := struct { - TopLeft interface{} `json:"top_left,omitempty"` - BottomRight interface{} `json:"bottom_right,omitempty"` - FieldVal string `json:"field,omitempty"` - BoostVal *Boost `json:"boost,omitempty"` - }{} - err := json.Unmarshal(data, &tmp) - if err != nil { - return err - } - // now use our generic point parsing code from the geo package - lon, lat, found := geo.ExtractGeoPoint(tmp.TopLeft) - if !found { - return fmt.Errorf("geo location top_left not in a valid format") - } - q.TopLeft = []float64{lon, lat} - lon, lat, found = geo.ExtractGeoPoint(tmp.BottomRight) - if !found { - return fmt.Errorf("geo location bottom_right not in a valid format") - } - q.BottomRight = []float64{lon, lat} - q.FieldVal = tmp.FieldVal - q.BoostVal = tmp.BoostVal - return nil -} diff --git a/vendor/github.com/blevesearch/bleve/v2/search/query/geo_boundingpolygon.go b/vendor/github.com/blevesearch/bleve/v2/search/query/geo_boundingpolygon.go deleted file mode 100644 index abb8ccd7c..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/search/query/geo_boundingpolygon.go +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright (c) 2019 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package query - -import ( - "encoding/json" - "fmt" - - "github.com/blevesearch/bleve/v2/geo" - "github.com/blevesearch/bleve/v2/mapping" - "github.com/blevesearch/bleve/v2/search" - "github.com/blevesearch/bleve/v2/search/searcher" - index "github.com/blevesearch/bleve_index_api" -) - -type GeoBoundingPolygonQuery struct { - Points []geo.Point `json:"polygon_points"` - FieldVal string `json:"field,omitempty"` - BoostVal *Boost `json:"boost,omitempty"` -} - -func NewGeoBoundingPolygonQuery(points []geo.Point) *GeoBoundingPolygonQuery { - return &GeoBoundingPolygonQuery{ - Points: points} -} - -func (q *GeoBoundingPolygonQuery) SetBoost(b float64) { - boost := Boost(b) - q.BoostVal = &boost -} - -func (q *GeoBoundingPolygonQuery) Boost() float64 { - return q.BoostVal.Value() -} - -func (q *GeoBoundingPolygonQuery) SetField(f string) { - q.FieldVal = f -} - -func (q *GeoBoundingPolygonQuery) Field() string { - return q.FieldVal -} - -func (q *GeoBoundingPolygonQuery) Searcher(i index.IndexReader, - m mapping.IndexMapping, options search.SearcherOptions) (search.Searcher, error) { - field := q.FieldVal - if q.FieldVal == "" { - field = m.DefaultSearchField() - } - - return searcher.NewGeoBoundedPolygonSearcher(i, q.Points, field, q.BoostVal.Value(), options) -} - -func (q *GeoBoundingPolygonQuery) Validate() error { - return nil -} - -func (q *GeoBoundingPolygonQuery) UnmarshalJSON(data []byte) error { - tmp := struct { - Points []interface{} `json:"polygon_points"` - FieldVal string `json:"field,omitempty"` - BoostVal *Boost `json:"boost,omitempty"` - }{} - err := json.Unmarshal(data, &tmp) - if err != nil { - return err - } - - q.Points = make([]geo.Point, 0, len(tmp.Points)) - for _, i := range tmp.Points { - // now use our generic point parsing code from the geo package - lon, lat, found := geo.ExtractGeoPoint(i) - if !found { - return fmt.Errorf("geo polygon point: %v is not in a valid format", i) - } - q.Points = append(q.Points, geo.Point{Lon: lon, Lat: 
lat}) - } - - q.FieldVal = tmp.FieldVal - q.BoostVal = tmp.BoostVal - return nil -} diff --git a/vendor/github.com/blevesearch/bleve/v2/search/query/geo_distance.go b/vendor/github.com/blevesearch/bleve/v2/search/query/geo_distance.go deleted file mode 100644 index d5174c227..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/search/query/geo_distance.go +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright (c) 2017 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package query - -import ( - "encoding/json" - "fmt" - - "github.com/blevesearch/bleve/v2/geo" - "github.com/blevesearch/bleve/v2/mapping" - "github.com/blevesearch/bleve/v2/search" - "github.com/blevesearch/bleve/v2/search/searcher" - index "github.com/blevesearch/bleve_index_api" -) - -type GeoDistanceQuery struct { - Location []float64 `json:"location,omitempty"` - Distance string `json:"distance,omitempty"` - FieldVal string `json:"field,omitempty"` - BoostVal *Boost `json:"boost,omitempty"` -} - -func NewGeoDistanceQuery(lon, lat float64, distance string) *GeoDistanceQuery { - return &GeoDistanceQuery{ - Location: []float64{lon, lat}, - Distance: distance, - } -} - -func (q *GeoDistanceQuery) SetBoost(b float64) { - boost := Boost(b) - q.BoostVal = &boost -} - -func (q *GeoDistanceQuery) Boost() float64 { - return q.BoostVal.Value() -} - -func (q *GeoDistanceQuery) SetField(f string) { - q.FieldVal = f -} - -func (q *GeoDistanceQuery) Field() string { - return q.FieldVal -} - 
-func (q *GeoDistanceQuery) Searcher(i index.IndexReader, m mapping.IndexMapping, - options search.SearcherOptions) (search.Searcher, error) { - field := q.FieldVal - if q.FieldVal == "" { - field = m.DefaultSearchField() - } - - dist, err := geo.ParseDistance(q.Distance) - if err != nil { - return nil, err - } - - return searcher.NewGeoPointDistanceSearcher(i, q.Location[0], q.Location[1], - dist, field, q.BoostVal.Value(), options) -} - -func (q *GeoDistanceQuery) Validate() error { - return nil -} - -func (q *GeoDistanceQuery) UnmarshalJSON(data []byte) error { - tmp := struct { - Location interface{} `json:"location,omitempty"` - Distance string `json:"distance,omitempty"` - FieldVal string `json:"field,omitempty"` - BoostVal *Boost `json:"boost,omitempty"` - }{} - err := json.Unmarshal(data, &tmp) - if err != nil { - return err - } - // now use our generic point parsing code from the geo package - lon, lat, found := geo.ExtractGeoPoint(tmp.Location) - if !found { - return fmt.Errorf("geo location not in a valid format") - } - q.Location = []float64{lon, lat} - q.Distance = tmp.Distance - q.FieldVal = tmp.FieldVal - q.BoostVal = tmp.BoostVal - return nil -} diff --git a/vendor/github.com/blevesearch/bleve/v2/search/query/match.go b/vendor/github.com/blevesearch/bleve/v2/search/query/match.go deleted file mode 100644 index da1dc091f..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/search/query/match.go +++ /dev/null @@ -1,176 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package query - -import ( - "encoding/json" - "fmt" - - "github.com/blevesearch/bleve/v2/mapping" - "github.com/blevesearch/bleve/v2/search" - index "github.com/blevesearch/bleve_index_api" -) - -type MatchQuery struct { - Match string `json:"match"` - FieldVal string `json:"field,omitempty"` - Analyzer string `json:"analyzer,omitempty"` - BoostVal *Boost `json:"boost,omitempty"` - Prefix int `json:"prefix_length"` - Fuzziness int `json:"fuzziness"` - Operator MatchQueryOperator `json:"operator,omitempty"` -} - -type MatchQueryOperator int - -const ( - // Document must satisfy AT LEAST ONE of term searches. - MatchQueryOperatorOr = MatchQueryOperator(0) - // Document must satisfy ALL of term searches. - MatchQueryOperatorAnd = MatchQueryOperator(1) -) - -func (o MatchQueryOperator) MarshalJSON() ([]byte, error) { - switch o { - case MatchQueryOperatorOr: - return json.Marshal("or") - case MatchQueryOperatorAnd: - return json.Marshal("and") - default: - return nil, fmt.Errorf("cannot marshal match operator %d to JSON", o) - } -} - -func (o *MatchQueryOperator) UnmarshalJSON(data []byte) error { - var operatorString string - err := json.Unmarshal(data, &operatorString) - if err != nil { - return err - } - - switch operatorString { - case "or": - *o = MatchQueryOperatorOr - return nil - case "and": - *o = MatchQueryOperatorAnd - return nil - default: - return fmt.Errorf("cannot unmarshal match operator '%v' from JSON", o) - } -} - -// NewMatchQuery creates a Query for matching text. -// An Analyzer is chosen based on the field. -// Input text is analyzed using this analyzer. -// Token terms resulting from this analysis are -// used to perform term searches. Result documents -// must satisfy at least one of these term searches. 
-func NewMatchQuery(match string) *MatchQuery { - return &MatchQuery{ - Match: match, - Operator: MatchQueryOperatorOr, - } -} - -func (q *MatchQuery) SetBoost(b float64) { - boost := Boost(b) - q.BoostVal = &boost -} - -func (q *MatchQuery) Boost() float64 { - return q.BoostVal.Value() -} - -func (q *MatchQuery) SetField(f string) { - q.FieldVal = f -} - -func (q *MatchQuery) Field() string { - return q.FieldVal -} - -func (q *MatchQuery) SetFuzziness(f int) { - q.Fuzziness = f -} - -func (q *MatchQuery) SetPrefix(p int) { - q.Prefix = p -} - -func (q *MatchQuery) SetOperator(operator MatchQueryOperator) { - q.Operator = operator -} - -func (q *MatchQuery) Searcher(i index.IndexReader, m mapping.IndexMapping, options search.SearcherOptions) (search.Searcher, error) { - - field := q.FieldVal - if q.FieldVal == "" { - field = m.DefaultSearchField() - } - - analyzerName := "" - if q.Analyzer != "" { - analyzerName = q.Analyzer - } else { - analyzerName = m.AnalyzerNameForPath(field) - } - analyzer := m.AnalyzerNamed(analyzerName) - - if analyzer == nil { - return nil, fmt.Errorf("no analyzer named '%s' registered", q.Analyzer) - } - - tokens := analyzer.Analyze([]byte(q.Match)) - if len(tokens) > 0 { - - tqs := make([]Query, len(tokens)) - if q.Fuzziness != 0 { - for i, token := range tokens { - query := NewFuzzyQuery(string(token.Term)) - query.SetFuzziness(q.Fuzziness) - query.SetPrefix(q.Prefix) - query.SetField(field) - query.SetBoost(q.BoostVal.Value()) - tqs[i] = query - } - } else { - for i, token := range tokens { - tq := NewTermQuery(string(token.Term)) - tq.SetField(field) - tq.SetBoost(q.BoostVal.Value()) - tqs[i] = tq - } - } - - switch q.Operator { - case MatchQueryOperatorOr: - shouldQuery := NewDisjunctionQuery(tqs) - shouldQuery.SetMin(1) - shouldQuery.SetBoost(q.BoostVal.Value()) - return shouldQuery.Searcher(i, m, options) - - case MatchQueryOperatorAnd: - mustQuery := NewConjunctionQuery(tqs) - mustQuery.SetBoost(q.BoostVal.Value()) - return 
mustQuery.Searcher(i, m, options) - - default: - return nil, fmt.Errorf("unhandled operator %d", q.Operator) - } - } - noneQuery := NewMatchNoneQuery() - return noneQuery.Searcher(i, m, options) -} diff --git a/vendor/github.com/blevesearch/bleve/v2/search/query/match_all.go b/vendor/github.com/blevesearch/bleve/v2/search/query/match_all.go deleted file mode 100644 index a31f25abc..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/search/query/match_all.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package query - -import ( - "encoding/json" - - "github.com/blevesearch/bleve/v2/mapping" - "github.com/blevesearch/bleve/v2/search" - "github.com/blevesearch/bleve/v2/search/searcher" - index "github.com/blevesearch/bleve_index_api" -) - -type MatchAllQuery struct { - BoostVal *Boost `json:"boost,omitempty"` -} - -// NewMatchAllQuery creates a Query which will -// match all documents in the index. 
-func NewMatchAllQuery() *MatchAllQuery { - return &MatchAllQuery{} -} - -func (q *MatchAllQuery) SetBoost(b float64) { - boost := Boost(b) - q.BoostVal = &boost -} - -func (q *MatchAllQuery) Boost() float64 { - return q.BoostVal.Value() -} - -func (q *MatchAllQuery) Searcher(i index.IndexReader, m mapping.IndexMapping, options search.SearcherOptions) (search.Searcher, error) { - return searcher.NewMatchAllSearcher(i, q.BoostVal.Value(), options) -} - -func (q *MatchAllQuery) MarshalJSON() ([]byte, error) { - tmp := map[string]interface{}{ - "boost": q.BoostVal, - "match_all": map[string]interface{}{}, - } - return json.Marshal(tmp) -} diff --git a/vendor/github.com/blevesearch/bleve/v2/search/query/match_none.go b/vendor/github.com/blevesearch/bleve/v2/search/query/match_none.go deleted file mode 100644 index 69b44186c..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/search/query/match_none.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package query - -import ( - "encoding/json" - - "github.com/blevesearch/bleve/v2/mapping" - "github.com/blevesearch/bleve/v2/search" - "github.com/blevesearch/bleve/v2/search/searcher" - index "github.com/blevesearch/bleve_index_api" -) - -type MatchNoneQuery struct { - BoostVal *Boost `json:"boost,omitempty"` -} - -// NewMatchNoneQuery creates a Query which will not -// match any documents in the index. 
-func NewMatchNoneQuery() *MatchNoneQuery { - return &MatchNoneQuery{} -} - -func (q *MatchNoneQuery) SetBoost(b float64) { - boost := Boost(b) - q.BoostVal = &boost -} - -func (q *MatchNoneQuery) Boost() float64 { - return q.BoostVal.Value() -} - -func (q *MatchNoneQuery) Searcher(i index.IndexReader, m mapping.IndexMapping, options search.SearcherOptions) (search.Searcher, error) { - return searcher.NewMatchNoneSearcher(i) -} - -func (q *MatchNoneQuery) MarshalJSON() ([]byte, error) { - tmp := map[string]interface{}{ - "boost": q.BoostVal, - "match_none": map[string]interface{}{}, - } - return json.Marshal(tmp) -} diff --git a/vendor/github.com/blevesearch/bleve/v2/search/query/match_phrase.go b/vendor/github.com/blevesearch/bleve/v2/search/query/match_phrase.go deleted file mode 100644 index 057245fbc..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/search/query/match_phrase.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package query - -import ( - "fmt" - - "github.com/blevesearch/bleve/v2/analysis" - "github.com/blevesearch/bleve/v2/mapping" - "github.com/blevesearch/bleve/v2/search" - index "github.com/blevesearch/bleve_index_api" -) - -type MatchPhraseQuery struct { - MatchPhrase string `json:"match_phrase"` - FieldVal string `json:"field,omitempty"` - Analyzer string `json:"analyzer,omitempty"` - BoostVal *Boost `json:"boost,omitempty"` -} - -// NewMatchPhraseQuery creates a new Query object -// for matching phrases in the index. -// An Analyzer is chosen based on the field. -// Input text is analyzed using this analyzer. -// Token terms resulting from this analysis are -// used to build a search phrase. Result documents -// must match this phrase. Queried field must have been indexed with -// IncludeTermVectors set to true. -func NewMatchPhraseQuery(matchPhrase string) *MatchPhraseQuery { - return &MatchPhraseQuery{ - MatchPhrase: matchPhrase, - } -} - -func (q *MatchPhraseQuery) SetBoost(b float64) { - boost := Boost(b) - q.BoostVal = &boost -} - -func (q *MatchPhraseQuery) Boost() float64 { - return q.BoostVal.Value() -} - -func (q *MatchPhraseQuery) SetField(f string) { - q.FieldVal = f -} - -func (q *MatchPhraseQuery) Field() string { - return q.FieldVal -} - -func (q *MatchPhraseQuery) Searcher(i index.IndexReader, m mapping.IndexMapping, options search.SearcherOptions) (search.Searcher, error) { - field := q.FieldVal - if q.FieldVal == "" { - field = m.DefaultSearchField() - } - - analyzerName := "" - if q.Analyzer != "" { - analyzerName = q.Analyzer - } else { - analyzerName = m.AnalyzerNameForPath(field) - } - analyzer := m.AnalyzerNamed(analyzerName) - if analyzer == nil { - return nil, fmt.Errorf("no analyzer named '%s' registered", q.Analyzer) - } - - tokens := analyzer.Analyze([]byte(q.MatchPhrase)) - if len(tokens) > 0 { - phrase := tokenStreamToPhrase(tokens) - phraseQuery := NewMultiPhraseQuery(phrase, field) - phraseQuery.SetBoost(q.BoostVal.Value()) - 
return phraseQuery.Searcher(i, m, options) - } - noneQuery := NewMatchNoneQuery() - return noneQuery.Searcher(i, m, options) -} - -func tokenStreamToPhrase(tokens analysis.TokenStream) [][]string { - firstPosition := int(^uint(0) >> 1) - lastPosition := 0 - for _, token := range tokens { - if token.Position < firstPosition { - firstPosition = token.Position - } - if token.Position > lastPosition { - lastPosition = token.Position - } - } - phraseLen := lastPosition - firstPosition + 1 - if phraseLen > 0 { - rv := make([][]string, phraseLen) - for _, token := range tokens { - pos := token.Position - firstPosition - rv[pos] = append(rv[pos], string(token.Term)) - } - return rv - } - return nil -} diff --git a/vendor/github.com/blevesearch/bleve/v2/search/query/multi_phrase.go b/vendor/github.com/blevesearch/bleve/v2/search/query/multi_phrase.go deleted file mode 100644 index d75dc0c89..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/search/query/multi_phrase.go +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package query - -import ( - "encoding/json" - "fmt" - - "github.com/blevesearch/bleve/v2/mapping" - "github.com/blevesearch/bleve/v2/search" - "github.com/blevesearch/bleve/v2/search/searcher" - index "github.com/blevesearch/bleve_index_api" -) - -type MultiPhraseQuery struct { - Terms [][]string `json:"terms"` - Field string `json:"field,omitempty"` - BoostVal *Boost `json:"boost,omitempty"` -} - -// NewMultiPhraseQuery creates a new Query for finding -// term phrases in the index. -// It is like PhraseQuery, but each position in the -// phrase may be satisfied by a list of terms -// as opposed to just one. -// At least one of the terms must exist in the correct -// order, at the correct index offsets, in the -// specified field. Queried field must have been indexed with -// IncludeTermVectors set to true. -func NewMultiPhraseQuery(terms [][]string, field string) *MultiPhraseQuery { - return &MultiPhraseQuery{ - Terms: terms, - Field: field, - } -} - -func (q *MultiPhraseQuery) SetBoost(b float64) { - boost := Boost(b) - q.BoostVal = &boost -} - -func (q *MultiPhraseQuery) Boost() float64 { - return q.BoostVal.Value() -} - -func (q *MultiPhraseQuery) Searcher(i index.IndexReader, m mapping.IndexMapping, options search.SearcherOptions) (search.Searcher, error) { - return searcher.NewMultiPhraseSearcher(i, q.Terms, q.Field, options) -} - -func (q *MultiPhraseQuery) Validate() error { - if len(q.Terms) < 1 { - return fmt.Errorf("phrase query must contain at least one term") - } - return nil -} - -func (q *MultiPhraseQuery) UnmarshalJSON(data []byte) error { - type _mphraseQuery MultiPhraseQuery - tmp := _mphraseQuery{} - err := json.Unmarshal(data, &tmp) - if err != nil { - return err - } - q.Terms = tmp.Terms - q.Field = tmp.Field - q.BoostVal = tmp.BoostVal - return nil -} diff --git a/vendor/github.com/blevesearch/bleve/v2/search/query/numeric_range.go b/vendor/github.com/blevesearch/bleve/v2/search/query/numeric_range.go deleted file mode 100644 index 
a1fe7b6c1..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/search/query/numeric_range.go +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package query - -import ( - "fmt" - - "github.com/blevesearch/bleve/v2/mapping" - "github.com/blevesearch/bleve/v2/search" - "github.com/blevesearch/bleve/v2/search/searcher" - index "github.com/blevesearch/bleve_index_api" -) - -type NumericRangeQuery struct { - Min *float64 `json:"min,omitempty"` - Max *float64 `json:"max,omitempty"` - InclusiveMin *bool `json:"inclusive_min,omitempty"` - InclusiveMax *bool `json:"inclusive_max,omitempty"` - FieldVal string `json:"field,omitempty"` - BoostVal *Boost `json:"boost,omitempty"` -} - -// NewNumericRangeQuery creates a new Query for ranges -// of numeric values. -// Either, but not both endpoints can be nil. -// The minimum value is inclusive. -// The maximum value is exclusive. -func NewNumericRangeQuery(min, max *float64) *NumericRangeQuery { - return NewNumericRangeInclusiveQuery(min, max, nil, nil) -} - -// NewNumericRangeInclusiveQuery creates a new Query for ranges -// of numeric values. -// Either, but not both endpoints can be nil. -// Control endpoint inclusion with inclusiveMin, inclusiveMax. 
-func NewNumericRangeInclusiveQuery(min, max *float64, minInclusive, maxInclusive *bool) *NumericRangeQuery { - return &NumericRangeQuery{ - Min: min, - Max: max, - InclusiveMin: minInclusive, - InclusiveMax: maxInclusive, - } -} - -func (q *NumericRangeQuery) SetBoost(b float64) { - boost := Boost(b) - q.BoostVal = &boost -} - -func (q *NumericRangeQuery) Boost() float64 { - return q.BoostVal.Value() -} - -func (q *NumericRangeQuery) SetField(f string) { - q.FieldVal = f -} - -func (q *NumericRangeQuery) Field() string { - return q.FieldVal -} - -func (q *NumericRangeQuery) Searcher(i index.IndexReader, m mapping.IndexMapping, options search.SearcherOptions) (search.Searcher, error) { - field := q.FieldVal - if q.FieldVal == "" { - field = m.DefaultSearchField() - } - return searcher.NewNumericRangeSearcher(i, q.Min, q.Max, q.InclusiveMin, q.InclusiveMax, field, q.BoostVal.Value(), options) -} - -func (q *NumericRangeQuery) Validate() error { - if q.Min == nil && q.Min == q.Max { - return fmt.Errorf("numeric range query must specify min or max") - } - return nil -} diff --git a/vendor/github.com/blevesearch/bleve/v2/search/query/phrase.go b/vendor/github.com/blevesearch/bleve/v2/search/query/phrase.go deleted file mode 100644 index d6da11853..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/search/query/phrase.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package query - -import ( - "encoding/json" - "fmt" - - "github.com/blevesearch/bleve/v2/mapping" - "github.com/blevesearch/bleve/v2/search" - "github.com/blevesearch/bleve/v2/search/searcher" - index "github.com/blevesearch/bleve_index_api" -) - -type PhraseQuery struct { - Terms []string `json:"terms"` - Field string `json:"field,omitempty"` - BoostVal *Boost `json:"boost,omitempty"` -} - -// NewPhraseQuery creates a new Query for finding -// exact term phrases in the index. -// The provided terms must exist in the correct -// order, at the correct index offsets, in the -// specified field. Queried field must have been indexed with -// IncludeTermVectors set to true. -func NewPhraseQuery(terms []string, field string) *PhraseQuery { - return &PhraseQuery{ - Terms: terms, - Field: field, - } -} - -func (q *PhraseQuery) SetBoost(b float64) { - boost := Boost(b) - q.BoostVal = &boost -} - -func (q *PhraseQuery) Boost() float64 { - return q.BoostVal.Value() -} - -func (q *PhraseQuery) Searcher(i index.IndexReader, m mapping.IndexMapping, options search.SearcherOptions) (search.Searcher, error) { - return searcher.NewPhraseSearcher(i, q.Terms, q.Field, options) -} - -func (q *PhraseQuery) Validate() error { - if len(q.Terms) < 1 { - return fmt.Errorf("phrase query must contain at least one term") - } - return nil -} - -func (q *PhraseQuery) UnmarshalJSON(data []byte) error { - type _phraseQuery PhraseQuery - tmp := _phraseQuery{} - err := json.Unmarshal(data, &tmp) - if err != nil { - return err - } - q.Terms = tmp.Terms - q.Field = tmp.Field - q.BoostVal = tmp.BoostVal - return nil -} diff --git a/vendor/github.com/blevesearch/bleve/v2/search/query/prefix.go b/vendor/github.com/blevesearch/bleve/v2/search/query/prefix.go deleted file mode 100644 index 05dc40c04..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/search/query/prefix.go +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package query - -import ( - "github.com/blevesearch/bleve/v2/mapping" - "github.com/blevesearch/bleve/v2/search" - "github.com/blevesearch/bleve/v2/search/searcher" - index "github.com/blevesearch/bleve_index_api" -) - -type PrefixQuery struct { - Prefix string `json:"prefix"` - FieldVal string `json:"field,omitempty"` - BoostVal *Boost `json:"boost,omitempty"` -} - -// NewPrefixQuery creates a new Query which finds -// documents containing terms that start with the -// specified prefix. 
-func NewPrefixQuery(prefix string) *PrefixQuery { - return &PrefixQuery{ - Prefix: prefix, - } -} - -func (q *PrefixQuery) SetBoost(b float64) { - boost := Boost(b) - q.BoostVal = &boost -} - -func (q *PrefixQuery) Boost() float64 { - return q.BoostVal.Value() -} - -func (q *PrefixQuery) SetField(f string) { - q.FieldVal = f -} - -func (q *PrefixQuery) Field() string { - return q.FieldVal -} - -func (q *PrefixQuery) Searcher(i index.IndexReader, m mapping.IndexMapping, options search.SearcherOptions) (search.Searcher, error) { - field := q.FieldVal - if q.FieldVal == "" { - field = m.DefaultSearchField() - } - return searcher.NewTermPrefixSearcher(i, q.Prefix, field, q.BoostVal.Value(), options) -} diff --git a/vendor/github.com/blevesearch/bleve/v2/search/query/query.go b/vendor/github.com/blevesearch/bleve/v2/search/query/query.go deleted file mode 100644 index 7f2781c37..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/search/query/query.go +++ /dev/null @@ -1,361 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package query - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "log" - - "github.com/blevesearch/bleve/v2/mapping" - "github.com/blevesearch/bleve/v2/search" - index "github.com/blevesearch/bleve_index_api" -) - -var logger = log.New(ioutil.Discard, "bleve mapping ", log.LstdFlags) - -// SetLog sets the logger used for logging -// by default log messages are sent to ioutil.Discard -func SetLog(l *log.Logger) { - logger = l -} - -// A Query represents a description of the type -// and parameters for a query into the index. -type Query interface { - Searcher(i index.IndexReader, m mapping.IndexMapping, - options search.SearcherOptions) (search.Searcher, error) -} - -// A BoostableQuery represents a Query which can be boosted -// relative to other queries. -type BoostableQuery interface { - Query - SetBoost(b float64) - Boost() float64 -} - -// A FieldableQuery represents a Query which can be restricted -// to a single field. -type FieldableQuery interface { - Query - SetField(f string) - Field() string -} - -// A ValidatableQuery represents a Query which can be validated -// prior to execution. -type ValidatableQuery interface { - Query - Validate() error -} - -// ParseQuery deserializes a JSON representation of -// a Query object. 
-func ParseQuery(input []byte) (Query, error) { - var tmp map[string]interface{} - err := json.Unmarshal(input, &tmp) - if err != nil { - return nil, err - } - _, isMatchQuery := tmp["match"] - _, hasFuzziness := tmp["fuzziness"] - if hasFuzziness && !isMatchQuery { - var rv FuzzyQuery - err := json.Unmarshal(input, &rv) - if err != nil { - return nil, err - } - return &rv, nil - } - _, isTermQuery := tmp["term"] - if isTermQuery { - var rv TermQuery - err := json.Unmarshal(input, &rv) - if err != nil { - return nil, err - } - return &rv, nil - } - if isMatchQuery { - var rv MatchQuery - err := json.Unmarshal(input, &rv) - if err != nil { - return nil, err - } - return &rv, nil - } - _, isMatchPhraseQuery := tmp["match_phrase"] - if isMatchPhraseQuery { - var rv MatchPhraseQuery - err := json.Unmarshal(input, &rv) - if err != nil { - return nil, err - } - return &rv, nil - } - _, hasMust := tmp["must"] - _, hasShould := tmp["should"] - _, hasMustNot := tmp["must_not"] - if hasMust || hasShould || hasMustNot { - var rv BooleanQuery - err := json.Unmarshal(input, &rv) - if err != nil { - return nil, err - } - return &rv, nil - } - _, hasTerms := tmp["terms"] - if hasTerms { - var rv PhraseQuery - err := json.Unmarshal(input, &rv) - if err != nil { - // now try multi-phrase - var rv2 MultiPhraseQuery - err = json.Unmarshal(input, &rv2) - if err != nil { - return nil, err - } - return &rv2, nil - } - return &rv, nil - } - _, hasConjuncts := tmp["conjuncts"] - if hasConjuncts { - var rv ConjunctionQuery - err := json.Unmarshal(input, &rv) - if err != nil { - return nil, err - } - return &rv, nil - } - _, hasDisjuncts := tmp["disjuncts"] - if hasDisjuncts { - var rv DisjunctionQuery - err := json.Unmarshal(input, &rv) - if err != nil { - return nil, err - } - return &rv, nil - } - - _, hasSyntaxQuery := tmp["query"] - if hasSyntaxQuery { - var rv QueryStringQuery - err := json.Unmarshal(input, &rv) - if err != nil { - return nil, err - } - return &rv, nil - } - _, hasMin 
:= tmp["min"].(float64) - _, hasMax := tmp["max"].(float64) - if hasMin || hasMax { - var rv NumericRangeQuery - err := json.Unmarshal(input, &rv) - if err != nil { - return nil, err - } - return &rv, nil - } - _, hasMinStr := tmp["min"].(string) - _, hasMaxStr := tmp["max"].(string) - if hasMinStr || hasMaxStr { - var rv TermRangeQuery - err := json.Unmarshal(input, &rv) - if err != nil { - return nil, err - } - return &rv, nil - } - _, hasStart := tmp["start"] - _, hasEnd := tmp["end"] - if hasStart || hasEnd { - var rv DateRangeQuery - err := json.Unmarshal(input, &rv) - if err != nil { - return nil, err - } - return &rv, nil - } - _, hasPrefix := tmp["prefix"] - if hasPrefix { - var rv PrefixQuery - err := json.Unmarshal(input, &rv) - if err != nil { - return nil, err - } - return &rv, nil - } - _, hasRegexp := tmp["regexp"] - if hasRegexp { - var rv RegexpQuery - err := json.Unmarshal(input, &rv) - if err != nil { - return nil, err - } - return &rv, nil - } - _, hasWildcard := tmp["wildcard"] - if hasWildcard { - var rv WildcardQuery - err := json.Unmarshal(input, &rv) - if err != nil { - return nil, err - } - return &rv, nil - } - _, hasMatchAll := tmp["match_all"] - if hasMatchAll { - var rv MatchAllQuery - err := json.Unmarshal(input, &rv) - if err != nil { - return nil, err - } - return &rv, nil - } - _, hasMatchNone := tmp["match_none"] - if hasMatchNone { - var rv MatchNoneQuery - err := json.Unmarshal(input, &rv) - if err != nil { - return nil, err - } - return &rv, nil - } - _, hasDocIds := tmp["ids"] - if hasDocIds { - var rv DocIDQuery - err := json.Unmarshal(input, &rv) - if err != nil { - return nil, err - } - return &rv, nil - } - _, hasBool := tmp["bool"] - if hasBool { - var rv BoolFieldQuery - err := json.Unmarshal(input, &rv) - if err != nil { - return nil, err - } - return &rv, nil - } - _, hasTopLeft := tmp["top_left"] - _, hasBottomRight := tmp["bottom_right"] - if hasTopLeft && hasBottomRight { - var rv GeoBoundingBoxQuery - err := 
json.Unmarshal(input, &rv) - if err != nil { - return nil, err - } - return &rv, nil - } - _, hasDistance := tmp["distance"] - if hasDistance { - var rv GeoDistanceQuery - err := json.Unmarshal(input, &rv) - if err != nil { - return nil, err - } - return &rv, nil - } - _, hasPoints := tmp["polygon_points"] - if hasPoints { - var rv GeoBoundingPolygonQuery - err := json.Unmarshal(input, &rv) - if err != nil { - return nil, err - } - return &rv, nil - } - return nil, fmt.Errorf("unknown query type") -} - -// expandQuery traverses the input query tree and returns a new tree where -// query string queries have been expanded into base queries. Returned tree may -// reference queries from the input tree or new queries. -func expandQuery(m mapping.IndexMapping, query Query) (Query, error) { - var expand func(query Query) (Query, error) - var expandSlice func(queries []Query) ([]Query, error) - - expandSlice = func(queries []Query) ([]Query, error) { - expanded := []Query{} - for _, q := range queries { - exp, err := expand(q) - if err != nil { - return nil, err - } - expanded = append(expanded, exp) - } - return expanded, nil - } - - expand = func(query Query) (Query, error) { - switch q := query.(type) { - case *QueryStringQuery: - parsed, err := parseQuerySyntax(q.Query) - if err != nil { - return nil, fmt.Errorf("could not parse '%s': %s", q.Query, err) - } - return expand(parsed) - case *ConjunctionQuery: - children, err := expandSlice(q.Conjuncts) - if err != nil { - return nil, err - } - q.Conjuncts = children - return q, nil - case *DisjunctionQuery: - children, err := expandSlice(q.Disjuncts) - if err != nil { - return nil, err - } - q.Disjuncts = children - return q, nil - case *BooleanQuery: - var err error - q.Must, err = expand(q.Must) - if err != nil { - return nil, err - } - q.Should, err = expand(q.Should) - if err != nil { - return nil, err - } - q.MustNot, err = expand(q.MustNot) - if err != nil { - return nil, err - } - return q, nil - default: - return 
query, nil - } - } - return expand(query) -} - -// DumpQuery returns a string representation of the query tree, where query -// string queries have been expanded into base queries. The output format is -// meant for debugging purpose and may change in the future. -func DumpQuery(m mapping.IndexMapping, query Query) (string, error) { - q, err := expandQuery(m, query) - if err != nil { - return "", err - } - data, err := json.MarshalIndent(q, "", " ") - return string(data), err -} diff --git a/vendor/github.com/blevesearch/bleve/v2/search/query/query_string.go b/vendor/github.com/blevesearch/bleve/v2/search/query/query_string.go deleted file mode 100644 index fe1680c52..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/search/query/query_string.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package query - -import ( - "github.com/blevesearch/bleve/v2/mapping" - "github.com/blevesearch/bleve/v2/search" - index "github.com/blevesearch/bleve_index_api" -) - -type QueryStringQuery struct { - Query string `json:"query"` - BoostVal *Boost `json:"boost,omitempty"` -} - -// NewQueryStringQuery creates a new Query used for -// finding documents that satisfy a query string. The -// query string is a small query language for humans. 
-func NewQueryStringQuery(query string) *QueryStringQuery { - return &QueryStringQuery{ - Query: query, - } -} - -func (q *QueryStringQuery) SetBoost(b float64) { - boost := Boost(b) - q.BoostVal = &boost -} - -func (q *QueryStringQuery) Boost() float64 { - return q.BoostVal.Value() -} - -func (q *QueryStringQuery) Parse() (Query, error) { - return parseQuerySyntax(q.Query) -} - -func (q *QueryStringQuery) Searcher(i index.IndexReader, m mapping.IndexMapping, options search.SearcherOptions) (search.Searcher, error) { - newQuery, err := parseQuerySyntax(q.Query) - if err != nil { - return nil, err - } - return newQuery.Searcher(i, m, options) -} - -func (q *QueryStringQuery) Validate() error { - newQuery, err := parseQuerySyntax(q.Query) - if err != nil { - return err - } - if newQuery, ok := newQuery.(ValidatableQuery); ok { - return newQuery.Validate() - } - return nil -} diff --git a/vendor/github.com/blevesearch/bleve/v2/search/query/query_string.y b/vendor/github.com/blevesearch/bleve/v2/search/query/query_string.y deleted file mode 100644 index aeec85600..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/search/query/query_string.y +++ /dev/null @@ -1,338 +0,0 @@ -%{ -package query -import ( - "fmt" - "strconv" - "strings" - "time" -) - -func logDebugGrammar(format string, v ...interface{}) { - if debugParser { - logger.Printf(format, v...) 
- } -} -%} - -%union { -s string -n int -f float64 -q Query -pf *float64} - -%token tSTRING tPHRASE tPLUS tMINUS tCOLON tBOOST tNUMBER tSTRING tGREATER tLESS -tEQUAL tTILDE - -%type tSTRING -%type tPHRASE -%type tNUMBER -%type posOrNegNumber -%type fieldName -%type tTILDE -%type tBOOST -%type searchBase -%type searchSuffix -%type searchPrefix - -%% - -input: -searchParts { - logDebugGrammar("INPUT") -}; - -searchParts: -searchPart searchParts { - logDebugGrammar("SEARCH PARTS") -} -| -searchPart { - logDebugGrammar("SEARCH PART") -}; - -searchPart: -searchPrefix searchBase searchSuffix { - query := $2 - if $3 != nil { - if query, ok := query.(BoostableQuery); ok { - query.SetBoost(*$3) - } - } - switch($1) { - case queryShould: - yylex.(*lexerWrapper).query.AddShould(query) - case queryMust: - yylex.(*lexerWrapper).query.AddMust(query) - case queryMustNot: - yylex.(*lexerWrapper).query.AddMustNot(query) - } -}; - - -searchPrefix: -/* empty */ { - $$ = queryShould -} -| -tPLUS { - logDebugGrammar("PLUS") - $$ = queryMust -} -| -tMINUS { - logDebugGrammar("MINUS") - $$ = queryMustNot -}; - -searchBase: -tSTRING { - str := $1 - logDebugGrammar("STRING - %s", str) - var q FieldableQuery - if strings.HasPrefix(str, "/") && strings.HasSuffix(str, "/") { - q = NewRegexpQuery(str[1:len(str)-1]) - } else if strings.ContainsAny(str, "*?"){ - q = NewWildcardQuery(str) - } else { - q = NewMatchQuery(str) - } - $$ = q -} -| -tSTRING tTILDE { - str := $1 - fuzziness, err := strconv.ParseFloat($2, 64) - if err != nil { - yylex.(*lexerWrapper).lex.Error(fmt.Sprintf("invalid fuzziness value: %v", err)) - } - logDebugGrammar("FUZZY STRING - %s %f", str, fuzziness) - q := NewMatchQuery(str) - q.SetFuzziness(int(fuzziness)) - $$ = q -} -| -fieldName tCOLON tSTRING tTILDE { - field := $1 - str := $3 - fuzziness, err := strconv.ParseFloat($4, 64) - if err != nil { - yylex.(*lexerWrapper).lex.Error(fmt.Sprintf("invalid fuzziness value: %v", err)) - } - logDebugGrammar("FIELD - %s FUZZY 
STRING - %s %f", field, str, fuzziness) - q := NewMatchQuery(str) - q.SetFuzziness(int(fuzziness)) - q.SetField(field) - $$ = q -} -| -tNUMBER { - str := $1 - logDebugGrammar("STRING - %s", str) - q1 := NewMatchQuery(str) - val, err := strconv.ParseFloat($1, 64) - if err != nil { - yylex.(*lexerWrapper).lex.Error(fmt.Sprintf("error parsing number: %v", err)) - } - inclusive := true - q2 := NewNumericRangeInclusiveQuery(&val, &val, &inclusive, &inclusive) - q := NewDisjunctionQuery([]Query{q1,q2}) - q.queryStringMode = true - $$ = q -} -| -tPHRASE { - phrase := $1 - logDebugGrammar("PHRASE - %s", phrase) - q := NewMatchPhraseQuery(phrase) - $$ = q -} -| -fieldName tCOLON tSTRING { - field := $1 - str := $3 - logDebugGrammar("FIELD - %s STRING - %s", field, str) - var q FieldableQuery - if strings.HasPrefix(str, "/") && strings.HasSuffix(str, "/") { - q = NewRegexpQuery(str[1:len(str)-1]) - } else if strings.ContainsAny(str, "*?"){ - q = NewWildcardQuery(str) - } else { - q = NewMatchQuery(str) - } - q.SetField(field) - $$ = q -} -| -fieldName tCOLON posOrNegNumber { - field := $1 - str := $3 - logDebugGrammar("FIELD - %s STRING - %s", field, str) - q1 := NewMatchQuery(str) - q1.SetField(field) - val, err := strconv.ParseFloat($3, 64) - if err != nil { - yylex.(*lexerWrapper).lex.Error(fmt.Sprintf("error parsing number: %v", err)) - } - inclusive := true - q2 := NewNumericRangeInclusiveQuery(&val, &val, &inclusive, &inclusive) - q2.SetField(field) - q := NewDisjunctionQuery([]Query{q1,q2}) - q.queryStringMode = true - $$ = q -} -| -fieldName tCOLON tPHRASE { - field := $1 - phrase := $3 - logDebugGrammar("FIELD - %s PHRASE - %s", field, phrase) - q := NewMatchPhraseQuery(phrase) - q.SetField(field) - $$ = q -} -| -fieldName tCOLON tGREATER posOrNegNumber { - field := $1 - min, err := strconv.ParseFloat($4, 64) - if err != nil { - yylex.(*lexerWrapper).lex.Error(fmt.Sprintf("error parsing number: %v", err)) - } - minInclusive := false - logDebugGrammar("FIELD - 
GREATER THAN %f", min) - q := NewNumericRangeInclusiveQuery(&min, nil, &minInclusive, nil) - q.SetField(field) - $$ = q -} -| -fieldName tCOLON tGREATER tEQUAL posOrNegNumber { - field := $1 - min, err := strconv.ParseFloat($5, 64) - if err != nil { - yylex.(*lexerWrapper).lex.Error(fmt.Sprintf("error parsing number: %v", err)) - } - minInclusive := true - logDebugGrammar("FIELD - GREATER THAN OR EQUAL %f", min) - q := NewNumericRangeInclusiveQuery(&min, nil, &minInclusive, nil) - q.SetField(field) - $$ = q -} -| -fieldName tCOLON tLESS posOrNegNumber { - field := $1 - max, err := strconv.ParseFloat($4, 64) - if err != nil { - yylex.(*lexerWrapper).lex.Error(fmt.Sprintf("error parsing number: %v", err)) - } - maxInclusive := false - logDebugGrammar("FIELD - LESS THAN %f", max) - q := NewNumericRangeInclusiveQuery(nil, &max, nil, &maxInclusive) - q.SetField(field) - $$ = q -} -| -fieldName tCOLON tLESS tEQUAL posOrNegNumber { - field := $1 - max, err := strconv.ParseFloat($5, 64) - if err != nil { - yylex.(*lexerWrapper).lex.Error(fmt.Sprintf("error parsing number: %v", err)) - } - maxInclusive := true - logDebugGrammar("FIELD - LESS THAN OR EQUAL %f", max) - q := NewNumericRangeInclusiveQuery(nil, &max, nil, &maxInclusive) - q.SetField(field) - $$ = q -} -| -fieldName tCOLON tGREATER tPHRASE { - field := $1 - minInclusive := false - phrase := $4 - - logDebugGrammar("FIELD - GREATER THAN DATE %s", phrase) - minTime, err := queryTimeFromString(phrase) - if err != nil { - yylex.(*lexerWrapper).lex.Error(fmt.Sprintf("invalid time: %v", err)) - } - q := NewDateRangeInclusiveQuery(minTime, time.Time{}, &minInclusive, nil) - q.SetField(field) - $$ = q -} -| -fieldName tCOLON tGREATER tEQUAL tPHRASE { - field := $1 - minInclusive := true - phrase := $5 - - logDebugGrammar("FIELD - GREATER THAN OR EQUAL DATE %s", phrase) - minTime, err := queryTimeFromString(phrase) - if err != nil { - yylex.(*lexerWrapper).lex.Error(fmt.Sprintf("invalid time: %v", err)) - } - q := 
NewDateRangeInclusiveQuery(minTime, time.Time{}, &minInclusive, nil) - q.SetField(field) - $$ = q -} -| -fieldName tCOLON tLESS tPHRASE { - field := $1 - maxInclusive := false - phrase := $4 - - logDebugGrammar("FIELD - LESS THAN DATE %s", phrase) - maxTime, err := queryTimeFromString(phrase) - if err != nil { - yylex.(*lexerWrapper).lex.Error(fmt.Sprintf("invalid time: %v", err)) - } - q := NewDateRangeInclusiveQuery(time.Time{}, maxTime, nil, &maxInclusive) - q.SetField(field) - $$ = q -} -| -fieldName tCOLON tLESS tEQUAL tPHRASE { - field := $1 - maxInclusive := true - phrase := $5 - - logDebugGrammar("FIELD - LESS THAN OR EQUAL DATE %s", phrase) - maxTime, err := queryTimeFromString(phrase) - if err != nil { - yylex.(*lexerWrapper).lex.Error(fmt.Sprintf("invalid time: %v", err)) - } - q := NewDateRangeInclusiveQuery(time.Time{}, maxTime, nil, &maxInclusive) - q.SetField(field) - $$ = q -}; - -searchSuffix: -/* empty */ { - $$ = nil -} -| -tBOOST { - $$ = nil - boost, err := strconv.ParseFloat($1, 64) - if err != nil { - yylex.(*lexerWrapper).lex.Error(fmt.Sprintf("invalid boost value: %v", err)) - } else { - $$ = &boost - } - logDebugGrammar("BOOST %f", boost) -}; - -posOrNegNumber: -tNUMBER { - $$ = $1 -} -| -tMINUS tNUMBER { - $$ = "-" + $2 -}; - -fieldName: -tPHRASE { - $$ = $1 -} -| -tSTRING { - $$ = $1 -}; diff --git a/vendor/github.com/blevesearch/bleve/v2/search/query/query_string_parser.go b/vendor/github.com/blevesearch/bleve/v2/search/query/query_string_parser.go deleted file mode 100644 index 3fb7731b8..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/search/query/query_string_parser.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// as of Go 1.8 this requires the goyacc external tool -// available from golang.org/x/tools/cmd/goyacc - -//go:generate goyacc -o query_string.y.go query_string.y -//go:generate sed -i.tmp -e 1d query_string.y.go -//go:generate rm query_string.y.go.tmp - -// note: OSX sed and gnu sed handle the -i (in-place) option differently. -// using -i.tmp works on both, at the expense of having to remove -// the unsightly .tmp files - -package query - -import ( - "fmt" - "strings" -) - -var debugParser bool -var debugLexer bool - -func parseQuerySyntax(query string) (rq Query, err error) { - if query == "" { - return NewMatchNoneQuery(), nil - } - lex := newLexerWrapper(newQueryStringLex(strings.NewReader(query))) - doParse(lex) - - if len(lex.errs) > 0 { - return nil, fmt.Errorf(strings.Join(lex.errs, "\n")) - } - return lex.query, nil -} - -func doParse(lex *lexerWrapper) { - defer func() { - r := recover() - if r != nil { - lex.errs = append(lex.errs, fmt.Sprintf("parse error: %v", r)) - } - }() - - yyParse(lex) -} - -const ( - queryShould = iota - queryMust - queryMustNot -) - -type lexerWrapper struct { - lex yyLexer - errs []string - query *BooleanQuery -} - -func newLexerWrapper(lex yyLexer) *lexerWrapper { - return &lexerWrapper{ - lex: lex, - query: NewBooleanQueryForQueryString(nil, nil, nil), - } -} - -func (l *lexerWrapper) Lex(lval *yySymType) int { - return l.lex.Lex(lval) -} - -func (l *lexerWrapper) Error(s string) { - l.errs = append(l.errs, s) -} diff --git a/vendor/github.com/blevesearch/bleve/v2/search/query/regexp.go 
b/vendor/github.com/blevesearch/bleve/v2/search/query/regexp.go deleted file mode 100644 index ba744ec15..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/search/query/regexp.go +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package query - -import ( - "strings" - - "github.com/blevesearch/bleve/v2/mapping" - "github.com/blevesearch/bleve/v2/search" - "github.com/blevesearch/bleve/v2/search/searcher" - index "github.com/blevesearch/bleve_index_api" -) - -type RegexpQuery struct { - Regexp string `json:"regexp"` - FieldVal string `json:"field,omitempty"` - BoostVal *Boost `json:"boost,omitempty"` -} - -// NewRegexpQuery creates a new Query which finds -// documents containing terms that match the -// specified regular expression. The regexp pattern -// SHOULD NOT include ^ or $ modifiers, the search -// will only match entire terms even without them. 
-func NewRegexpQuery(regexp string) *RegexpQuery { - return &RegexpQuery{ - Regexp: regexp, - } -} - -func (q *RegexpQuery) SetBoost(b float64) { - boost := Boost(b) - q.BoostVal = &boost -} - -func (q *RegexpQuery) Boost() float64 { - return q.BoostVal.Value() -} - -func (q *RegexpQuery) SetField(f string) { - q.FieldVal = f -} - -func (q *RegexpQuery) Field() string { - return q.FieldVal -} - -func (q *RegexpQuery) Searcher(i index.IndexReader, m mapping.IndexMapping, options search.SearcherOptions) (search.Searcher, error) { - field := q.FieldVal - if q.FieldVal == "" { - field = m.DefaultSearchField() - } - - // require that pattern NOT be anchored to start and end of term. - // do not attempt to remove trailing $, its presence is not - // known to interfere with LiteralPrefix() the way ^ does - // and removing $ introduces possible ambiguities with escaped \$, \\$, etc - actualRegexp := q.Regexp - if strings.HasPrefix(actualRegexp, "^") { - actualRegexp = actualRegexp[1:] // remove leading ^ - } - - return searcher.NewRegexpStringSearcher(i, actualRegexp, field, - q.BoostVal.Value(), options) -} - -func (q *RegexpQuery) Validate() error { - return nil // real validation delayed until searcher constructor -} diff --git a/vendor/github.com/blevesearch/bleve/v2/search/query/term.go b/vendor/github.com/blevesearch/bleve/v2/search/query/term.go deleted file mode 100644 index 82958bb02..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/search/query/term.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package query - -import ( - "github.com/blevesearch/bleve/v2/mapping" - "github.com/blevesearch/bleve/v2/search" - "github.com/blevesearch/bleve/v2/search/searcher" - index "github.com/blevesearch/bleve_index_api" -) - -type TermQuery struct { - Term string `json:"term"` - FieldVal string `json:"field,omitempty"` - BoostVal *Boost `json:"boost,omitempty"` -} - -// NewTermQuery creates a new Query for finding an -// exact term match in the index. -func NewTermQuery(term string) *TermQuery { - return &TermQuery{ - Term: term, - } -} - -func (q *TermQuery) SetBoost(b float64) { - boost := Boost(b) - q.BoostVal = &boost -} - -func (q *TermQuery) Boost() float64 { - return q.BoostVal.Value() -} - -func (q *TermQuery) SetField(f string) { - q.FieldVal = f -} - -func (q *TermQuery) Field() string { - return q.FieldVal -} - -func (q *TermQuery) Searcher(i index.IndexReader, m mapping.IndexMapping, options search.SearcherOptions) (search.Searcher, error) { - field := q.FieldVal - if q.FieldVal == "" { - field = m.DefaultSearchField() - } - return searcher.NewTermSearcher(i, q.Term, field, q.BoostVal.Value(), options) -} diff --git a/vendor/github.com/blevesearch/bleve/v2/search/query/term_range.go b/vendor/github.com/blevesearch/bleve/v2/search/query/term_range.go deleted file mode 100644 index 3edfa6954..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/search/query/term_range.go +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright (c) 2017 Couchbase, Inc. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package query - -import ( - "fmt" - - "github.com/blevesearch/bleve/v2/mapping" - "github.com/blevesearch/bleve/v2/search" - "github.com/blevesearch/bleve/v2/search/searcher" - index "github.com/blevesearch/bleve_index_api" -) - -type TermRangeQuery struct { - Min string `json:"min,omitempty"` - Max string `json:"max,omitempty"` - InclusiveMin *bool `json:"inclusive_min,omitempty"` - InclusiveMax *bool `json:"inclusive_max,omitempty"` - FieldVal string `json:"field,omitempty"` - BoostVal *Boost `json:"boost,omitempty"` -} - -// NewTermRangeQuery creates a new Query for ranges -// of text term values. -// Either, but not both endpoints can be nil. -// The minimum value is inclusive. -// The maximum value is exclusive. -func NewTermRangeQuery(min, max string) *TermRangeQuery { - return NewTermRangeInclusiveQuery(min, max, nil, nil) -} - -// NewTermRangeInclusiveQuery creates a new Query for ranges -// of numeric values. -// Either, but not both endpoints can be nil. -// Control endpoint inclusion with inclusiveMin, inclusiveMax. 
-func NewTermRangeInclusiveQuery(min, max string, minInclusive, maxInclusive *bool) *TermRangeQuery { - return &TermRangeQuery{ - Min: min, - Max: max, - InclusiveMin: minInclusive, - InclusiveMax: maxInclusive, - } -} - -func (q *TermRangeQuery) SetBoost(b float64) { - boost := Boost(b) - q.BoostVal = &boost -} - -func (q *TermRangeQuery) Boost() float64 { - return q.BoostVal.Value() -} - -func (q *TermRangeQuery) SetField(f string) { - q.FieldVal = f -} - -func (q *TermRangeQuery) Field() string { - return q.FieldVal -} - -func (q *TermRangeQuery) Searcher(i index.IndexReader, m mapping.IndexMapping, options search.SearcherOptions) (search.Searcher, error) { - field := q.FieldVal - if q.FieldVal == "" { - field = m.DefaultSearchField() - } - var minTerm []byte - if q.Min != "" { - minTerm = []byte(q.Min) - } - var maxTerm []byte - if q.Max != "" { - maxTerm = []byte(q.Max) - } - return searcher.NewTermRangeSearcher(i, minTerm, maxTerm, q.InclusiveMin, q.InclusiveMax, field, q.BoostVal.Value(), options) -} - -func (q *TermRangeQuery) Validate() error { - if q.Min == "" && q.Min == q.Max { - return fmt.Errorf("term range query must specify min or max") - } - return nil -} diff --git a/vendor/github.com/blevesearch/bleve/v2/search/query/wildcard.go b/vendor/github.com/blevesearch/bleve/v2/search/query/wildcard.go deleted file mode 100644 index 7713a9acb..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/search/query/wildcard.go +++ /dev/null @@ -1,93 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package query - -import ( - "strings" - - "github.com/blevesearch/bleve/v2/mapping" - "github.com/blevesearch/bleve/v2/search" - "github.com/blevesearch/bleve/v2/search/searcher" - index "github.com/blevesearch/bleve_index_api" -) - -var wildcardRegexpReplacer = strings.NewReplacer( - // characters in the wildcard that must - // be escaped in the regexp - "+", `\+`, - "(", `\(`, - ")", `\)`, - "^", `\^`, - "$", `\$`, - ".", `\.`, - "{", `\{`, - "}", `\}`, - "[", `\[`, - "]", `\]`, - `|`, `\|`, - `\`, `\\`, - // wildcard characters - "*", ".*", - "?", ".") - -type WildcardQuery struct { - Wildcard string `json:"wildcard"` - FieldVal string `json:"field,omitempty"` - BoostVal *Boost `json:"boost,omitempty"` -} - -// NewWildcardQuery creates a new Query which finds -// documents containing terms that match the -// specified wildcard. In the wildcard pattern '*' -// will match any sequence of 0 or more characters, -// and '?' will match any single character. 
-func NewWildcardQuery(wildcard string) *WildcardQuery { - return &WildcardQuery{ - Wildcard: wildcard, - } -} - -func (q *WildcardQuery) SetBoost(b float64) { - boost := Boost(b) - q.BoostVal = &boost -} - -func (q *WildcardQuery) Boost() float64 { - return q.BoostVal.Value() -} - -func (q *WildcardQuery) SetField(f string) { - q.FieldVal = f -} - -func (q *WildcardQuery) Field() string { - return q.FieldVal -} - -func (q *WildcardQuery) Searcher(i index.IndexReader, m mapping.IndexMapping, options search.SearcherOptions) (search.Searcher, error) { - field := q.FieldVal - if q.FieldVal == "" { - field = m.DefaultSearchField() - } - - regexpString := wildcardRegexpReplacer.Replace(q.Wildcard) - - return searcher.NewRegexpStringSearcher(i, regexpString, field, - q.BoostVal.Value(), options) -} - -func (q *WildcardQuery) Validate() error { - return nil // real validation delayed until searcher constructor -} diff --git a/vendor/github.com/blevesearch/bleve/v2/search/scorer/scorer_conjunction.go b/vendor/github.com/blevesearch/bleve/v2/search/scorer/scorer_conjunction.go deleted file mode 100644 index f3c81a78c..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/search/scorer/scorer_conjunction.go +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package scorer - -import ( - "reflect" - - "github.com/blevesearch/bleve/v2/search" - "github.com/blevesearch/bleve/v2/size" -) - -var reflectStaticSizeConjunctionQueryScorer int - -func init() { - var cqs ConjunctionQueryScorer - reflectStaticSizeConjunctionQueryScorer = int(reflect.TypeOf(cqs).Size()) -} - -type ConjunctionQueryScorer struct { - options search.SearcherOptions -} - -func (s *ConjunctionQueryScorer) Size() int { - return reflectStaticSizeConjunctionQueryScorer + size.SizeOfPtr -} - -func NewConjunctionQueryScorer(options search.SearcherOptions) *ConjunctionQueryScorer { - return &ConjunctionQueryScorer{ - options: options, - } -} - -func (s *ConjunctionQueryScorer) Score(ctx *search.SearchContext, constituents []*search.DocumentMatch) *search.DocumentMatch { - var sum float64 - var childrenExplanations []*search.Explanation - if s.options.Explain { - childrenExplanations = make([]*search.Explanation, len(constituents)) - } - - for i, docMatch := range constituents { - sum += docMatch.Score - if s.options.Explain { - childrenExplanations[i] = docMatch.Expl - } - } - newScore := sum - var newExpl *search.Explanation - if s.options.Explain { - newExpl = &search.Explanation{Value: sum, Message: "sum of:", Children: childrenExplanations} - } - - // reuse constituents[0] as the return value - rv := constituents[0] - rv.Score = newScore - rv.Expl = newExpl - rv.FieldTermLocations = search.MergeFieldTermLocations( - rv.FieldTermLocations, constituents[1:]) - - return rv -} diff --git a/vendor/github.com/blevesearch/bleve/v2/search/scorer/scorer_constant.go b/vendor/github.com/blevesearch/bleve/v2/search/scorer/scorer_constant.go deleted file mode 100644 index fc36fd5bf..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/search/scorer/scorer_constant.go +++ /dev/null @@ -1,127 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package scorer - -import ( - "fmt" - "reflect" - - "github.com/blevesearch/bleve/v2/search" - "github.com/blevesearch/bleve/v2/size" - index "github.com/blevesearch/bleve_index_api" -) - -var reflectStaticSizeConstantScorer int - -func init() { - var cs ConstantScorer - reflectStaticSizeConstantScorer = int(reflect.TypeOf(cs).Size()) -} - -type ConstantScorer struct { - constant float64 - boost float64 - options search.SearcherOptions - queryNorm float64 - queryWeight float64 - queryWeightExplanation *search.Explanation -} - -func (s *ConstantScorer) Size() int { - sizeInBytes := reflectStaticSizeConstantScorer + size.SizeOfPtr - - if s.queryWeightExplanation != nil { - sizeInBytes += s.queryWeightExplanation.Size() - } - - return sizeInBytes -} - -func NewConstantScorer(constant float64, boost float64, options search.SearcherOptions) *ConstantScorer { - rv := ConstantScorer{ - options: options, - queryWeight: 1.0, - constant: constant, - boost: boost, - } - - return &rv -} - -func (s *ConstantScorer) Weight() float64 { - sum := s.boost - return sum * sum -} - -func (s *ConstantScorer) SetQueryNorm(qnorm float64) { - s.queryNorm = qnorm - - // update the query weight - s.queryWeight = s.boost * s.queryNorm - - if s.options.Explain { - childrenExplanations := make([]*search.Explanation, 2) - childrenExplanations[0] = &search.Explanation{ - Value: s.boost, - Message: "boost", - } - childrenExplanations[1] = 
&search.Explanation{ - Value: s.queryNorm, - Message: "queryNorm", - } - s.queryWeightExplanation = &search.Explanation{ - Value: s.queryWeight, - Message: fmt.Sprintf("ConstantScore()^%f, product of:", s.boost), - Children: childrenExplanations, - } - } -} - -func (s *ConstantScorer) Score(ctx *search.SearchContext, id index.IndexInternalID) *search.DocumentMatch { - var scoreExplanation *search.Explanation - - score := s.constant - - if s.options.Explain { - scoreExplanation = &search.Explanation{ - Value: score, - Message: fmt.Sprintf("ConstantScore()"), - } - } - - // if the query weight isn't 1, multiply - if s.queryWeight != 1.0 { - score = score * s.queryWeight - if s.options.Explain { - childExplanations := make([]*search.Explanation, 2) - childExplanations[0] = s.queryWeightExplanation - childExplanations[1] = scoreExplanation - scoreExplanation = &search.Explanation{ - Value: score, - Message: fmt.Sprintf("weight(^%f), product of:", s.boost), - Children: childExplanations, - } - } - } - - rv := ctx.DocumentMatchPool.Get() - rv.IndexInternalID = id - rv.Score = score - if s.options.Explain { - rv.Expl = scoreExplanation - } - - return rv -} diff --git a/vendor/github.com/blevesearch/bleve/v2/search/scorer/scorer_disjunction.go b/vendor/github.com/blevesearch/bleve/v2/search/scorer/scorer_disjunction.go deleted file mode 100644 index 054e76fd4..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/search/scorer/scorer_disjunction.go +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package scorer - -import ( - "fmt" - "reflect" - - "github.com/blevesearch/bleve/v2/search" - "github.com/blevesearch/bleve/v2/size" -) - -var reflectStaticSizeDisjunctionQueryScorer int - -func init() { - var dqs DisjunctionQueryScorer - reflectStaticSizeDisjunctionQueryScorer = int(reflect.TypeOf(dqs).Size()) -} - -type DisjunctionQueryScorer struct { - options search.SearcherOptions -} - -func (s *DisjunctionQueryScorer) Size() int { - return reflectStaticSizeDisjunctionQueryScorer + size.SizeOfPtr -} - -func NewDisjunctionQueryScorer(options search.SearcherOptions) *DisjunctionQueryScorer { - return &DisjunctionQueryScorer{ - options: options, - } -} - -func (s *DisjunctionQueryScorer) Score(ctx *search.SearchContext, constituents []*search.DocumentMatch, countMatch, countTotal int) *search.DocumentMatch { - var sum float64 - var childrenExplanations []*search.Explanation - if s.options.Explain { - childrenExplanations = make([]*search.Explanation, len(constituents)) - } - - for i, docMatch := range constituents { - sum += docMatch.Score - if s.options.Explain { - childrenExplanations[i] = docMatch.Expl - } - } - - var rawExpl *search.Explanation - if s.options.Explain { - rawExpl = &search.Explanation{Value: sum, Message: "sum of:", Children: childrenExplanations} - } - - coord := float64(countMatch) / float64(countTotal) - newScore := sum * coord - var newExpl *search.Explanation - if s.options.Explain { - ce := make([]*search.Explanation, 2) - ce[0] = rawExpl - ce[1] = &search.Explanation{Value: coord, Message: fmt.Sprintf("coord(%d/%d)", countMatch, countTotal)} - newExpl = &search.Explanation{Value: newScore, Message: "product of:", Children: ce} - } - - // reuse constituents[0] as the return value - rv := constituents[0] - rv.Score = newScore - rv.Expl = newExpl - rv.FieldTermLocations = search.MergeFieldTermLocations( - rv.FieldTermLocations, 
constituents[1:]) - - return rv -} diff --git a/vendor/github.com/blevesearch/bleve/v2/search/scorer/scorer_term.go b/vendor/github.com/blevesearch/bleve/v2/search/scorer/scorer_term.go deleted file mode 100644 index ca268648b..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/search/scorer/scorer_term.go +++ /dev/null @@ -1,203 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package scorer - -import ( - "fmt" - "math" - "reflect" - - "github.com/blevesearch/bleve/v2/search" - "github.com/blevesearch/bleve/v2/size" - index "github.com/blevesearch/bleve_index_api" -) - -var reflectStaticSizeTermQueryScorer int - -func init() { - var tqs TermQueryScorer - reflectStaticSizeTermQueryScorer = int(reflect.TypeOf(tqs).Size()) -} - -type TermQueryScorer struct { - queryTerm string - queryField string - queryBoost float64 - docTerm uint64 - docTotal uint64 - idf float64 - options search.SearcherOptions - idfExplanation *search.Explanation - includeScore bool - queryNorm float64 - queryWeight float64 - queryWeightExplanation *search.Explanation -} - -func (s *TermQueryScorer) Size() int { - sizeInBytes := reflectStaticSizeTermQueryScorer + size.SizeOfPtr + - len(s.queryTerm) + len(s.queryField) - - if s.idfExplanation != nil { - sizeInBytes += s.idfExplanation.Size() - } - - if s.queryWeightExplanation != nil { - sizeInBytes += s.queryWeightExplanation.Size() - } - - return sizeInBytes -} - -func 
NewTermQueryScorer(queryTerm []byte, queryField string, queryBoost float64, docTotal, docTerm uint64, options search.SearcherOptions) *TermQueryScorer { - rv := TermQueryScorer{ - queryTerm: string(queryTerm), - queryField: queryField, - queryBoost: queryBoost, - docTerm: docTerm, - docTotal: docTotal, - idf: 1.0 + math.Log(float64(docTotal)/float64(docTerm+1.0)), - options: options, - queryWeight: 1.0, - includeScore: options.Score != "none", - } - - if options.Explain { - rv.idfExplanation = &search.Explanation{ - Value: rv.idf, - Message: fmt.Sprintf("idf(docFreq=%d, maxDocs=%d)", docTerm, docTotal), - } - } - - return &rv -} - -func (s *TermQueryScorer) Weight() float64 { - sum := s.queryBoost * s.idf - return sum * sum -} - -func (s *TermQueryScorer) SetQueryNorm(qnorm float64) { - s.queryNorm = qnorm - - // update the query weight - s.queryWeight = s.queryBoost * s.idf * s.queryNorm - - if s.options.Explain { - childrenExplanations := make([]*search.Explanation, 3) - childrenExplanations[0] = &search.Explanation{ - Value: s.queryBoost, - Message: "boost", - } - childrenExplanations[1] = s.idfExplanation - childrenExplanations[2] = &search.Explanation{ - Value: s.queryNorm, - Message: "queryNorm", - } - s.queryWeightExplanation = &search.Explanation{ - Value: s.queryWeight, - Message: fmt.Sprintf("queryWeight(%s:%s^%f), product of:", s.queryField, s.queryTerm, s.queryBoost), - Children: childrenExplanations, - } - } -} - -func (s *TermQueryScorer) Score(ctx *search.SearchContext, termMatch *index.TermFieldDoc) *search.DocumentMatch { - rv := ctx.DocumentMatchPool.Get() - // perform any score computations only when needed - if s.includeScore || s.options.Explain { - var scoreExplanation *search.Explanation - var tf float64 - if termMatch.Freq < MaxSqrtCache { - tf = SqrtCache[int(termMatch.Freq)] - } else { - tf = math.Sqrt(float64(termMatch.Freq)) - } - score := tf * termMatch.Norm * s.idf - - if s.options.Explain { - childrenExplanations := 
make([]*search.Explanation, 3) - childrenExplanations[0] = &search.Explanation{ - Value: tf, - Message: fmt.Sprintf("tf(termFreq(%s:%s)=%d", s.queryField, s.queryTerm, termMatch.Freq), - } - childrenExplanations[1] = &search.Explanation{ - Value: termMatch.Norm, - Message: fmt.Sprintf("fieldNorm(field=%s, doc=%s)", s.queryField, termMatch.ID), - } - childrenExplanations[2] = s.idfExplanation - scoreExplanation = &search.Explanation{ - Value: score, - Message: fmt.Sprintf("fieldWeight(%s:%s in %s), product of:", s.queryField, s.queryTerm, termMatch.ID), - Children: childrenExplanations, - } - } - - // if the query weight isn't 1, multiply - if s.queryWeight != 1.0 { - score = score * s.queryWeight - if s.options.Explain { - childExplanations := make([]*search.Explanation, 2) - childExplanations[0] = s.queryWeightExplanation - childExplanations[1] = scoreExplanation - scoreExplanation = &search.Explanation{ - Value: score, - Message: fmt.Sprintf("weight(%s:%s^%f in %s), product of:", s.queryField, s.queryTerm, s.queryBoost, termMatch.ID), - Children: childExplanations, - } - } - } - - if s.includeScore { - rv.Score = score - } - - if s.options.Explain { - rv.Expl = scoreExplanation - } - } - - rv.IndexInternalID = append(rv.IndexInternalID, termMatch.ID...) - - if len(termMatch.Vectors) > 0 { - if cap(rv.FieldTermLocations) < len(termMatch.Vectors) { - rv.FieldTermLocations = make([]search.FieldTermLocation, 0, len(termMatch.Vectors)) - } - - for _, v := range termMatch.Vectors { - var ap search.ArrayPositions - if len(v.ArrayPositions) > 0 { - n := len(rv.FieldTermLocations) - if n < cap(rv.FieldTermLocations) { // reuse ap slice if available - ap = rv.FieldTermLocations[:n+1][n].Location.ArrayPositions[:0] - } - ap = append(ap, v.ArrayPositions...) 
- } - rv.FieldTermLocations = - append(rv.FieldTermLocations, search.FieldTermLocation{ - Field: v.Field, - Term: s.queryTerm, - Location: search.Location{ - Pos: v.Pos, - Start: v.Start, - End: v.End, - ArrayPositions: ap, - }, - }) - } - } - - return rv -} diff --git a/vendor/github.com/blevesearch/bleve/v2/search/search.go b/vendor/github.com/blevesearch/bleve/v2/search/search.go deleted file mode 100644 index 9277d5295..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/search/search.go +++ /dev/null @@ -1,378 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package search - -import ( - "fmt" - "reflect" - "sort" - - "github.com/blevesearch/bleve/v2/size" - index "github.com/blevesearch/bleve_index_api" -) - -var reflectStaticSizeDocumentMatch int -var reflectStaticSizeSearchContext int -var reflectStaticSizeLocation int - -func init() { - var dm DocumentMatch - reflectStaticSizeDocumentMatch = int(reflect.TypeOf(dm).Size()) - var sc SearchContext - reflectStaticSizeSearchContext = int(reflect.TypeOf(sc).Size()) - var l Location - reflectStaticSizeLocation = int(reflect.TypeOf(l).Size()) -} - -type ArrayPositions []uint64 - -func (ap ArrayPositions) Equals(other ArrayPositions) bool { - if len(ap) != len(other) { - return false - } - for i := range ap { - if ap[i] != other[i] { - return false - } - } - return true -} - -func (ap ArrayPositions) Compare(other ArrayPositions) int { - for i, p := range ap { - if i >= len(other) { - return 1 - } - if p < other[i] { - return -1 - } - if p > other[i] { - return 1 - } - } - if len(ap) < len(other) { - return -1 - } - return 0 -} - -type Location struct { - // Pos is the position of the term within the field, starting at 1 - Pos uint64 `json:"pos"` - - // Start and End are the byte offsets of the term in the field - Start uint64 `json:"start"` - End uint64 `json:"end"` - - // ArrayPositions contains the positions of the term within any elements. - ArrayPositions ArrayPositions `json:"array_positions"` -} - -func (l *Location) Size() int { - return reflectStaticSizeLocation + size.SizeOfPtr + - len(l.ArrayPositions)*size.SizeOfUint64 -} - -type Locations []*Location - -func (p Locations) Len() int { return len(p) } -func (p Locations) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -func (p Locations) Less(i, j int) bool { - c := p[i].ArrayPositions.Compare(p[j].ArrayPositions) - if c < 0 { - return true - } - if c > 0 { - return false - } - return p[i].Pos < p[j].Pos -} - -func (p Locations) Dedupe() Locations { // destructive! 
- if len(p) <= 1 { - return p - } - - sort.Sort(p) - - slow := 0 - - for _, pfast := range p { - pslow := p[slow] - if pslow.Pos == pfast.Pos && - pslow.Start == pfast.Start && - pslow.End == pfast.End && - pslow.ArrayPositions.Equals(pfast.ArrayPositions) { - continue // duplicate, so only move fast ahead - } - - slow++ - - p[slow] = pfast - } - - return p[:slow+1] -} - -type TermLocationMap map[string]Locations - -func (t TermLocationMap) AddLocation(term string, location *Location) { - t[term] = append(t[term], location) -} - -type FieldTermLocationMap map[string]TermLocationMap - -type FieldTermLocation struct { - Field string - Term string - Location Location -} - -type FieldFragmentMap map[string][]string - -type DocumentMatch struct { - Index string `json:"index,omitempty"` - ID string `json:"id"` - IndexInternalID index.IndexInternalID `json:"-"` - Score float64 `json:"score"` - Expl *Explanation `json:"explanation,omitempty"` - Locations FieldTermLocationMap `json:"locations,omitempty"` - Fragments FieldFragmentMap `json:"fragments,omitempty"` - Sort []string `json:"sort,omitempty"` - - // Fields contains the values for document fields listed in - // SearchRequest.Fields. Text fields are returned as strings, numeric - // fields as float64s and date fields as time.RFC3339 formatted strings. 
- Fields map[string]interface{} `json:"fields,omitempty"` - - // used to maintain natural index order - HitNumber uint64 `json:"-"` - - // used to temporarily hold field term location information during - // search processing in an efficient, recycle-friendly manner, to - // be later incorporated into the Locations map when search - // results are completed - FieldTermLocations []FieldTermLocation `json:"-"` -} - -func (dm *DocumentMatch) AddFieldValue(name string, value interface{}) { - if dm.Fields == nil { - dm.Fields = make(map[string]interface{}) - } - existingVal, ok := dm.Fields[name] - if !ok { - dm.Fields[name] = value - return - } - - valSlice, ok := existingVal.([]interface{}) - if ok { - // already a slice, append to it - valSlice = append(valSlice, value) - } else { - // create a slice - valSlice = []interface{}{existingVal, value} - } - dm.Fields[name] = valSlice -} - -// Reset allows an already allocated DocumentMatch to be reused -func (dm *DocumentMatch) Reset() *DocumentMatch { - // remember the []byte used for the IndexInternalID - indexInternalID := dm.IndexInternalID - // remember the []interface{} used for sort - sort := dm.Sort - // remember the FieldTermLocations backing array - ftls := dm.FieldTermLocations - for i := range ftls { // recycle the ArrayPositions of each location - ftls[i].Location.ArrayPositions = ftls[i].Location.ArrayPositions[:0] - } - // idiom to copy over from empty DocumentMatch (0 allocations) - *dm = DocumentMatch{} - // reuse the []byte already allocated (and reset len to 0) - dm.IndexInternalID = indexInternalID[:0] - // reuse the []interface{} already allocated (and reset len to 0) - dm.Sort = sort[:0] - // reuse the FieldTermLocations already allocated (and reset len to 0) - dm.FieldTermLocations = ftls[:0] - return dm -} - -func (dm *DocumentMatch) Size() int { - sizeInBytes := reflectStaticSizeDocumentMatch + size.SizeOfPtr + - len(dm.Index) + - len(dm.ID) + - len(dm.IndexInternalID) - - if dm.Expl != nil { - 
sizeInBytes += dm.Expl.Size() - } - - for k, v := range dm.Locations { - sizeInBytes += size.SizeOfString + len(k) - for k1, v1 := range v { - sizeInBytes += size.SizeOfString + len(k1) + - size.SizeOfSlice - for _, entry := range v1 { - sizeInBytes += entry.Size() - } - } - } - - for k, v := range dm.Fragments { - sizeInBytes += size.SizeOfString + len(k) + - size.SizeOfSlice - - for _, entry := range v { - sizeInBytes += size.SizeOfString + len(entry) - } - } - - for _, entry := range dm.Sort { - sizeInBytes += size.SizeOfString + len(entry) - } - - for k, _ := range dm.Fields { - sizeInBytes += size.SizeOfString + len(k) + - size.SizeOfPtr - } - - return sizeInBytes -} - -// Complete performs final preparation & transformation of the -// DocumentMatch at the end of search processing, also allowing the -// caller to provide an optional preallocated locations slice -func (dm *DocumentMatch) Complete(prealloc []Location) []Location { - // transform the FieldTermLocations slice into the Locations map - nlocs := len(dm.FieldTermLocations) - if nlocs > 0 { - if cap(prealloc) < nlocs { - prealloc = make([]Location, nlocs) - } - prealloc = prealloc[:nlocs] - - var lastField string - var tlm TermLocationMap - var needsDedupe bool - - for i, ftl := range dm.FieldTermLocations { - if lastField != ftl.Field { - lastField = ftl.Field - - if dm.Locations == nil { - dm.Locations = make(FieldTermLocationMap) - } - - tlm = dm.Locations[ftl.Field] - if tlm == nil { - tlm = make(TermLocationMap) - dm.Locations[ftl.Field] = tlm - } - } - - loc := &prealloc[i] - *loc = ftl.Location - - if len(loc.ArrayPositions) > 0 { // copy - loc.ArrayPositions = append(ArrayPositions(nil), loc.ArrayPositions...) 
- } - - locs := tlm[ftl.Term] - - // if the loc is before or at the last location, then there - // might be duplicates that need to be deduplicated - if !needsDedupe && len(locs) > 0 { - last := locs[len(locs)-1] - cmp := loc.ArrayPositions.Compare(last.ArrayPositions) - if cmp < 0 || (cmp == 0 && loc.Pos <= last.Pos) { - needsDedupe = true - } - } - - tlm[ftl.Term] = append(locs, loc) - - dm.FieldTermLocations[i] = FieldTermLocation{ // recycle - Location: Location{ - ArrayPositions: ftl.Location.ArrayPositions[:0], - }, - } - } - - if needsDedupe { - for _, tlm := range dm.Locations { - for term, locs := range tlm { - tlm[term] = locs.Dedupe() - } - } - } - } - - dm.FieldTermLocations = dm.FieldTermLocations[:0] // recycle - - return prealloc -} - -func (dm *DocumentMatch) String() string { - return fmt.Sprintf("[%s-%f]", string(dm.IndexInternalID), dm.Score) -} - -type DocumentMatchCollection []*DocumentMatch - -func (c DocumentMatchCollection) Len() int { return len(c) } -func (c DocumentMatchCollection) Swap(i, j int) { c[i], c[j] = c[j], c[i] } -func (c DocumentMatchCollection) Less(i, j int) bool { return c[i].Score > c[j].Score } - -type Searcher interface { - Next(ctx *SearchContext) (*DocumentMatch, error) - Advance(ctx *SearchContext, ID index.IndexInternalID) (*DocumentMatch, error) - Close() error - Weight() float64 - SetQueryNorm(float64) - Count() uint64 - Min() int - Size() int - - DocumentMatchPoolSize() int -} - -type SearcherOptions struct { - Explain bool - IncludeTermVectors bool - Score string -} - -// SearchContext represents the context around a single search -type SearchContext struct { - DocumentMatchPool *DocumentMatchPool - Collector Collector - IndexReader index.IndexReader -} - -func (sc *SearchContext) Size() int { - sizeInBytes := reflectStaticSizeSearchContext + size.SizeOfPtr + - reflectStaticSizeDocumentMatchPool + size.SizeOfPtr - - if sc.DocumentMatchPool != nil { - for _, entry := range sc.DocumentMatchPool.avail { - if entry 
!= nil { - sizeInBytes += entry.Size() - } - } - } - - return sizeInBytes -} diff --git a/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_docid.go b/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_docid.go deleted file mode 100644 index 2d90ae166..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_docid.go +++ /dev/null @@ -1,109 +0,0 @@ -// Copyright (c) 2015 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package searcher - -import ( - "reflect" - - "github.com/blevesearch/bleve/v2/search" - "github.com/blevesearch/bleve/v2/search/scorer" - "github.com/blevesearch/bleve/v2/size" - index "github.com/blevesearch/bleve_index_api" -) - -var reflectStaticSizeDocIDSearcher int - -func init() { - var ds DocIDSearcher - reflectStaticSizeDocIDSearcher = int(reflect.TypeOf(ds).Size()) -} - -// DocIDSearcher returns documents matching a predefined set of identifiers. 
-type DocIDSearcher struct { - reader index.DocIDReader - scorer *scorer.ConstantScorer - count int -} - -func NewDocIDSearcher(indexReader index.IndexReader, ids []string, boost float64, - options search.SearcherOptions) (searcher *DocIDSearcher, err error) { - - reader, err := indexReader.DocIDReaderOnly(ids) - if err != nil { - return nil, err - } - scorer := scorer.NewConstantScorer(1.0, boost, options) - return &DocIDSearcher{ - scorer: scorer, - reader: reader, - count: len(ids), - }, nil -} - -func (s *DocIDSearcher) Size() int { - return reflectStaticSizeDocIDSearcher + size.SizeOfPtr + - s.reader.Size() + - s.scorer.Size() -} - -func (s *DocIDSearcher) Count() uint64 { - return uint64(s.count) -} - -func (s *DocIDSearcher) Weight() float64 { - return s.scorer.Weight() -} - -func (s *DocIDSearcher) SetQueryNorm(qnorm float64) { - s.scorer.SetQueryNorm(qnorm) -} - -func (s *DocIDSearcher) Next(ctx *search.SearchContext) (*search.DocumentMatch, error) { - docidMatch, err := s.reader.Next() - if err != nil { - return nil, err - } - if docidMatch == nil { - return nil, nil - } - - docMatch := s.scorer.Score(ctx, docidMatch) - return docMatch, nil -} - -func (s *DocIDSearcher) Advance(ctx *search.SearchContext, ID index.IndexInternalID) (*search.DocumentMatch, error) { - docidMatch, err := s.reader.Advance(ID) - if err != nil { - return nil, err - } - if docidMatch == nil { - return nil, nil - } - - docMatch := s.scorer.Score(ctx, docidMatch) - return docMatch, nil -} - -func (s *DocIDSearcher) Close() error { - return s.reader.Close() -} - -func (s *DocIDSearcher) Min() int { - return 0 -} - -func (s *DocIDSearcher) DocumentMatchPoolSize() int { - return 1 -} diff --git a/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_fuzzy.go b/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_fuzzy.go deleted file mode 100644 index aab67010c..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_fuzzy.go +++ /dev/null @@ 
-1,117 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package searcher - -import ( - "fmt" - - "github.com/blevesearch/bleve/v2/search" - index "github.com/blevesearch/bleve_index_api" -) - -var MaxFuzziness = 2 - -func NewFuzzySearcher(indexReader index.IndexReader, term string, - prefix, fuzziness int, field string, boost float64, - options search.SearcherOptions) (search.Searcher, error) { - - if fuzziness > MaxFuzziness { - return nil, fmt.Errorf("fuzziness exceeds max (%d)", MaxFuzziness) - } - - if fuzziness < 0 { - return nil, fmt.Errorf("invalid fuzziness, negative") - } - - // Note: we don't byte slice the term for a prefix because of runes. 
- prefixTerm := "" - for i, r := range term { - if i < prefix { - prefixTerm += string(r) - } else { - break - } - } - candidateTerms, err := findFuzzyCandidateTerms(indexReader, term, fuzziness, - field, prefixTerm) - if err != nil { - return nil, err - } - - return NewMultiTermSearcher(indexReader, candidateTerms, field, - boost, options, true) -} - -func findFuzzyCandidateTerms(indexReader index.IndexReader, term string, - fuzziness int, field, prefixTerm string) (rv []string, err error) { - rv = make([]string, 0) - - // in case of advanced reader implementations directly call - // the levenshtein automaton based iterator to collect the - // candidate terms - if ir, ok := indexReader.(index.IndexReaderFuzzy); ok { - fieldDict, err := ir.FieldDictFuzzy(field, term, fuzziness, prefixTerm) - if err != nil { - return nil, err - } - defer func() { - if cerr := fieldDict.Close(); cerr != nil && err == nil { - err = cerr - } - }() - tfd, err := fieldDict.Next() - for err == nil && tfd != nil { - rv = append(rv, tfd.Term) - if tooManyClauses(len(rv)) { - return nil, tooManyClausesErr(field, len(rv)) - } - tfd, err = fieldDict.Next() - } - return rv, err - } - - var fieldDict index.FieldDict - if len(prefixTerm) > 0 { - fieldDict, err = indexReader.FieldDictPrefix(field, []byte(prefixTerm)) - } else { - fieldDict, err = indexReader.FieldDict(field) - } - if err != nil { - return nil, err - } - defer func() { - if cerr := fieldDict.Close(); cerr != nil && err == nil { - err = cerr - } - }() - - // enumerate terms and check levenshtein distance - var reuse []int - tfd, err := fieldDict.Next() - for err == nil && tfd != nil { - var ld int - var exceeded bool - ld, exceeded, reuse = search.LevenshteinDistanceMaxReuseSlice(term, tfd.Term, fuzziness, reuse) - if !exceeded && ld <= fuzziness { - rv = append(rv, tfd.Term) - if tooManyClauses(len(rv)) { - return nil, tooManyClausesErr(field, len(rv)) - } - } - tfd, err = fieldDict.Next() - } - - return rv, err -} diff --git 
a/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_match_all.go b/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_match_all.go deleted file mode 100644 index db8306eee..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_match_all.go +++ /dev/null @@ -1,121 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package searcher - -import ( - "reflect" - - "github.com/blevesearch/bleve/v2/search" - "github.com/blevesearch/bleve/v2/search/scorer" - "github.com/blevesearch/bleve/v2/size" - index "github.com/blevesearch/bleve_index_api" -) - -var reflectStaticSizeMatchAllSearcher int - -func init() { - var mas MatchAllSearcher - reflectStaticSizeMatchAllSearcher = int(reflect.TypeOf(mas).Size()) -} - -type MatchAllSearcher struct { - indexReader index.IndexReader - reader index.DocIDReader - scorer *scorer.ConstantScorer - count uint64 -} - -func NewMatchAllSearcher(indexReader index.IndexReader, boost float64, options search.SearcherOptions) (*MatchAllSearcher, error) { - reader, err := indexReader.DocIDReaderAll() - if err != nil { - return nil, err - } - count, err := indexReader.DocCount() - if err != nil { - _ = reader.Close() - return nil, err - } - scorer := scorer.NewConstantScorer(1.0, boost, options) - return &MatchAllSearcher{ - indexReader: indexReader, - reader: reader, - scorer: scorer, - count: count, - }, nil -} - -func (s *MatchAllSearcher) Size() 
int { - return reflectStaticSizeMatchAllSearcher + size.SizeOfPtr + - s.reader.Size() + - s.scorer.Size() -} - -func (s *MatchAllSearcher) Count() uint64 { - return s.count -} - -func (s *MatchAllSearcher) Weight() float64 { - return s.scorer.Weight() -} - -func (s *MatchAllSearcher) SetQueryNorm(qnorm float64) { - s.scorer.SetQueryNorm(qnorm) -} - -func (s *MatchAllSearcher) Next(ctx *search.SearchContext) (*search.DocumentMatch, error) { - id, err := s.reader.Next() - if err != nil { - return nil, err - } - - if id == nil { - return nil, nil - } - - // score match - docMatch := s.scorer.Score(ctx, id) - // return doc match - return docMatch, nil - -} - -func (s *MatchAllSearcher) Advance(ctx *search.SearchContext, ID index.IndexInternalID) (*search.DocumentMatch, error) { - id, err := s.reader.Advance(ID) - if err != nil { - return nil, err - } - - if id == nil { - return nil, nil - } - - // score match - docMatch := s.scorer.Score(ctx, id) - - // return doc match - return docMatch, nil -} - -func (s *MatchAllSearcher) Close() error { - return s.reader.Close() -} - -func (s *MatchAllSearcher) Min() int { - return 0 -} - -func (s *MatchAllSearcher) DocumentMatchPoolSize() int { - return 1 -} diff --git a/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_regexp.go b/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_regexp.go deleted file mode 100644 index 81b1cf734..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_regexp.go +++ /dev/null @@ -1,131 +0,0 @@ -// Copyright (c) 2015 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package searcher - -import ( - "regexp" - - "github.com/blevesearch/bleve/v2/search" - index "github.com/blevesearch/bleve_index_api" -) - -// The Regexp interface defines the subset of the regexp.Regexp API -// methods that are used by bleve indexes, allowing callers to pass in -// alternate implementations. -type Regexp interface { - FindStringIndex(s string) (loc []int) - - LiteralPrefix() (prefix string, complete bool) - - String() string -} - -// NewRegexpStringSearcher is similar to NewRegexpSearcher, but -// additionally optimizes for index readers that handle regexp's. 
-func NewRegexpStringSearcher(indexReader index.IndexReader, pattern string, - field string, boost float64, options search.SearcherOptions) ( - search.Searcher, error) { - ir, ok := indexReader.(index.IndexReaderRegexp) - if !ok { - r, err := regexp.Compile(pattern) - if err != nil { - return nil, err - } - - return NewRegexpSearcher(indexReader, r, field, boost, options) - } - - fieldDict, err := ir.FieldDictRegexp(field, pattern) - if err != nil { - return nil, err - } - defer func() { - if cerr := fieldDict.Close(); cerr != nil && err == nil { - err = cerr - } - }() - - var candidateTerms []string - - tfd, err := fieldDict.Next() - for err == nil && tfd != nil { - candidateTerms = append(candidateTerms, tfd.Term) - tfd, err = fieldDict.Next() - } - if err != nil { - return nil, err - } - - return NewMultiTermSearcher(indexReader, candidateTerms, field, boost, - options, true) -} - -// NewRegexpSearcher creates a searcher which will match documents that -// contain terms which match the pattern regexp. The match must be EXACT -// matching the entire term. The provided regexp SHOULD NOT start with ^ -// or end with $ as this can intefere with the implementation. Separately, -// matches will be checked to ensure they match the entire term. 
-func NewRegexpSearcher(indexReader index.IndexReader, pattern Regexp, - field string, boost float64, options search.SearcherOptions) ( - search.Searcher, error) { - var candidateTerms []string - - prefixTerm, complete := pattern.LiteralPrefix() - if complete { - // there is no pattern - candidateTerms = []string{prefixTerm} - } else { - var err error - candidateTerms, err = findRegexpCandidateTerms(indexReader, pattern, field, - prefixTerm) - if err != nil { - return nil, err - } - } - - return NewMultiTermSearcher(indexReader, candidateTerms, field, boost, - options, true) -} - -func findRegexpCandidateTerms(indexReader index.IndexReader, - pattern Regexp, field, prefixTerm string) (rv []string, err error) { - rv = make([]string, 0) - var fieldDict index.FieldDict - if len(prefixTerm) > 0 { - fieldDict, err = indexReader.FieldDictPrefix(field, []byte(prefixTerm)) - } else { - fieldDict, err = indexReader.FieldDict(field) - } - defer func() { - if cerr := fieldDict.Close(); cerr != nil && err == nil { - err = cerr - } - }() - - // enumerate the terms and check against regexp - tfd, err := fieldDict.Next() - for err == nil && tfd != nil { - matchPos := pattern.FindStringIndex(tfd.Term) - if matchPos != nil && matchPos[0] == 0 && matchPos[1] == len(tfd.Term) { - rv = append(rv, tfd.Term) - if tooManyClauses(len(rv)) { - return rv, tooManyClausesErr(field, len(rv)) - } - } - tfd, err = fieldDict.Next() - } - - return rv, err -} diff --git a/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_term.go b/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_term.go deleted file mode 100644 index 55c18d163..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_term.go +++ /dev/null @@ -1,141 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package searcher - -import ( - "reflect" - - "github.com/blevesearch/bleve/v2/search" - "github.com/blevesearch/bleve/v2/search/scorer" - "github.com/blevesearch/bleve/v2/size" - index "github.com/blevesearch/bleve_index_api" -) - -var reflectStaticSizeTermSearcher int - -func init() { - var ts TermSearcher - reflectStaticSizeTermSearcher = int(reflect.TypeOf(ts).Size()) -} - -type TermSearcher struct { - indexReader index.IndexReader - reader index.TermFieldReader - scorer *scorer.TermQueryScorer - tfd index.TermFieldDoc -} - -func NewTermSearcher(indexReader index.IndexReader, term string, field string, boost float64, options search.SearcherOptions) (*TermSearcher, error) { - return NewTermSearcherBytes(indexReader, []byte(term), field, boost, options) -} - -func NewTermSearcherBytes(indexReader index.IndexReader, term []byte, field string, boost float64, options search.SearcherOptions) (*TermSearcher, error) { - needFreqNorm := options.Score != "none" - reader, err := indexReader.TermFieldReader(term, field, needFreqNorm, needFreqNorm, options.IncludeTermVectors) - if err != nil { - return nil, err - } - return newTermSearcherFromReader(indexReader, reader, term, field, boost, options) -} - -func newTermSearcherFromReader(indexReader index.IndexReader, reader index.TermFieldReader, - term []byte, field string, boost float64, options search.SearcherOptions) (*TermSearcher, error) { - count, err := indexReader.DocCount() - if err != nil { - _ = reader.Close() - return nil, err - } - scorer := scorer.NewTermQueryScorer(term, field, boost, count, 
reader.Count(), options) - return &TermSearcher{ - indexReader: indexReader, - reader: reader, - scorer: scorer, - }, nil -} - -func (s *TermSearcher) Size() int { - return reflectStaticSizeTermSearcher + size.SizeOfPtr + - s.reader.Size() + - s.tfd.Size() + - s.scorer.Size() -} - -func (s *TermSearcher) Count() uint64 { - return s.reader.Count() -} - -func (s *TermSearcher) Weight() float64 { - return s.scorer.Weight() -} - -func (s *TermSearcher) SetQueryNorm(qnorm float64) { - s.scorer.SetQueryNorm(qnorm) -} - -func (s *TermSearcher) Next(ctx *search.SearchContext) (*search.DocumentMatch, error) { - termMatch, err := s.reader.Next(s.tfd.Reset()) - if err != nil { - return nil, err - } - - if termMatch == nil { - return nil, nil - } - - // score match - docMatch := s.scorer.Score(ctx, termMatch) - // return doc match - return docMatch, nil - -} - -func (s *TermSearcher) Advance(ctx *search.SearchContext, ID index.IndexInternalID) (*search.DocumentMatch, error) { - termMatch, err := s.reader.Advance(ID, s.tfd.Reset()) - if err != nil { - return nil, err - } - - if termMatch == nil { - return nil, nil - } - - // score match - docMatch := s.scorer.Score(ctx, termMatch) - - // return doc match - return docMatch, nil -} - -func (s *TermSearcher) Close() error { - return s.reader.Close() -} - -func (s *TermSearcher) Min() int { - return 0 -} - -func (s *TermSearcher) DocumentMatchPoolSize() int { - return 1 -} - -func (s *TermSearcher) Optimize(kind string, octx index.OptimizableContext) ( - index.OptimizableContext, error) { - o, ok := s.reader.(index.Optimizable) - if ok { - return o.Optimize(kind, octx) - } - - return nil, nil -} diff --git a/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_term_range.go b/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_term_range.go deleted file mode 100644 index 5ef58f76f..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_term_range.go +++ /dev/null @@ -1,85 +0,0 @@ -// 
Copyright (c) 2017 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package searcher - -import ( - "github.com/blevesearch/bleve/v2/search" - index "github.com/blevesearch/bleve_index_api" -) - -func NewTermRangeSearcher(indexReader index.IndexReader, - min, max []byte, inclusiveMin, inclusiveMax *bool, field string, - boost float64, options search.SearcherOptions) (search.Searcher, error) { - - if inclusiveMin == nil { - defaultInclusiveMin := true - inclusiveMin = &defaultInclusiveMin - } - if inclusiveMax == nil { - defaultInclusiveMax := false - inclusiveMax = &defaultInclusiveMax - } - - if min == nil { - min = []byte{} - } - - rangeMax := max - if rangeMax != nil { - // the term dictionary range end has an unfortunate implementation - rangeMax = append(rangeMax, 0) - } - - // find the terms with this prefix - fieldDict, err := indexReader.FieldDictRange(field, min, rangeMax) - if err != nil { - return nil, err - } - - defer func() { - if cerr := fieldDict.Close(); cerr != nil && err == nil { - err = cerr - } - }() - - var terms []string - tfd, err := fieldDict.Next() - for err == nil && tfd != nil { - terms = append(terms, tfd.Term) - tfd, err = fieldDict.Next() - } - if err != nil { - return nil, err - } - - if len(terms) < 1 { - return NewMatchNoneSearcher(indexReader) - } - - if !*inclusiveMin && min != nil && string(min) == terms[0] { - terms = terms[1:] - // check again, as we might have removed only entry - if len(terms) < 1 { 
- return NewMatchNoneSearcher(indexReader) - } - } - - // if our term list included the max, it would be the last item - if !*inclusiveMax && max != nil && string(max) == terms[len(terms)-1] { - terms = terms[:len(terms)-1] - } - - return NewMultiTermSearcher(indexReader, terms, field, boost, options, true) -} diff --git a/vendor/github.com/blevesearch/bleve/v2/search/sort.go b/vendor/github.com/blevesearch/bleve/v2/search/sort.go deleted file mode 100644 index 496db47fc..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/search/sort.go +++ /dev/null @@ -1,746 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package search - -import ( - "bytes" - "encoding/json" - "fmt" - "math" - "sort" - "strings" - - "github.com/blevesearch/bleve/v2/geo" - "github.com/blevesearch/bleve/v2/numeric" -) - -var HighTerm = strings.Repeat(string([]byte{0xff}), 10) -var LowTerm = string([]byte{0x00}) - -type SearchSort interface { - UpdateVisitor(field string, term []byte) - Value(a *DocumentMatch) string - Descending() bool - - RequiresDocID() bool - RequiresScoring() bool - RequiresFields() []string - - Reverse() - - Copy() SearchSort -} - -func ParseSearchSortObj(input map[string]interface{}) (SearchSort, error) { - descending, ok := input["desc"].(bool) - by, ok := input["by"].(string) - if !ok { - return nil, fmt.Errorf("search sort must specify by") - } - switch by { - case "id": - return &SortDocID{ - Desc: descending, - }, nil - case "score": - return &SortScore{ - Desc: descending, - }, nil - case "geo_distance": - field, ok := input["field"].(string) - if !ok { - return nil, fmt.Errorf("search sort mode geo_distance must specify field") - } - lon, lat, foundLocation := geo.ExtractGeoPoint(input["location"]) - if !foundLocation { - return nil, fmt.Errorf("unable to parse geo_distance location") - } - rvd := &SortGeoDistance{ - Field: field, - Desc: descending, - Lon: lon, - Lat: lat, - unitMult: 1.0, - } - if distUnit, ok := input["unit"].(string); ok { - var err error - rvd.unitMult, err = geo.ParseDistanceUnit(distUnit) - if err != nil { - return nil, err - } - rvd.Unit = distUnit - } - return rvd, nil - case "field": - field, ok := input["field"].(string) - if !ok { - return nil, fmt.Errorf("search sort mode field must specify field") - } - rv := &SortField{ - Field: field, - Desc: descending, - } - typ, ok := input["type"].(string) - if ok { - switch typ { - case "auto": - rv.Type = SortFieldAuto - case "string": - rv.Type = SortFieldAsString - case "number": - rv.Type = SortFieldAsNumber - case "date": - rv.Type = SortFieldAsDate - default: - return nil, 
fmt.Errorf("unknown sort field type: %s", typ) - } - } - mode, ok := input["mode"].(string) - if ok { - switch mode { - case "default": - rv.Mode = SortFieldDefault - case "min": - rv.Mode = SortFieldMin - case "max": - rv.Mode = SortFieldMax - default: - return nil, fmt.Errorf("unknown sort field mode: %s", mode) - } - } - missing, ok := input["missing"].(string) - if ok { - switch missing { - case "first": - rv.Missing = SortFieldMissingFirst - case "last": - rv.Missing = SortFieldMissingLast - default: - return nil, fmt.Errorf("unknown sort field missing: %s", missing) - } - } - return rv, nil - } - - return nil, fmt.Errorf("unknown search sort by: %s", by) -} - -func ParseSearchSortString(input string) SearchSort { - descending := false - if strings.HasPrefix(input, "-") { - descending = true - input = input[1:] - } else if strings.HasPrefix(input, "+") { - input = input[1:] - } - if input == "_id" { - return &SortDocID{ - Desc: descending, - } - } else if input == "_score" { - return &SortScore{ - Desc: descending, - } - } - return &SortField{ - Field: input, - Desc: descending, - } -} - -func ParseSearchSortJSON(input json.RawMessage) (SearchSort, error) { - // first try to parse it as string - var sortString string - err := json.Unmarshal(input, &sortString) - if err != nil { - var sortObj map[string]interface{} - err = json.Unmarshal(input, &sortObj) - if err != nil { - return nil, err - } - return ParseSearchSortObj(sortObj) - } - return ParseSearchSortString(sortString), nil -} - -func ParseSortOrderStrings(in []string) SortOrder { - rv := make(SortOrder, 0, len(in)) - for _, i := range in { - ss := ParseSearchSortString(i) - rv = append(rv, ss) - } - return rv -} - -func ParseSortOrderJSON(in []json.RawMessage) (SortOrder, error) { - rv := make(SortOrder, 0, len(in)) - for _, i := range in { - ss, err := ParseSearchSortJSON(i) - if err != nil { - return nil, err - } - rv = append(rv, ss) - } - return rv, nil -} - -type SortOrder []SearchSort - -func (so 
SortOrder) Value(doc *DocumentMatch) { - for _, soi := range so { - doc.Sort = append(doc.Sort, soi.Value(doc)) - } -} - -func (so SortOrder) UpdateVisitor(field string, term []byte) { - for _, soi := range so { - soi.UpdateVisitor(field, term) - } -} - -func (so SortOrder) Copy() SortOrder { - rv := make(SortOrder, len(so)) - for i, soi := range so { - rv[i] = soi.Copy() - } - return rv -} - -// Compare will compare two document matches using the specified sort order -// if both are numbers, we avoid converting back to term -func (so SortOrder) Compare(cachedScoring, cachedDesc []bool, i, j *DocumentMatch) int { - // compare the documents on all search sorts until a differences is found - for x := range so { - c := 0 - if cachedScoring[x] { - if i.Score < j.Score { - c = -1 - } else if i.Score > j.Score { - c = 1 - } - } else { - iVal := i.Sort[x] - jVal := j.Sort[x] - if iVal < jVal { - c = -1 - } else if iVal > jVal { - c = 1 - } - } - - if c == 0 { - continue - } - if cachedDesc[x] { - c = -c - } - return c - } - // if they are the same at this point, impose order based on index natural sort order - if i.HitNumber == j.HitNumber { - return 0 - } else if i.HitNumber > j.HitNumber { - return 1 - } - return -1 -} - -func (so SortOrder) RequiresScore() bool { - for _, soi := range so { - if soi.RequiresScoring() { - return true - } - } - return false -} - -func (so SortOrder) RequiresDocID() bool { - for _, soi := range so { - if soi.RequiresDocID() { - return true - } - } - return false -} - -func (so SortOrder) RequiredFields() []string { - var rv []string - for _, soi := range so { - rv = append(rv, soi.RequiresFields()...) 
- } - return rv -} - -func (so SortOrder) CacheIsScore() []bool { - rv := make([]bool, 0, len(so)) - for _, soi := range so { - rv = append(rv, soi.RequiresScoring()) - } - return rv -} - -func (so SortOrder) CacheDescending() []bool { - rv := make([]bool, 0, len(so)) - for _, soi := range so { - rv = append(rv, soi.Descending()) - } - return rv -} - -func (so SortOrder) Reverse() { - for _, soi := range so { - soi.Reverse() - } -} - -// SortFieldType lets you control some internal sort behavior -// normally leaving this to the zero-value of SortFieldAuto is fine -type SortFieldType int - -const ( - // SortFieldAuto applies heuristics attempt to automatically sort correctly - SortFieldAuto SortFieldType = iota - // SortFieldAsString forces sort as string (no prefix coded terms removed) - SortFieldAsString - // SortFieldAsNumber forces sort as string (prefix coded terms with shift > 0 removed) - SortFieldAsNumber - // SortFieldAsDate forces sort as string (prefix coded terms with shift > 0 removed) - SortFieldAsDate -) - -// SortFieldMode describes the behavior if the field has multiple values -type SortFieldMode int - -const ( - // SortFieldDefault uses the first (or only) value, this is the default zero-value - SortFieldDefault SortFieldMode = iota // FIXME name is confusing - // SortFieldMin uses the minimum value - SortFieldMin - // SortFieldMax uses the maximum value - SortFieldMax -) - -// SortFieldMissing controls where documents missing a field value should be sorted -type SortFieldMissing int - -const ( - // SortFieldMissingLast sorts documents missing a field at the end - SortFieldMissingLast SortFieldMissing = iota - - // SortFieldMissingFirst sorts documents missing a field at the beginning - SortFieldMissingFirst -) - -// SortField will sort results by the value of a stored field -// Field is the name of the field -// Descending reverse the sort order (default false) -// Type allows forcing of string/number/date behavior (default auto) -// Mode controls 
behavior for multi-values fields (default first) -// Missing controls behavior of missing values (default last) -type SortField struct { - Field string - Desc bool - Type SortFieldType - Mode SortFieldMode - Missing SortFieldMissing - values [][]byte - tmp [][]byte -} - -// UpdateVisitor notifies this sort field that in this document -// this field has the specified term -func (s *SortField) UpdateVisitor(field string, term []byte) { - if field == s.Field { - s.values = append(s.values, term) - } -} - -// Value returns the sort value of the DocumentMatch -// it also resets the state of this SortField for -// processing the next document -func (s *SortField) Value(i *DocumentMatch) string { - iTerms := s.filterTermsByType(s.values) - iTerm := s.filterTermsByMode(iTerms) - s.values = s.values[:0] - return iTerm -} - -// Descending determines the order of the sort -func (s *SortField) Descending() bool { - return s.Desc -} - -func (s *SortField) filterTermsByMode(terms [][]byte) string { - if len(terms) == 1 || (len(terms) > 1 && s.Mode == SortFieldDefault) { - return string(terms[0]) - } else if len(terms) > 1 { - switch s.Mode { - case SortFieldMin: - sort.Sort(BytesSlice(terms)) - return string(terms[0]) - case SortFieldMax: - sort.Sort(BytesSlice(terms)) - return string(terms[len(terms)-1]) - } - } - - // handle missing terms - if s.Missing == SortFieldMissingLast { - if s.Desc { - return LowTerm - } - return HighTerm - } - if s.Desc { - return HighTerm - } - return LowTerm -} - -// filterTermsByType attempts to make one pass on the terms -// if we are in auto-mode AND all the terms look like prefix-coded numbers -// return only the terms which had shift of 0 -// if we are in explicit number or date mode, return only valid -// prefix coded numbers with shift of 0 -func (s *SortField) filterTermsByType(terms [][]byte) [][]byte { - stype := s.Type - if stype == SortFieldAuto { - allTermsPrefixCoded := true - termsWithShiftZero := s.tmp[:0] - for _, term := range 
terms { - valid, shift := numeric.ValidPrefixCodedTermBytes(term) - if valid && shift == 0 { - termsWithShiftZero = append(termsWithShiftZero, term) - } else if !valid { - allTermsPrefixCoded = false - } - } - // reset the terms only when valid zero shift terms are found. - if allTermsPrefixCoded && len(termsWithShiftZero) > 0 { - terms = termsWithShiftZero - s.tmp = termsWithShiftZero[:0] - } - } else if stype == SortFieldAsNumber || stype == SortFieldAsDate { - termsWithShiftZero := s.tmp[:0] - for _, term := range terms { - valid, shift := numeric.ValidPrefixCodedTermBytes(term) - if valid && shift == 0 { - termsWithShiftZero = append(termsWithShiftZero, term) - } - } - terms = termsWithShiftZero - s.tmp = termsWithShiftZero[:0] - } - return terms -} - -// RequiresDocID says this SearchSort does not require the DocID be loaded -func (s *SortField) RequiresDocID() bool { return false } - -// RequiresScoring says this SearchStore does not require scoring -func (s *SortField) RequiresScoring() bool { return false } - -// RequiresFields says this SearchStore requires the specified stored field -func (s *SortField) RequiresFields() []string { return []string{s.Field} } - -func (s *SortField) MarshalJSON() ([]byte, error) { - // see if simple format can be used - if s.Missing == SortFieldMissingLast && - s.Mode == SortFieldDefault && - s.Type == SortFieldAuto { - if s.Desc { - return json.Marshal("-" + s.Field) - } - return json.Marshal(s.Field) - } - sfm := map[string]interface{}{ - "by": "field", - "field": s.Field, - } - if s.Desc { - sfm["desc"] = true - } - if s.Missing > SortFieldMissingLast { - switch s.Missing { - case SortFieldMissingFirst: - sfm["missing"] = "first" - } - } - if s.Mode > SortFieldDefault { - switch s.Mode { - case SortFieldMin: - sfm["mode"] = "min" - case SortFieldMax: - sfm["mode"] = "max" - } - } - if s.Type > SortFieldAuto { - switch s.Type { - case SortFieldAsString: - sfm["type"] = "string" - case SortFieldAsNumber: - sfm["type"] = 
"number" - case SortFieldAsDate: - sfm["type"] = "date" - } - } - - return json.Marshal(sfm) -} - -func (s *SortField) Copy() SearchSort { - rv := *s - return &rv -} - -func (s *SortField) Reverse() { - s.Desc = !s.Desc - if s.Missing == SortFieldMissingFirst { - s.Missing = SortFieldMissingLast - } else { - s.Missing = SortFieldMissingFirst - } -} - -// SortDocID will sort results by the document identifier -type SortDocID struct { - Desc bool -} - -// UpdateVisitor is a no-op for SortDocID as it's value -// is not dependent on any field terms -func (s *SortDocID) UpdateVisitor(field string, term []byte) { -} - -// Value returns the sort value of the DocumentMatch -func (s *SortDocID) Value(i *DocumentMatch) string { - return i.ID -} - -// Descending determines the order of the sort -func (s *SortDocID) Descending() bool { - return s.Desc -} - -// RequiresDocID says this SearchSort does require the DocID be loaded -func (s *SortDocID) RequiresDocID() bool { return true } - -// RequiresScoring says this SearchStore does not require scoring -func (s *SortDocID) RequiresScoring() bool { return false } - -// RequiresFields says this SearchStore does not require any stored fields -func (s *SortDocID) RequiresFields() []string { return nil } - -func (s *SortDocID) MarshalJSON() ([]byte, error) { - if s.Desc { - return json.Marshal("-_id") - } - return json.Marshal("_id") -} - -func (s *SortDocID) Copy() SearchSort { - rv := *s - return &rv -} - -func (s *SortDocID) Reverse() { - s.Desc = !s.Desc -} - -// SortScore will sort results by the document match score -type SortScore struct { - Desc bool -} - -// UpdateVisitor is a no-op for SortScore as it's value -// is not dependent on any field terms -func (s *SortScore) UpdateVisitor(field string, term []byte) { -} - -// Value returns the sort value of the DocumentMatch -func (s *SortScore) Value(i *DocumentMatch) string { - return "_score" -} - -// Descending determines the order of the sort -func (s *SortScore) 
Descending() bool { - return s.Desc -} - -// RequiresDocID says this SearchSort does not require the DocID be loaded -func (s *SortScore) RequiresDocID() bool { return false } - -// RequiresScoring says this SearchStore does require scoring -func (s *SortScore) RequiresScoring() bool { return true } - -// RequiresFields says this SearchStore does not require any store fields -func (s *SortScore) RequiresFields() []string { return nil } - -func (s *SortScore) MarshalJSON() ([]byte, error) { - if s.Desc { - return json.Marshal("-_score") - } - return json.Marshal("_score") -} - -func (s *SortScore) Copy() SearchSort { - rv := *s - return &rv -} - -func (s *SortScore) Reverse() { - s.Desc = !s.Desc -} - -var maxDistance = string(numeric.MustNewPrefixCodedInt64(math.MaxInt64, 0)) - -// NewSortGeoDistance creates SearchSort instance for sorting documents by -// their distance from the specified point. -func NewSortGeoDistance(field, unit string, lon, lat float64, desc bool) ( - *SortGeoDistance, error) { - rv := &SortGeoDistance{ - Field: field, - Desc: desc, - Unit: unit, - Lon: lon, - Lat: lat, - } - var err error - rv.unitMult, err = geo.ParseDistanceUnit(unit) - if err != nil { - return nil, err - } - return rv, nil -} - -// SortGeoDistance will sort results by the distance of an -// indexed geo point, from the provided location. 
-// Field is the name of the field -// Descending reverse the sort order (default false) -type SortGeoDistance struct { - Field string - Desc bool - Unit string - values []string - Lon float64 - Lat float64 - unitMult float64 -} - -// UpdateVisitor notifies this sort field that in this document -// this field has the specified term -func (s *SortGeoDistance) UpdateVisitor(field string, term []byte) { - if field == s.Field { - s.values = append(s.values, string(term)) - } -} - -// Value returns the sort value of the DocumentMatch -// it also resets the state of this SortField for -// processing the next document -func (s *SortGeoDistance) Value(i *DocumentMatch) string { - iTerms := s.filterTermsByType(s.values) - iTerm := s.filterTermsByMode(iTerms) - s.values = s.values[:0] - - if iTerm == "" { - return maxDistance - } - - i64, err := numeric.PrefixCoded(iTerm).Int64() - if err != nil { - return maxDistance - } - docLon := geo.MortonUnhashLon(uint64(i64)) - docLat := geo.MortonUnhashLat(uint64(i64)) - - dist := geo.Haversin(s.Lon, s.Lat, docLon, docLat) - // dist is returned in km, so convert to m - dist *= 1000 - if s.unitMult != 0 { - dist /= s.unitMult - } - distInt64 := numeric.Float64ToInt64(dist) - return string(numeric.MustNewPrefixCodedInt64(distInt64, 0)) -} - -// Descending determines the order of the sort -func (s *SortGeoDistance) Descending() bool { - return s.Desc -} - -func (s *SortGeoDistance) filterTermsByMode(terms []string) string { - if len(terms) >= 1 { - return terms[0] - } - - return "" -} - -// filterTermsByType attempts to make one pass on the terms -// return only valid prefix coded numbers with shift of 0 -func (s *SortGeoDistance) filterTermsByType(terms []string) []string { - var termsWithShiftZero []string - for _, term := range terms { - valid, shift := numeric.ValidPrefixCodedTerm(term) - if valid && shift == 0 { - termsWithShiftZero = append(termsWithShiftZero, term) - } - } - return termsWithShiftZero -} - -// RequiresDocID says 
this SearchSort does not require the DocID be loaded -func (s *SortGeoDistance) RequiresDocID() bool { return false } - -// RequiresScoring says this SearchStore does not require scoring -func (s *SortGeoDistance) RequiresScoring() bool { return false } - -// RequiresFields says this SearchStore requires the specified stored field -func (s *SortGeoDistance) RequiresFields() []string { return []string{s.Field} } - -func (s *SortGeoDistance) MarshalJSON() ([]byte, error) { - sfm := map[string]interface{}{ - "by": "geo_distance", - "field": s.Field, - "location": map[string]interface{}{ - "lon": s.Lon, - "lat": s.Lat, - }, - } - if s.Unit != "" { - sfm["unit"] = s.Unit - } - if s.Desc { - sfm["desc"] = true - } - - return json.Marshal(sfm) -} - -func (s *SortGeoDistance) Copy() SearchSort { - rv := *s - return &rv -} - -func (s *SortGeoDistance) Reverse() { - s.Desc = !s.Desc -} - -type BytesSlice [][]byte - -func (p BytesSlice) Len() int { return len(p) } -func (p BytesSlice) Less(i, j int) bool { return bytes.Compare(p[i], p[j]) < 0 } -func (p BytesSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } diff --git a/vendor/github.com/blevesearch/bleve/v2/size/sizes.go b/vendor/github.com/blevesearch/bleve/v2/size/sizes.go deleted file mode 100644 index 0990bf86e..000000000 --- a/vendor/github.com/blevesearch/bleve/v2/size/sizes.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright (c) 2018 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package size - -import ( - "reflect" -) - -func init() { - var b bool - SizeOfBool = int(reflect.TypeOf(b).Size()) - var f32 float32 - SizeOfFloat32 = int(reflect.TypeOf(f32).Size()) - var f64 float64 - SizeOfFloat64 = int(reflect.TypeOf(f64).Size()) - var i int - SizeOfInt = int(reflect.TypeOf(i).Size()) - var m map[int]int - SizeOfMap = int(reflect.TypeOf(m).Size()) - var ptr *int - SizeOfPtr = int(reflect.TypeOf(ptr).Size()) - var slice []int - SizeOfSlice = int(reflect.TypeOf(slice).Size()) - var str string - SizeOfString = int(reflect.TypeOf(str).Size()) - var u8 uint8 - SizeOfUint8 = int(reflect.TypeOf(u8).Size()) - var u16 uint16 - SizeOfUint16 = int(reflect.TypeOf(u16).Size()) - var u32 uint32 - SizeOfUint32 = int(reflect.TypeOf(u32).Size()) - var u64 uint64 - SizeOfUint64 = int(reflect.TypeOf(u64).Size()) -} - -var SizeOfBool int -var SizeOfFloat32 int -var SizeOfFloat64 int -var SizeOfInt int -var SizeOfMap int -var SizeOfPtr int -var SizeOfSlice int -var SizeOfString int -var SizeOfUint8 int -var SizeOfUint16 int -var SizeOfUint32 int -var SizeOfUint64 int diff --git a/vendor/github.com/blevesearch/bleve_index_api/.golangci.yml b/vendor/github.com/blevesearch/bleve_index_api/.golangci.yml deleted file mode 100644 index a00f6c57e..000000000 --- a/vendor/github.com/blevesearch/bleve_index_api/.golangci.yml +++ /dev/null @@ -1,37 +0,0 @@ -linters: - # please, do not use `enable-all`: it's deprecated and will be removed soon. 
- # inverted configuration with `enable-all` and `disable` is not scalable during updates of golangci-lint - disable-all: true - enable: - - bodyclose - - deadcode - - depguard - - dogsled - - dupl - - errcheck - - goconst - - gocritic - - gocyclo - - gofmt - - goimports - - gomnd - - goprintffuncname - - gosimple - - govet - - ineffassign - - interfacer - - lll - - misspell - - nakedret - - nolintlint - - rowserrcheck - - scopelint - - staticcheck - - structcheck - - stylecheck - - typecheck - - unconvert - - unparam - - unused - - varcheck - - whitespace diff --git a/vendor/github.com/blevesearch/bleve_index_api/README.md b/vendor/github.com/blevesearch/bleve_index_api/README.md deleted file mode 100644 index 46daa6832..000000000 --- a/vendor/github.com/blevesearch/bleve_index_api/README.md +++ /dev/null @@ -1,11 +0,0 @@ -# Bleve Index API - -[![PkgGoDev](https://pkg.go.dev/badge/github.com/blevesearch/bleve_index_api)](https://pkg.go.dev/github.com/blevesearch/bleve_index_api) -[![Tests](https://github.com/blevesearch/bleve_index_api/workflows/Tests/badge.svg?branch=master&event=push)](https://github.com/blevesearch/bleve_index_api/actions?query=workflow%3ATests+event%3Apush+branch%3Amaster) -[![Lint](https://github.com/blevesearch/bleve_index_api/workflows/Lint/badge.svg?branch=master&event=push)](https://github.com/blevesearch/bleve_index_api/actions?query=workflow%3ALint+event%3Apush+branch%3Amaster) - -Bleve supports a pluggable Index interface. - -By placing these interfaces in their own, *hopefully* slowly evolving module, it frees up Bleve and the underlying index to each introduce new major versions without interfering with one another. - -With that in mind, we anticipate introducing non-breaking changes only to this module, and keeping the major version at 1.x for some time. 
\ No newline at end of file diff --git a/vendor/github.com/blevesearch/bleve_index_api/analysis.go b/vendor/github.com/blevesearch/bleve_index_api/analysis.go deleted file mode 100644 index 5ab616c1a..000000000 --- a/vendor/github.com/blevesearch/bleve_index_api/analysis.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright (c) 2015 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package index - -type AnalysisWork func() - -type AnalysisQueue struct { - queue chan AnalysisWork - done chan struct{} -} - -func (q *AnalysisQueue) Queue(work AnalysisWork) { - q.queue <- work -} - -func (q *AnalysisQueue) Close() { - close(q.done) -} - -func NewAnalysisQueue(numWorkers int) *AnalysisQueue { - rv := AnalysisQueue{ - queue: make(chan AnalysisWork), - done: make(chan struct{}), - } - for i := 0; i < numWorkers; i++ { - go AnalysisWorker(rv) - } - return &rv -} - -func AnalysisWorker(q AnalysisQueue) { - // read work off the queue - for { - select { - case <-q.done: - return - case w := <-q.queue: - w() - } - } -} diff --git a/vendor/github.com/blevesearch/bleve_index_api/batch.go b/vendor/github.com/blevesearch/bleve_index_api/batch.go deleted file mode 100644 index ff1eaf6c6..000000000 --- a/vendor/github.com/blevesearch/bleve_index_api/batch.go +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright (c) 2020 Couchbase, Inc. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package index - -import "fmt" - -type BatchCallback func(error) - -type Batch struct { - IndexOps map[string]Document - InternalOps map[string][]byte - persistedCallback BatchCallback -} - -func NewBatch() *Batch { - return &Batch{ - IndexOps: make(map[string]Document), - InternalOps: make(map[string][]byte), - } -} - -func (b *Batch) Update(doc Document) { - b.IndexOps[doc.ID()] = doc -} - -func (b *Batch) Delete(id string) { - b.IndexOps[id] = nil -} - -func (b *Batch) SetInternal(key, val []byte) { - b.InternalOps[string(key)] = val -} - -func (b *Batch) DeleteInternal(key []byte) { - b.InternalOps[string(key)] = nil -} - -func (b *Batch) SetPersistedCallback(f BatchCallback) { - b.persistedCallback = f -} - -func (b *Batch) PersistedCallback() BatchCallback { - return b.persistedCallback -} - -func (b *Batch) String() string { - rv := fmt.Sprintf("Batch (%d ops, %d internal ops)\n", len(b.IndexOps), len(b.InternalOps)) - for k, v := range b.IndexOps { - if v != nil { - rv += fmt.Sprintf("\tINDEX - '%s'\n", k) - } else { - rv += fmt.Sprintf("\tDELETE - '%s'\n", k) - } - } - for k, v := range b.InternalOps { - if v != nil { - rv += fmt.Sprintf("\tSET INTERNAL - '%s'\n", k) - } else { - rv += fmt.Sprintf("\tDELETE INTERNAL - '%s'\n", k) - } - } - return rv -} - -func (b *Batch) Reset() { - b.IndexOps = make(map[string]Document) - b.InternalOps = make(map[string][]byte) - b.persistedCallback = nil -} - 
-func (b *Batch) Merge(o *Batch) { - for k, v := range o.IndexOps { - b.IndexOps[k] = v - } - for k, v := range o.InternalOps { - b.InternalOps[k] = v - } -} - -func (b *Batch) TotalDocSize() int { - var s int - for k, v := range b.IndexOps { - if v != nil { - s += v.Size() + sizeOfString - } - s += len(k) - } - return s -} diff --git a/vendor/github.com/blevesearch/bleve_index_api/document.go b/vendor/github.com/blevesearch/bleve_index_api/document.go deleted file mode 100644 index a6d0527df..000000000 --- a/vendor/github.com/blevesearch/bleve_index_api/document.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright (c) 2015 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package index - -import "time" - -type Document interface { - ID() string - Size() int - - VisitFields(visitor FieldVisitor) - VisitComposite(visitor CompositeFieldVisitor) - HasComposite() bool - - NumPlainTextBytes() uint64 - - AddIDField() -} - -type FieldVisitor func(Field) - -type Field interface { - Name() string - Value() []byte - ArrayPositions() []uint64 - - EncodedFieldType() byte - - Analyze() - - Options() FieldIndexingOptions - - AnalyzedLength() int - AnalyzedTokenFrequencies() TokenFrequencies - - NumPlainTextBytes() uint64 -} - -type CompositeFieldVisitor func(field CompositeField) - -type CompositeField interface { - Field - - Compose(field string, length int, freq TokenFrequencies) -} - -type TextField interface { - Text() string -} - -type NumericField interface { - Number() (float64, error) -} - -type DateTimeField interface { - DateTime() (time.Time, error) -} - -type BooleanField interface { - Boolean() (bool, error) -} - -type GeoPointField interface { - Lon() (float64, error) - Lat() (float64, error) -} diff --git a/vendor/github.com/blevesearch/bleve_index_api/freq.go b/vendor/github.com/blevesearch/bleve_index_api/freq.go deleted file mode 100644 index 5b6c7e1d8..000000000 --- a/vendor/github.com/blevesearch/bleve_index_api/freq.go +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright (c) 2020 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package index - -import "reflect" - -var reflectStaticSizeTokenLocation int -var reflectStaticSizeTokenFreq int - -func init() { - var tl TokenLocation - reflectStaticSizeTokenLocation = int(reflect.TypeOf(tl).Size()) - var tf TokenFreq - reflectStaticSizeTokenFreq = int(reflect.TypeOf(tf).Size()) -} - -// TokenLocation represents one occurrence of a term at a particular location in -// a field. Start, End and Position have the same meaning as in analysis.Token. -// Field and ArrayPositions identify the field value in the source document. -// See document.Field for details. -type TokenLocation struct { - Field string - ArrayPositions []uint64 - Start int - End int - Position int -} - -func (tl *TokenLocation) Size() int { - rv := reflectStaticSizeTokenLocation - rv += len(tl.ArrayPositions) * sizeOfUint64 - return rv -} - -// TokenFreq represents all the occurrences of a term in all fields of a -// document. -type TokenFreq struct { - Term []byte - Locations []*TokenLocation - frequency int -} - -func (tf *TokenFreq) Size() int { - rv := reflectStaticSizeTokenFreq - rv += len(tf.Term) - for _, loc := range tf.Locations { - rv += loc.Size() - } - return rv -} - -func (tf *TokenFreq) Frequency() int { - return tf.frequency -} - -func (tf *TokenFreq) SetFrequency(frequency int) { - tf.frequency = frequency -} - -// TokenFrequencies maps document terms to their combined frequencies from all -// fields. 
-type TokenFrequencies map[string]*TokenFreq - -func (tfs TokenFrequencies) Size() int { - rv := sizeOfMap - rv += len(tfs) * (sizeOfString + sizeOfPtr) - for k, v := range tfs { - rv += len(k) - rv += v.Size() - } - return rv -} - -func (tfs TokenFrequencies) MergeAll(remoteField string, other TokenFrequencies) { - // walk the new token frequencies - for tfk, tf := range other { - // set the remoteField value in incoming token freqs - for _, l := range tf.Locations { - l.Field = remoteField - } - existingTf, exists := tfs[tfk] - if exists { - existingTf.Locations = append(existingTf.Locations, tf.Locations...) - existingTf.frequency += tf.frequency - } else { - tfs[tfk] = &TokenFreq{ - Term: tf.Term, - frequency: tf.frequency, - Locations: make([]*TokenLocation, len(tf.Locations)), - } - copy(tfs[tfk].Locations, tf.Locations) - } - } -} diff --git a/vendor/github.com/blevesearch/bleve_index_api/go.mod b/vendor/github.com/blevesearch/bleve_index_api/go.mod deleted file mode 100644 index f129fb52c..000000000 --- a/vendor/github.com/blevesearch/bleve_index_api/go.mod +++ /dev/null @@ -1,3 +0,0 @@ -module github.com/blevesearch/bleve_index_api - -go 1.13 diff --git a/vendor/github.com/blevesearch/bleve_index_api/go.sum b/vendor/github.com/blevesearch/bleve_index_api/go.sum deleted file mode 100644 index e69de29bb..000000000 diff --git a/vendor/github.com/blevesearch/bleve_index_api/index.go b/vendor/github.com/blevesearch/bleve_index_api/index.go deleted file mode 100644 index 149791970..000000000 --- a/vendor/github.com/blevesearch/bleve_index_api/index.go +++ /dev/null @@ -1,212 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package index - -import ( - "bytes" - "reflect" -) - -var reflectStaticSizeTermFieldDoc int -var reflectStaticSizeTermFieldVector int - -func init() { - var tfd TermFieldDoc - reflectStaticSizeTermFieldDoc = int(reflect.TypeOf(tfd).Size()) - var tfv TermFieldVector - reflectStaticSizeTermFieldVector = int(reflect.TypeOf(tfv).Size()) -} - -type Index interface { - Open() error - Close() error - - Update(doc Document) error - Delete(id string) error - Batch(batch *Batch) error - - SetInternal(key, val []byte) error - DeleteInternal(key []byte) error - - // Reader returns a low-level accessor on the index data. Close it to - // release associated resources. - Reader() (IndexReader, error) - - StatsMap() map[string]interface{} -} - -type IndexReader interface { - TermFieldReader(term []byte, field string, includeFreq, includeNorm, includeTermVectors bool) (TermFieldReader, error) - - // DocIDReader returns an iterator over all doc ids - // The caller must close returned instance to release associated resources. 
- DocIDReaderAll() (DocIDReader, error) - - DocIDReaderOnly(ids []string) (DocIDReader, error) - - FieldDict(field string) (FieldDict, error) - - // FieldDictRange is currently defined to include the start and end terms - FieldDictRange(field string, startTerm []byte, endTerm []byte) (FieldDict, error) - FieldDictPrefix(field string, termPrefix []byte) (FieldDict, error) - - Document(id string) (Document, error) - - DocValueReader(fields []string) (DocValueReader, error) - - Fields() ([]string, error) - - GetInternal(key []byte) ([]byte, error) - - DocCount() (uint64, error) - - ExternalID(id IndexInternalID) (string, error) - InternalID(id string) (IndexInternalID, error) - - Close() error -} - -type IndexReaderRegexp interface { - FieldDictRegexp(field string, regex string) (FieldDict, error) -} - -type IndexReaderFuzzy interface { - FieldDictFuzzy(field string, term string, fuzziness int, prefix string) (FieldDict, error) -} - -type IndexReaderContains interface { - FieldDictContains(field string) (FieldDictContains, error) -} - -type TermFieldVector struct { - Field string - ArrayPositions []uint64 - Pos uint64 - Start uint64 - End uint64 -} - -func (tfv *TermFieldVector) Size() int { - return reflectStaticSizeTermFieldVector + sizeOfPtr + - len(tfv.Field) + len(tfv.ArrayPositions)*sizeOfUint64 -} - -// IndexInternalID is an opaque document identifier interal to the index impl -type IndexInternalID []byte - -func (id IndexInternalID) Equals(other IndexInternalID) bool { - return id.Compare(other) == 0 -} - -func (id IndexInternalID) Compare(other IndexInternalID) int { - return bytes.Compare(id, other) -} - -type TermFieldDoc struct { - Term string - ID IndexInternalID - Freq uint64 - Norm float64 - Vectors []*TermFieldVector -} - -func (tfd *TermFieldDoc) Size() int { - sizeInBytes := reflectStaticSizeTermFieldDoc + sizeOfPtr + - len(tfd.Term) + len(tfd.ID) - - for _, entry := range tfd.Vectors { - sizeInBytes += entry.Size() - } - - return sizeInBytes -} - 
-// Reset allows an already allocated TermFieldDoc to be reused -func (tfd *TermFieldDoc) Reset() *TermFieldDoc { - // remember the []byte used for the ID - id := tfd.ID - vectors := tfd.Vectors - // idiom to copy over from empty TermFieldDoc (0 allocations) - *tfd = TermFieldDoc{} - // reuse the []byte already allocated (and reset len to 0) - tfd.ID = id[:0] - tfd.Vectors = vectors[:0] - return tfd -} - -// TermFieldReader is the interface exposing the enumeration of documents -// containing a given term in a given field. Documents are returned in byte -// lexicographic order over their identifiers. -type TermFieldReader interface { - // Next returns the next document containing the term in this field, or nil - // when it reaches the end of the enumeration. The preAlloced TermFieldDoc - // is optional, and when non-nil, will be used instead of allocating memory. - Next(preAlloced *TermFieldDoc) (*TermFieldDoc, error) - - // Advance resets the enumeration at specified document or its immediate - // follower. - Advance(ID IndexInternalID, preAlloced *TermFieldDoc) (*TermFieldDoc, error) - - // Count returns the number of documents contains the term in this field. - Count() uint64 - Close() error - - Size() int -} - -type DictEntry struct { - Term string - Count uint64 -} - -type FieldDict interface { - Next() (*DictEntry, error) - Close() error -} - -type FieldDictContains interface { - Contains(key []byte) (bool, error) -} - -// DocIDReader is the interface exposing enumeration of documents identifiers. -// Close the reader to release associated resources. -type DocIDReader interface { - // Next returns the next document internal identifier in the natural - // index order, nil when the end of the sequence is reached. - Next() (IndexInternalID, error) - - // Advance resets the iteration to the first internal identifier greater than - // or equal to ID. If ID is smaller than the start of the range, the iteration - // will start there instead. 
If ID is greater than or equal to the end of - // the range, Next() call will return io.EOF. - Advance(ID IndexInternalID) (IndexInternalID, error) - - Size() int - - Close() error -} - -type DocValueVisitor func(field string, term []byte) - -type DocValueReader interface { - VisitDocValues(id IndexInternalID, visitor DocValueVisitor) error -} - -// IndexBuilder is an interface supported by some index schemes -// to allow direct write-only index building -type IndexBuilder interface { - Index(doc Document) error - Close() error -} diff --git a/vendor/github.com/blevesearch/bleve_index_api/indexing_options.go b/vendor/github.com/blevesearch/bleve_index_api/indexing_options.go deleted file mode 100644 index 9724ccae0..000000000 --- a/vendor/github.com/blevesearch/bleve_index_api/indexing_options.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package index - -type FieldIndexingOptions int - -const ( - IndexField FieldIndexingOptions = 1 << iota - StoreField - IncludeTermVectors - DocValues - SkipFreqNorm -) - -func (o FieldIndexingOptions) IsIndexed() bool { - return o&IndexField != 0 -} - -func (o FieldIndexingOptions) IsStored() bool { - return o&StoreField != 0 -} - -func (o FieldIndexingOptions) IncludeTermVectors() bool { - return o&IncludeTermVectors != 0 -} - -func (o FieldIndexingOptions) IncludeDocValues() bool { - return o&DocValues != 0 -} - -func (o FieldIndexingOptions) SkipFreqNorm() bool { - return o&SkipFreqNorm != 0 -} - -func (o FieldIndexingOptions) String() string { - rv := "" - if o.IsIndexed() { - rv += "INDEXED" - } - if o.IsStored() { - if rv != "" { - rv += ", " - } - rv += "STORE" - } - if o.IncludeTermVectors() { - if rv != "" { - rv += ", " - } - rv += "TV" - } - if o.IncludeDocValues() { - if rv != "" { - rv += ", " - } - rv += "DV" - } - if !o.SkipFreqNorm() { - if rv != "" { - rv += ", " - } - rv += "FN" - } - return rv -} diff --git a/vendor/github.com/blevesearch/bleve_index_api/optimize.go b/vendor/github.com/blevesearch/bleve_index_api/optimize.go deleted file mode 100644 index 2b4e1244c..000000000 --- a/vendor/github.com/blevesearch/bleve_index_api/optimize.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright (c) 2020 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package index - -// Optimizable represents an optional interface that implementable by -// optimizable resources (e.g., TermFieldReaders, Searchers). These -// optimizable resources are provided the same OptimizableContext -// instance, so that they can coordinate via dynamic interface -// casting. -type Optimizable interface { - Optimize(kind string, octx OptimizableContext) (OptimizableContext, error) -} - -// Represents a result of optimization -- see the Finish() method. -type Optimized interface{} - -type OptimizableContext interface { - // Once all the optimzable resources have been provided the same - // OptimizableContext instance, the optimization preparations are - // finished or completed via the Finish() method. - // - // Depending on the optimization being performed, the Finish() - // method might return a non-nil Optimized instance. For example, - // the Optimized instance might represent an optimized - // TermFieldReader instance. - Finish() (Optimized, error) -} diff --git a/vendor/github.com/blevesearch/go-porterstemmer/go.mod b/vendor/github.com/blevesearch/go-porterstemmer/go.mod deleted file mode 100644 index d620295d5..000000000 --- a/vendor/github.com/blevesearch/go-porterstemmer/go.mod +++ /dev/null @@ -1,3 +0,0 @@ -module github.com/blevesearch/go-porterstemmer - -go 1.13 diff --git a/vendor/github.com/blevesearch/mmap-go/go.mod b/vendor/github.com/blevesearch/mmap-go/go.mod deleted file mode 100644 index b7f026573..000000000 --- a/vendor/github.com/blevesearch/mmap-go/go.mod +++ /dev/null @@ -1,3 +0,0 @@ -module github.com/blevesearch/mmap-go - -require golang.org/x/sys v0.0.0-20181221143128-b4a75ba826a6 diff --git a/vendor/github.com/blevesearch/mmap-go/go.sum b/vendor/github.com/blevesearch/mmap-go/go.sum deleted file mode 100644 index db2bff5e0..000000000 --- a/vendor/github.com/blevesearch/mmap-go/go.sum +++ /dev/null @@ -1,2 +0,0 @@ -golang.org/x/sys v0.0.0-20181221143128-b4a75ba826a6 h1:IcgEB62HYgAhX0Nd/QrVgZlxlcyxbGQHElLUhW2X4Fo= 
-golang.org/x/sys v0.0.0-20181221143128-b4a75ba826a6/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= diff --git a/vendor/github.com/blevesearch/scorch_segment_api/v2/.golangci.yml b/vendor/github.com/blevesearch/scorch_segment_api/v2/.golangci.yml deleted file mode 100644 index 664f35f27..000000000 --- a/vendor/github.com/blevesearch/scorch_segment_api/v2/.golangci.yml +++ /dev/null @@ -1,42 +0,0 @@ -linters: - # please, do not use `enable-all`: it's deprecated and will be removed soon. - # inverted configuration with `enable-all` and `disable` is not scalable during updates of golangci-lint - disable-all: true - enable: - - bodyclose - - deadcode - - depguard - - dogsled - - dupl - - errcheck - - funlen - - gochecknoinits - - goconst - - gocritic - - gocyclo - - gofmt - - goimports - - golint - - gomnd - - goprintffuncname - - gosec - - gosimple - - govet - - ineffassign - - interfacer - - lll - - misspell - - nakedret - - nolintlint - - rowserrcheck - - scopelint - - staticcheck - - structcheck - - stylecheck - - typecheck - - unconvert - - unparam - - unused - - varcheck - - whitespace - diff --git a/vendor/github.com/blevesearch/scorch_segment_api/v2/README.md b/vendor/github.com/blevesearch/scorch_segment_api/v2/README.md deleted file mode 100644 index dc33b004e..000000000 --- a/vendor/github.com/blevesearch/scorch_segment_api/v2/README.md +++ /dev/null @@ -1,11 +0,0 @@ -# Scorch Segment API - -[![PkgGoDev](https://pkg.go.dev/badge/github.com/blevesearch/scorch_segment_api)](https://pkg.go.dev/github.com/blevesearch/scorch_segment_api) -[![Tests](https://github.com/blevesearch/scorch_segment_api/workflows/Tests/badge.svg?branch=master&event=push)](https://github.com/blevesearch/scorch_segment_api/actions?query=workflow%3ATests+event%3Apush+branch%3Amaster) 
-[![Lint](https://github.com/blevesearch/scorch_segment_api/workflows/Lint/badge.svg?branch=master&event=push)](https://github.com/blevesearch/scorch_segment_api/actions?query=workflow%3ALint+event%3Apush+branch%3Amaster) - -Scorch supports a pluggable Segment interface. - -By placing these interfaces in their own, *hopefully* slowly evolving module, it frees up Scorch and the underlying segment to each introduce new major versions without interfering with one another. - -With that in mind, we anticipate introducing non-breaking changes only to this module, and keeping the major version at 1.x for some time. diff --git a/vendor/github.com/blevesearch/scorch_segment_api/v2/go.mod b/vendor/github.com/blevesearch/scorch_segment_api/v2/go.mod deleted file mode 100644 index fb60b845e..000000000 --- a/vendor/github.com/blevesearch/scorch_segment_api/v2/go.mod +++ /dev/null @@ -1,10 +0,0 @@ -module github.com/blevesearch/scorch_segment_api/v2 - -go 1.13 - -require ( - github.com/RoaringBitmap/roaring v0.4.23 - github.com/blevesearch/bleve_index_api v1.0.0 - github.com/davecgh/go-spew v1.1.1 // indirect - github.com/mschoch/smat v0.2.0 // indirect -) diff --git a/vendor/github.com/blevesearch/scorch_segment_api/v2/go.sum b/vendor/github.com/blevesearch/scorch_segment_api/v2/go.sum deleted file mode 100644 index c465e59d6..000000000 --- a/vendor/github.com/blevesearch/scorch_segment_api/v2/go.sum +++ /dev/null @@ -1,35 +0,0 @@ -github.com/RoaringBitmap/roaring v0.4.23 h1:gpyfd12QohbqhFO4NVDUdoPOCXsyahYRQhINmlHxKeo= -github.com/RoaringBitmap/roaring v0.4.23/go.mod h1:D0gp8kJQgE1A4LQ5wFLggQEyvDi06Mq5mKs52e1TwOo= -github.com/blevesearch/bleve_index_api v1.0.0 h1:Ds3XeuTxjXCkG6pgIwWDRyooJKNIuOKemnN0N0IkhTU= -github.com/blevesearch/bleve_index_api v1.0.0/go.mod h1:fiwKS0xLEm+gBRgv5mumf0dhgFr2mDgZah1pqv1c1M4= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 
-github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/glycerine/go-unsnap-stream v0.0.0-20181221182339-f9677308dec2 h1:Ujru1hufTHVb++eG6OuNDKMxZnGIvF6o/u8q/8h2+I4= -github.com/glycerine/go-unsnap-stream v0.0.0-20181221182339-f9677308dec2/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE= -github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31 h1:gclg6gY70GLy3PbkQ1AERPfmLMMagS60DKF78eWwLn8= -github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24= -github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= -github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/gopherjs/gopherjs v0.0.0-20190910122728-9d188e94fb99 h1:twflg0XRTjwKpxb/jFExr4HGq6on2dEOmnL6FV+fgPw= -github.com/gopherjs/gopherjs v0.0.0-20190910122728-9d188e94fb99/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= -github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg= -github.com/mschoch/smat v0.2.0 h1:8imxQsjDm8yFEAVBe7azKmKSgzSkZXDuKkSq9374khM= -github.com/mschoch/smat v0.2.0/go.mod h1:kc9mz7DoBKqDyiRL7VZN8KvXQMWeTaVnttLRXOlotKw= -github.com/philhofer/fwd v1.0.0 h1:UbZqGr5Y38ApvM/V/jEljVxwocdweyH+vmYvRPBnbqQ= -github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= -github.com/stretchr/testify 
v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/tinylib/msgp v1.1.0 h1:9fQd+ICuRIu/ue4vxJZu6/LzxN0HwMds2nq/0cFvxHU= -github.com/tinylib/msgp v1.1.0/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= -github.com/willf/bitset v1.1.10 h1:NotGKqX0KwQ72NUzqrjZq5ipPNDQex9lo3WpaS8L2sc= -github.com/willf/bitset v1.1.10/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/blevesearch/segment/go.mod b/vendor/github.com/blevesearch/segment/go.mod deleted file mode 100644 index 2f6597656..000000000 --- a/vendor/github.com/blevesearch/segment/go.mod +++ /dev/null @@ -1,3 +0,0 @@ -module github.com/blevesearch/segment - -go 1.13 diff --git a/vendor/github.com/blevesearch/snowballstem/go.mod b/vendor/github.com/blevesearch/snowballstem/go.mod deleted file mode 100644 index 12218e2d6..000000000 --- a/vendor/github.com/blevesearch/snowballstem/go.mod +++ /dev/null @@ -1,3 +0,0 @@ -module github.com/blevesearch/snowballstem - -go 1.13 diff --git a/vendor/github.com/blevesearch/upsidedown_store_api/README.md b/vendor/github.com/blevesearch/upsidedown_store_api/README.md deleted file mode 100644 index 919281947..000000000 --- a/vendor/github.com/blevesearch/upsidedown_store_api/README.md +++ /dev/null @@ -1,7 +0,0 @@ -# Upsidedown Store API - -Upsidedown supports a pluggable Key/Value storage interface. - -By placing these interfaces in their own, *hopefully* slowly evolving module, it frees up Upsidedown and the underlying storage implementations to each introduce new major versions without interfering with one another. 
- -With that in mind, we anticipate introducing non-breaking changes only to this module, and keeping the major version at 1.x for some time. diff --git a/vendor/github.com/blevesearch/upsidedown_store_api/batch.go b/vendor/github.com/blevesearch/upsidedown_store_api/batch.go deleted file mode 100644 index 711052661..000000000 --- a/vendor/github.com/blevesearch/upsidedown_store_api/batch.go +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package store - -type op struct { - K []byte - V []byte -} - -type EmulatedBatch struct { - Ops []*op - Merger *EmulatedMerge -} - -func NewEmulatedBatch(mo MergeOperator) *EmulatedBatch { - return &EmulatedBatch{ - Ops: make([]*op, 0, 1000), - Merger: NewEmulatedMerge(mo), - } -} - -func (b *EmulatedBatch) Set(key, val []byte) { - ck := make([]byte, len(key)) - copy(ck, key) - cv := make([]byte, len(val)) - copy(cv, val) - b.Ops = append(b.Ops, &op{ck, cv}) -} - -func (b *EmulatedBatch) Delete(key []byte) { - ck := make([]byte, len(key)) - copy(ck, key) - b.Ops = append(b.Ops, &op{ck, nil}) -} - -func (b *EmulatedBatch) Merge(key, val []byte) { - ck := make([]byte, len(key)) - copy(ck, key) - cv := make([]byte, len(val)) - copy(cv, val) - b.Merger.Merge(key, val) -} - -func (b *EmulatedBatch) Reset() { - b.Ops = b.Ops[:0] -} - -func (b *EmulatedBatch) Close() error { - return nil -} diff --git a/vendor/github.com/blevesearch/upsidedown_store_api/go.mod b/vendor/github.com/blevesearch/upsidedown_store_api/go.mod deleted file mode 100644 index 1e89f1829..000000000 --- a/vendor/github.com/blevesearch/upsidedown_store_api/go.mod +++ /dev/null @@ -1,3 +0,0 @@ -module github.com/blevesearch/upsidedown_store_api - -go 1.13 diff --git a/vendor/github.com/blevesearch/upsidedown_store_api/kvstore.go b/vendor/github.com/blevesearch/upsidedown_store_api/kvstore.go deleted file mode 100644 index 34698c7bd..000000000 --- a/vendor/github.com/blevesearch/upsidedown_store_api/kvstore.go +++ /dev/null @@ -1,174 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package store - -import "encoding/json" - -// KVStore is an abstraction for working with KV stores. Note that -// in order to be used with the bleve.registry, it must also implement -// a constructor function of the registry.KVStoreConstructor type. -type KVStore interface { - - // Writer returns a KVWriter which can be used to - // make changes to the KVStore. If a writer cannot - // be obtained a non-nil error is returned. - Writer() (KVWriter, error) - - // Reader returns a KVReader which can be used to - // read data from the KVStore. If a reader cannot - // be obtained a non-nil error is returned. - Reader() (KVReader, error) - - // Close closes the KVStore - Close() error -} - -// KVReader is an abstraction of an **ISOLATED** reader -// In this context isolated is defined to mean that -// writes/deletes made after the KVReader is opened -// are not observed. -// Because there is usually a cost associated with -// keeping isolated readers active, users should -// close them as soon as they are no longer needed. -type KVReader interface { - - // Get returns the value associated with the key - // If the key does not exist, nil is returned. - // The caller owns the bytes returned. - Get(key []byte) ([]byte, error) - - // MultiGet retrieves multiple values in one call. 
- MultiGet(keys [][]byte) ([][]byte, error) - - // PrefixIterator returns a KVIterator that will - // visit all K/V pairs with the provided prefix - PrefixIterator(prefix []byte) KVIterator - - // RangeIterator returns a KVIterator that will - // visit all K/V pairs >= start AND < end - RangeIterator(start, end []byte) KVIterator - - // Close closes the iterator - Close() error -} - -// KVIterator is an abstraction around key iteration -type KVIterator interface { - - // Seek will advance the iterator to the specified key - Seek(key []byte) - - // Next will advance the iterator to the next key - Next() - - // Key returns the key pointed to by the iterator - // The bytes returned are **ONLY** valid until the next call to Seek/Next/Close - // Continued use after that requires that they be copied. - Key() []byte - - // Value returns the value pointed to by the iterator - // The bytes returned are **ONLY** valid until the next call to Seek/Next/Close - // Continued use after that requires that they be copied. - Value() []byte - - // Valid returns whether or not the iterator is in a valid state - Valid() bool - - // Current returns Key(),Value(),Valid() in a single operation - Current() ([]byte, []byte, bool) - - // Close closes the iterator - Close() error -} - -// KVWriter is an abstraction for mutating the KVStore -// KVWriter does **NOT** enforce restrictions of a single writer -// if the underlying KVStore allows concurrent writes, the -// KVWriter interface should also do so, it is up to the caller -// to do this in a way that is safe and makes sense -type KVWriter interface { - - // NewBatch returns a KVBatch for performing batch operations on this kvstore - NewBatch() KVBatch - - // NewBatchEx returns a KVBatch and an associated byte array - // that's pre-sized based on the KVBatchOptions. The caller can - // use the returned byte array for keys and values associated with - // the batch. 
Once the batch is either executed or closed, the - // associated byte array should no longer be accessed by the - // caller. - NewBatchEx(KVBatchOptions) ([]byte, KVBatch, error) - - // ExecuteBatch will execute the KVBatch, the provided KVBatch **MUST** have - // been created by the same KVStore (though not necessarily the same KVWriter) - // Batch execution is atomic, either all the operations or none will be performed - ExecuteBatch(batch KVBatch) error - - // Close closes the writer - Close() error -} - -// KVBatchOptions provides the KVWriter.NewBatchEx() method with batch -// preparation and preallocation information. -type KVBatchOptions struct { - // TotalBytes is the sum of key and value bytes needed by the - // caller for the entire batch. It affects the size of the - // returned byte array of KVWrite.NewBatchEx(). - TotalBytes int - - // NumSets is the number of Set() calls the caller will invoke on - // the KVBatch. - NumSets int - - // NumDeletes is the number of Delete() calls the caller will invoke - // on the KVBatch. - NumDeletes int - - // NumMerges is the number of Merge() calls the caller will invoke - // on the KVBatch. 
- NumMerges int -} - -// KVBatch is an abstraction for making multiple KV mutations at once -type KVBatch interface { - - // Set updates the key with the specified value - // both key and value []byte may be reused as soon as this call returns - Set(key, val []byte) - - // Delete removes the specified key - // the key []byte may be reused as soon as this call returns - Delete(key []byte) - - // Merge merges old value with the new value at the specified key - // as prescribed by the KVStores merge operator - // both key and value []byte may be reused as soon as this call returns - Merge(key, val []byte) - - // Reset frees resources for this batch and allows reuse - Reset() - - // Close frees resources - Close() error -} - -// KVStoreStats is an optional interface that KVStores can implement -// if they're able to report any useful stats -type KVStoreStats interface { - // Stats returns a JSON serializable object representing stats for this KVStore - Stats() json.Marshaler - - StatsMap() map[string]interface{} -} diff --git a/vendor/github.com/blevesearch/upsidedown_store_api/merge.go b/vendor/github.com/blevesearch/upsidedown_store_api/merge.go deleted file mode 100644 index ca2561b0a..000000000 --- a/vendor/github.com/blevesearch/upsidedown_store_api/merge.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright (c) 2014 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package store - -// At the moment this happens to be the same interface as described by -// RocksDB, but this may not always be the case. - -type MergeOperator interface { - - // FullMerge the full sequence of operands on top of the existingValue - // if no value currently exists, existingValue is nil - // return the merged value, and success/failure - FullMerge(key, existingValue []byte, operands [][]byte) ([]byte, bool) - - // Partially merge these two operands. - // If partial merge cannot be done, return nil,false, which will defer - // all processing until the FullMerge is done. - PartialMerge(key, leftOperand, rightOperand []byte) ([]byte, bool) - - // Name returns an identifier for the operator - Name() string -} - -type EmulatedMerge struct { - Merges map[string][][]byte - mo MergeOperator -} - -func NewEmulatedMerge(mo MergeOperator) *EmulatedMerge { - return &EmulatedMerge{ - Merges: make(map[string][][]byte), - mo: mo, - } -} - -func (m *EmulatedMerge) Merge(key, val []byte) { - ops, ok := m.Merges[string(key)] - if ok && len(ops) > 0 { - last := ops[len(ops)-1] - mergedVal, partialMergeOk := m.mo.PartialMerge(key, last, val) - if partialMergeOk { - // replace last entry with the result of the merge - ops[len(ops)-1] = mergedVal - } else { - // could not partial merge, append this to the end - ops = append(ops, val) - } - } else { - ops = [][]byte{val} - } - m.Merges[string(key)] = ops -} diff --git a/vendor/github.com/blevesearch/upsidedown_store_api/multiget.go b/vendor/github.com/blevesearch/upsidedown_store_api/multiget.go deleted file mode 100644 index 635bcd411..000000000 --- a/vendor/github.com/blevesearch/upsidedown_store_api/multiget.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright (c) 2016 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package store - -// MultiGet is a helper function to retrieve mutiple keys from a -// KVReader, and might be used by KVStore implementations that don't -// have a native multi-get facility. -func MultiGet(kvreader KVReader, keys [][]byte) ([][]byte, error) { - vals := make([][]byte, 0, len(keys)) - - for i, key := range keys { - val, err := kvreader.Get(key) - if err != nil { - return nil, err - } - - vals[i] = val - } - - return vals, nil -} diff --git a/vendor/github.com/blevesearch/vellum/fst.go b/vendor/github.com/blevesearch/vellum/fst.go index 64ee21a41..3140042b6 100644 --- a/vendor/github.com/blevesearch/vellum/fst.go +++ b/vendor/github.com/blevesearch/vellum/fst.go @@ -17,7 +17,7 @@ package vellum import ( "io" - "github.com/willf/bitset" + "github.com/bits-and-blooms/bitset" ) // FST is an in-memory representation of a finite state transducer, diff --git a/vendor/github.com/blevesearch/vellum/go.mod b/vendor/github.com/blevesearch/vellum/go.mod deleted file mode 100644 index afc0acff9..000000000 --- a/vendor/github.com/blevesearch/vellum/go.mod +++ /dev/null @@ -1,10 +0,0 @@ -module github.com/blevesearch/vellum - -go 1.12 - -require ( - github.com/blevesearch/mmap-go v1.0.2 - github.com/spf13/cobra v0.0.5 - github.com/willf/bitset v1.1.10 - golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a // indirect -) diff --git a/vendor/github.com/blevesearch/vellum/go.sum b/vendor/github.com/blevesearch/vellum/go.sum deleted file mode 100644 index 1022e684e..000000000 --- a/vendor/github.com/blevesearch/vellum/go.sum +++ /dev/null @@ -1,40 
+0,0 @@ -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= -github.com/blevesearch/mmap-go v1.0.2 h1:JtMHb+FgQCTTYIhtMvimw15dJwu1Y5lrZDMOFXVWPk0= -github.com/blevesearch/mmap-go v1.0.2/go.mod h1:ol2qBqYaOUsGdm7aRMRrYGgPvnwLe6Y+7LMvAB5IbSA= -github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= -github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= -github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= -github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= 
-github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s= -github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= -github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= -github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= -github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= -github.com/willf/bitset v1.1.10 h1:NotGKqX0KwQ72NUzqrjZq5ipPNDQex9lo3WpaS8L2sc= -github.com/willf/bitset v1.1.10/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= -github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= -golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181221143128-b4a75ba826a6/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a h1:aYOabOQFp6Vj6W1F80affTUvO9UxmJRx8K0gsfABByQ= -golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/blevesearch/zapx/v11/.golangci.yml b/vendor/github.com/blevesearch/zapx/v11/.golangci.yml deleted file mode 100644 index f0f2f6067..000000000 
--- a/vendor/github.com/blevesearch/zapx/v11/.golangci.yml +++ /dev/null @@ -1,29 +0,0 @@ -linters: - # please, do not use `enable-all`: it's deprecated and will be removed soon. - # inverted configuration with `enable-all` and `disable` is not scalable during updates of golangci-lint - disable-all: true - enable: - - bodyclose - - deadcode - - depguard - - dupl - - errcheck - - gofmt - - goimports - - goprintffuncname - - gosec - - gosimple - - govet - - ineffassign - - interfacer - - misspell - - nakedret - - nolintlint - - rowserrcheck - - staticcheck - - structcheck - - typecheck - - unused - - varcheck - - whitespace - diff --git a/vendor/github.com/blevesearch/zapx/v11/LICENSE b/vendor/github.com/blevesearch/zapx/v11/LICENSE deleted file mode 100644 index 7a4a3ea24..000000000 --- a/vendor/github.com/blevesearch/zapx/v11/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. \ No newline at end of file diff --git a/vendor/github.com/blevesearch/zapx/v11/README.md b/vendor/github.com/blevesearch/zapx/v11/README.md deleted file mode 100644 index 4cbf1a145..000000000 --- a/vendor/github.com/blevesearch/zapx/v11/README.md +++ /dev/null @@ -1,163 +0,0 @@ -# zapx file format - -The zapx module is fork of [zap](https://github.com/blevesearch/zap) module which maintains file format compatibility, but removes dependency on bleve, and instead depends only on the indepenent interface modules: - -- [bleve_index_api](https://github.com/blevesearch/scorch_segment_api) -- [scorch_segment_api](https://github.com/blevesearch/scorch_segment_api) - -Advanced ZAP File Format Documentation is [here](zap.md). - -The file is written in the reverse order that we typically access data. This helps us write in one pass since later sections of the file require file offsets of things we've already written. 
- -Current usage: - -- mmap the entire file -- crc-32 bytes and version are in fixed position at end of the file -- reading remainder of footer could be version specific -- remainder of footer gives us: - - 3 important offsets (docValue , fields index and stored data index) - - 2 important values (number of docs and chunk factor) -- field data is processed once and memoized onto the heap so that we never have to go back to disk for it -- access to stored data by doc number means first navigating to the stored data index, then accessing a fixed position offset into that slice, which gives us the actual address of the data. the first bytes of that section tell us the size of data so that we know where it ends. -- access to all other indexed data follows the following pattern: - - first know the field name -> convert to id - - next navigate to term dictionary for that field - - some operations stop here and do dictionary ops - - next use dictionary to navigate to posting list for a specific term - - walk posting list - - if necessary, walk posting details as we go - - if location info is desired, consult location bitmap to see if it is there - -## stored fields section - -- for each document - - preparation phase: - - produce a slice of metadata bytes and data bytes - - produce these slices in field id order - - field value is appended to the data slice - - metadata slice is varint encoded with the following values for each field value - - field id (uint16) - - field type (byte) - - field value start offset in uncompressed data slice (uint64) - - field value length (uint64) - - field number of array positions (uint64) - - one additional value for each array position (uint64) - - compress the data slice using snappy - - file writing phase: - - remember the start offset for this document - - write out meta data length (varint uint64) - - write out compressed data length (varint uint64) - - write out the metadata bytes - - write out the compressed data bytes - -## stored 
fields idx - -- for each document - - write start offset (remembered from previous section) of stored data (big endian uint64) - -With this index and a known document number, we have direct access to all the stored field data. - -## posting details (freq/norm) section - -- for each posting list - - produce a slice containing multiple consecutive chunks (each chunk is varint stream) - - produce a slice remembering offsets of where each chunk starts - - preparation phase: - - for each hit in the posting list - - if this hit is in next chunk close out encoding of last chunk and record offset start of next - - encode term frequency (uint64) - - encode norm factor (float32) - - file writing phase: - - remember start position for this posting list details - - write out number of chunks that follow (varint uint64) - - write out length of each chunk (each a varint uint64) - - write out the byte slice containing all the chunk data - -If you know the doc number you're interested in, this format lets you jump to the correct chunk (docNum/chunkFactor) directly and then seek within that chunk until you find it. 
- -## posting details (location) section - -- for each posting list - - produce a slice containing multiple consecutive chunks (each chunk is varint stream) - - produce a slice remembering offsets of where each chunk starts - - preparation phase: - - for each hit in the posting list - - if this hit is in next chunk close out encoding of last chunk and record offset start of next - - encode field (uint16) - - encode field pos (uint64) - - encode field start (uint64) - - encode field end (uint64) - - encode number of array positions to follow (uint64) - - encode each array position (each uint64) - - file writing phase: - - remember start position for this posting list details - - write out number of chunks that follow (varint uint64) - - write out length of each chunk (each a varint uint64) - - write out the byte slice containing all the chunk data - -If you know the doc number you're interested in, this format lets you jump to the correct chunk (docNum/chunkFactor) directly and then seek within that chunk until you find it. 
- -## postings list section - -- for each posting list - - preparation phase: - - encode roaring bitmap posting list to bytes (so we know the length) - - file writing phase: - - remember the start position for this posting list - - write freq/norm details offset (remembered from previous, as varint uint64) - - write location details offset (remembered from previous, as varint uint64) - - write length of encoded roaring bitmap - - write the serialized roaring bitmap data - -## dictionary - -- for each field - - preparation phase: - - encode vellum FST with dictionary data pointing to file offset of posting list (remembered from previous) - - file writing phase: - - remember the start position of this persistDictionary - - write length of vellum data (varint uint64) - - write out vellum data - -## fields section - -- for each field - - file writing phase: - - remember start offset for each field - - write dictionary address (remembered from previous) (varint uint64) - - write length of field name (varint uint64) - - write field name bytes - -## fields idx - -- for each field - - file writing phase: - - write big endian uint64 of start offset for each field - -NOTE: currently we don't know or record the length of this fields index. Instead we rely on the fact that we know it immediately precedes a footer of known size. 
- -## fields DocValue - -- for each field - - preparation phase: - - produce a slice containing multiple consecutive chunks, where each chunk is composed of a meta section followed by compressed columnar field data - - produce a slice remembering the length of each chunk - - file writing phase: - - remember the start position of this first field DocValue offset in the footer - - write out number of chunks that follow (varint uint64) - - write out length of each chunk (each a varint uint64) - - write out the byte slice containing all the chunk data - -NOTE: currently the meta header inside each chunk gives clue to the location offsets and size of the data pertaining to a given docID and any -read operation leverage that meta information to extract the document specific data from the file. - -## footer - -- file writing phase - - write number of docs (big endian uint64) - - write stored field index location (big endian uint64) - - write field index location (big endian uint64) - - write field docValue location (big endian uint64) - - write out chunk factor (big endian uint32) - - write out version (big endian uint32) - - write out file CRC of everything preceding this (big endian uint32) diff --git a/vendor/github.com/blevesearch/zapx/v11/build.go b/vendor/github.com/blevesearch/zapx/v11/build.go deleted file mode 100644 index a4b5d2117..000000000 --- a/vendor/github.com/blevesearch/zapx/v11/build.go +++ /dev/null @@ -1,156 +0,0 @@ -// Copyright (c) 2017 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package zap - -import ( - "bufio" - "math" - "os" - - "github.com/blevesearch/vellum" -) - -const Version uint32 = 11 - -const Type string = "zap" - -const fieldNotUninverted = math.MaxUint64 - -func (sb *SegmentBase) Persist(path string) error { - return PersistSegmentBase(sb, path) -} - -// PersistSegmentBase persists SegmentBase in the zap file format. -func PersistSegmentBase(sb *SegmentBase, path string) error { - flag := os.O_RDWR | os.O_CREATE - - f, err := os.OpenFile(path, flag, 0600) - if err != nil { - return err - } - - cleanup := func() { - _ = f.Close() - _ = os.Remove(path) - } - - br := bufio.NewWriter(f) - - _, err = br.Write(sb.mem) - if err != nil { - cleanup() - return err - } - - err = persistFooter(sb.numDocs, sb.storedIndexOffset, sb.fieldsIndexOffset, sb.docValueOffset, - sb.chunkFactor, sb.memCRC, br) - if err != nil { - cleanup() - return err - } - - err = br.Flush() - if err != nil { - cleanup() - return err - } - - err = f.Sync() - if err != nil { - cleanup() - return err - } - - err = f.Close() - if err != nil { - cleanup() - return err - } - - return nil -} - -func persistStoredFieldValues(fieldID int, - storedFieldValues [][]byte, stf []byte, spf [][]uint64, - curr int, metaEncode varintEncoder, data []byte) ( - int, []byte, error) { - for i := 0; i < len(storedFieldValues); i++ { - // encode field - _, err := metaEncode(uint64(fieldID)) - if err != nil { - return 0, nil, err - } - // encode type - _, err = metaEncode(uint64(stf[i])) - if err != nil { - return 0, nil, err - } - // encode start offset - _, err = metaEncode(uint64(curr)) - if err != nil { - return 0, nil, err - } - // end len - _, err = metaEncode(uint64(len(storedFieldValues[i]))) - if err != nil { - return 0, nil, err - } - // encode number of array pos - _, err = metaEncode(uint64(len(spf[i]))) - if err != nil { - return 0, nil, err - } - // encode all array 
positions - for _, pos := range spf[i] { - _, err = metaEncode(pos) - if err != nil { - return 0, nil, err - } - } - - data = append(data, storedFieldValues[i]...) - curr += len(storedFieldValues[i]) - } - - return curr, data, nil -} - -func InitSegmentBase(mem []byte, memCRC uint32, chunkFactor uint32, - fieldsMap map[string]uint16, fieldsInv []string, numDocs uint64, - storedIndexOffset uint64, fieldsIndexOffset uint64, docValueOffset uint64, - dictLocs []uint64) (*SegmentBase, error) { - sb := &SegmentBase{ - mem: mem, - memCRC: memCRC, - chunkFactor: chunkFactor, - fieldsMap: fieldsMap, - fieldsInv: fieldsInv, - numDocs: numDocs, - storedIndexOffset: storedIndexOffset, - fieldsIndexOffset: fieldsIndexOffset, - docValueOffset: docValueOffset, - dictLocs: dictLocs, - fieldDvReaders: make(map[uint16]*docValueReader), - fieldFSTs: make(map[uint16]*vellum.FST), - } - sb.updateSize() - - err := sb.loadDvReaders() - if err != nil { - return nil, err - } - - return sb, nil -} diff --git a/vendor/github.com/blevesearch/zapx/v11/contentcoder.go b/vendor/github.com/blevesearch/zapx/v11/contentcoder.go deleted file mode 100644 index b9ff8179b..000000000 --- a/vendor/github.com/blevesearch/zapx/v11/contentcoder.go +++ /dev/null @@ -1,230 +0,0 @@ -// Copyright (c) 2017 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package zap - -import ( - "bytes" - "encoding/binary" - "io" - "reflect" - - "github.com/golang/snappy" -) - -var reflectStaticSizeMetaData int - -func init() { - var md MetaData - reflectStaticSizeMetaData = int(reflect.TypeOf(md).Size()) -} - -var termSeparator byte = 0xff -var termSeparatorSplitSlice = []byte{termSeparator} - -type chunkedContentCoder struct { - final []byte - chunkSize uint64 - currChunk uint64 - chunkLens []uint64 - - w io.Writer - progressiveWrite bool - - chunkMetaBuf bytes.Buffer - chunkBuf bytes.Buffer - - chunkMeta []MetaData - - compressed []byte // temp buf for snappy compression -} - -// MetaData represents the data information inside a -// chunk. -type MetaData struct { - DocNum uint64 // docNum of the data inside the chunk - DocDvOffset uint64 // offset of data inside the chunk for the given docid -} - -// newChunkedContentCoder returns a new chunk content coder which -// packs data into chunks based on the provided chunkSize -func newChunkedContentCoder(chunkSize uint64, maxDocNum uint64, - w io.Writer, progressiveWrite bool) *chunkedContentCoder { - total := maxDocNum/chunkSize + 1 - rv := &chunkedContentCoder{ - chunkSize: chunkSize, - chunkLens: make([]uint64, total), - chunkMeta: make([]MetaData, 0, total), - w: w, - progressiveWrite: progressiveWrite, - } - - return rv -} - -// Reset lets you reuse this chunked content coder. Buffers are reset -// and re used. You cannot change the chunk size. -func (c *chunkedContentCoder) Reset() { - c.currChunk = 0 - c.final = c.final[:0] - c.chunkBuf.Reset() - c.chunkMetaBuf.Reset() - for i := range c.chunkLens { - c.chunkLens[i] = 0 - } - c.chunkMeta = c.chunkMeta[:0] -} - -// Close indicates you are done calling Add() this allows -// the final chunk to be encoded. 
-func (c *chunkedContentCoder) Close() error { - return c.flushContents() -} - -func (c *chunkedContentCoder) flushContents() error { - // flush the contents, with meta information at first - buf := make([]byte, binary.MaxVarintLen64) - n := binary.PutUvarint(buf, uint64(len(c.chunkMeta))) - _, err := c.chunkMetaBuf.Write(buf[:n]) - if err != nil { - return err - } - - // write out the metaData slice - for _, meta := range c.chunkMeta { - _, err := writeUvarints(&c.chunkMetaBuf, meta.DocNum, meta.DocDvOffset) - if err != nil { - return err - } - } - - // write the metadata to final data - metaData := c.chunkMetaBuf.Bytes() - c.final = append(c.final, c.chunkMetaBuf.Bytes()...) - // write the compressed data to the final data - c.compressed = snappy.Encode(c.compressed[:cap(c.compressed)], c.chunkBuf.Bytes()) - c.final = append(c.final, c.compressed...) - - c.chunkLens[c.currChunk] = uint64(len(c.compressed) + len(metaData)) - - if c.progressiveWrite { - _, err := c.w.Write(c.final) - if err != nil { - return err - } - c.final = c.final[:0] - } - - return nil -} - -// Add encodes the provided byte slice into the correct chunk for the provided -// doc num. You MUST call Add() with increasing docNums. -func (c *chunkedContentCoder) Add(docNum uint64, vals []byte) error { - chunk := docNum / c.chunkSize - if chunk != c.currChunk { - // flush out the previous chunk details - err := c.flushContents() - if err != nil { - return err - } - // clearing the chunk specific meta for next chunk - c.chunkBuf.Reset() - c.chunkMetaBuf.Reset() - c.chunkMeta = c.chunkMeta[:0] - c.currChunk = chunk - } - - // get the starting offset for this doc - dvOffset := c.chunkBuf.Len() - dvSize, err := c.chunkBuf.Write(vals) - if err != nil { - return err - } - - c.chunkMeta = append(c.chunkMeta, MetaData{ - DocNum: docNum, - DocDvOffset: uint64(dvOffset + dvSize), - }) - return nil -} - -// Write commits all the encoded chunked contents to the provided writer. -// -// | ..... data ..... 
| chunk offsets (varints) -// | position of chunk offsets (uint64) | number of offsets (uint64) | -// -func (c *chunkedContentCoder) Write() (int, error) { - var tw int - - if c.final != nil { - // write out the data section first - nw, err := c.w.Write(c.final) - tw += nw - if err != nil { - return tw, err - } - } - - chunkOffsetsStart := uint64(tw) - - if cap(c.final) < binary.MaxVarintLen64 { - c.final = make([]byte, binary.MaxVarintLen64) - } else { - c.final = c.final[0:binary.MaxVarintLen64] - } - chunkOffsets := modifyLengthsToEndOffsets(c.chunkLens) - // write out the chunk offsets - for _, chunkOffset := range chunkOffsets { - n := binary.PutUvarint(c.final, chunkOffset) - nw, err := c.w.Write(c.final[:n]) - tw += nw - if err != nil { - return tw, err - } - } - - chunkOffsetsLen := uint64(tw) - chunkOffsetsStart - - c.final = c.final[0:8] - // write out the length of chunk offsets - binary.BigEndian.PutUint64(c.final, chunkOffsetsLen) - nw, err := c.w.Write(c.final) - tw += nw - if err != nil { - return tw, err - } - - // write out the number of chunks - binary.BigEndian.PutUint64(c.final, uint64(len(c.chunkLens))) - nw, err = c.w.Write(c.final) - tw += nw - if err != nil { - return tw, err - } - - c.final = c.final[:0] - - return tw, nil -} - -// ReadDocValueBoundary elicits the start, end offsets from a -// metaData header slice -func ReadDocValueBoundary(chunk int, metaHeaders []MetaData) (uint64, uint64) { - var start uint64 - if chunk > 0 { - start = metaHeaders[chunk-1].DocDvOffset - } - return start, metaHeaders[chunk].DocDvOffset -} diff --git a/vendor/github.com/blevesearch/zapx/v11/count.go b/vendor/github.com/blevesearch/zapx/v11/count.go deleted file mode 100644 index b6135359f..000000000 --- a/vendor/github.com/blevesearch/zapx/v11/count.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright (c) 2017 Couchbase, Inc. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package zap - -import ( - "hash/crc32" - "io" - - segment "github.com/blevesearch/scorch_segment_api/v2" -) - -// CountHashWriter is a wrapper around a Writer which counts the number of -// bytes which have been written and computes a crc32 hash -type CountHashWriter struct { - w io.Writer - crc uint32 - n int - s segment.StatsReporter -} - -// NewCountHashWriter returns a CountHashWriter which wraps the provided Writer -func NewCountHashWriter(w io.Writer) *CountHashWriter { - return &CountHashWriter{w: w} -} - -func NewCountHashWriterWithStatsReporter(w io.Writer, s segment.StatsReporter) *CountHashWriter { - return &CountHashWriter{w: w, s: s} -} - -// Write writes the provided bytes to the wrapped writer and counts the bytes -func (c *CountHashWriter) Write(b []byte) (int, error) { - n, err := c.w.Write(b) - c.crc = crc32.Update(c.crc, crc32.IEEETable, b[:n]) - c.n += n - if c.s != nil { - c.s.ReportBytesWritten(uint64(n)) - } - return n, err -} - -// Count returns the number of bytes written -func (c *CountHashWriter) Count() int { - return c.n -} - -// Sum32 returns the CRC-32 hash of the content written to this writer -func (c *CountHashWriter) Sum32() uint32 { - return c.crc -} diff --git a/vendor/github.com/blevesearch/zapx/v11/dict.go b/vendor/github.com/blevesearch/zapx/v11/dict.go deleted file mode 100644 index e30bf2420..000000000 --- a/vendor/github.com/blevesearch/zapx/v11/dict.go +++ 
/dev/null @@ -1,158 +0,0 @@ -// Copyright (c) 2017 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package zap - -import ( - "fmt" - - "github.com/RoaringBitmap/roaring" - index "github.com/blevesearch/bleve_index_api" - segment "github.com/blevesearch/scorch_segment_api/v2" - "github.com/blevesearch/vellum" -) - -// Dictionary is the zap representation of the term dictionary -type Dictionary struct { - sb *SegmentBase - field string - fieldID uint16 - fst *vellum.FST - fstReader *vellum.Reader -} - -// represents an immutable, empty dictionary -var emptyDictionary = &Dictionary{} - -// PostingsList returns the postings list for the specified term -func (d *Dictionary) PostingsList(term []byte, except *roaring.Bitmap, - prealloc segment.PostingsList) (segment.PostingsList, error) { - var preallocPL *PostingsList - pl, ok := prealloc.(*PostingsList) - if ok && pl != nil { - preallocPL = pl - } - return d.postingsList(term, except, preallocPL) -} - -func (d *Dictionary) postingsList(term []byte, except *roaring.Bitmap, rv *PostingsList) (*PostingsList, error) { - if d.fstReader == nil { - if rv == nil || rv == emptyPostingsList { - return emptyPostingsList, nil - } - return d.postingsListInit(rv, except), nil - } - - postingsOffset, exists, err := d.fstReader.Get(term) - if err != nil { - return nil, fmt.Errorf("vellum err: %v", err) - } - if !exists { - if rv == nil || rv == emptyPostingsList { - return emptyPostingsList, nil - } - return 
d.postingsListInit(rv, except), nil - } - - return d.postingsListFromOffset(postingsOffset, except, rv) -} - -func (d *Dictionary) postingsListFromOffset(postingsOffset uint64, except *roaring.Bitmap, rv *PostingsList) (*PostingsList, error) { - rv = d.postingsListInit(rv, except) - - err := rv.read(postingsOffset, d) - if err != nil { - return nil, err - } - - return rv, nil -} - -func (d *Dictionary) postingsListInit(rv *PostingsList, except *roaring.Bitmap) *PostingsList { - if rv == nil || rv == emptyPostingsList { - rv = &PostingsList{} - } else { - postings := rv.postings - if postings != nil { - postings.Clear() - } - - *rv = PostingsList{} // clear the struct - - rv.postings = postings - } - rv.sb = d.sb - rv.except = except - return rv -} - -func (d *Dictionary) Contains(key []byte) (bool, error) { - if d.fst != nil { - return d.fst.Contains(key) - } - return false, nil -} - -// AutomatonIterator returns an iterator which only visits terms -// having the the vellum automaton and start/end key range -func (d *Dictionary) AutomatonIterator(a segment.Automaton, - startKeyInclusive, endKeyExclusive []byte) segment.DictionaryIterator { - if d.fst != nil { - rv := &DictionaryIterator{ - d: d, - } - - itr, err := d.fst.Search(a, startKeyInclusive, endKeyExclusive) - if err == nil { - rv.itr = itr - } else if err != vellum.ErrIteratorDone { - rv.err = err - } - - return rv - } - return emptyDictionaryIterator -} - -// DictionaryIterator is an iterator for term dictionary -type DictionaryIterator struct { - d *Dictionary - itr vellum.Iterator - err error - tmp PostingsList - entry index.DictEntry - omitCount bool -} - -var emptyDictionaryIterator = &DictionaryIterator{} - -// Next returns the next entry in the dictionary -func (i *DictionaryIterator) Next() (*index.DictEntry, error) { - if i.err != nil && i.err != vellum.ErrIteratorDone { - return nil, i.err - } else if i.itr == nil || i.err == vellum.ErrIteratorDone { - return nil, nil - } - term, postingsOffset 
:= i.itr.Current() - i.entry.Term = string(term) - if !i.omitCount { - i.err = i.tmp.read(postingsOffset, i.d) - if i.err != nil { - return nil, i.err - } - i.entry.Count = i.tmp.Count() - } - i.err = i.itr.Next() - return &i.entry, nil -} diff --git a/vendor/github.com/blevesearch/zapx/v11/docvalues.go b/vendor/github.com/blevesearch/zapx/v11/docvalues.go deleted file mode 100644 index 27dba9962..000000000 --- a/vendor/github.com/blevesearch/zapx/v11/docvalues.go +++ /dev/null @@ -1,306 +0,0 @@ -// Copyright (c) 2017 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package zap - -import ( - "bytes" - "encoding/binary" - "fmt" - "math" - "reflect" - "sort" - - index "github.com/blevesearch/bleve_index_api" - segment "github.com/blevesearch/scorch_segment_api/v2" - "github.com/golang/snappy" -) - -var reflectStaticSizedocValueReader int - -func init() { - var dvi docValueReader - reflectStaticSizedocValueReader = int(reflect.TypeOf(dvi).Size()) -} - -type docNumTermsVisitor func(docNum uint64, terms []byte) error - -type docVisitState struct { - dvrs map[uint16]*docValueReader - segment *SegmentBase -} - -type docValueReader struct { - field string - curChunkNum uint64 - chunkOffsets []uint64 - dvDataLoc uint64 - curChunkHeader []MetaData - curChunkData []byte // compressed data cache - uncompressed []byte // temp buf for snappy decompression -} - -func (di *docValueReader) size() int { - return reflectStaticSizedocValueReader + SizeOfPtr + - len(di.field) + - len(di.chunkOffsets)*SizeOfUint64 + - len(di.curChunkHeader)*reflectStaticSizeMetaData + - len(di.curChunkData) -} - -func (di *docValueReader) cloneInto(rv *docValueReader) *docValueReader { - if rv == nil { - rv = &docValueReader{} - } - - rv.field = di.field - rv.curChunkNum = math.MaxUint64 - rv.chunkOffsets = di.chunkOffsets // immutable, so it's sharable - rv.dvDataLoc = di.dvDataLoc - rv.curChunkHeader = rv.curChunkHeader[:0] - rv.curChunkData = nil - rv.uncompressed = rv.uncompressed[:0] - - return rv -} - -func (di *docValueReader) curChunkNumber() uint64 { - return di.curChunkNum -} - -func (s *SegmentBase) loadFieldDocValueReader(field string, - fieldDvLocStart, fieldDvLocEnd uint64) (*docValueReader, error) { - // get the docValue offset for the given fields - if fieldDvLocStart == fieldNotUninverted { - // no docValues found, nothing to do - return nil, nil - } - - // read the number of chunks, and chunk offsets position - var numChunks, chunkOffsetsPosition uint64 - - if fieldDvLocEnd-fieldDvLocStart > 16 { - numChunks = 
binary.BigEndian.Uint64(s.mem[fieldDvLocEnd-8 : fieldDvLocEnd]) - // read the length of chunk offsets - chunkOffsetsLen := binary.BigEndian.Uint64(s.mem[fieldDvLocEnd-16 : fieldDvLocEnd-8]) - // acquire position of chunk offsets - chunkOffsetsPosition = (fieldDvLocEnd - 16) - chunkOffsetsLen - } else { - return nil, fmt.Errorf("loadFieldDocValueReader: fieldDvLoc too small: %d-%d", fieldDvLocEnd, fieldDvLocStart) - } - - fdvIter := &docValueReader{ - curChunkNum: math.MaxUint64, - field: field, - chunkOffsets: make([]uint64, int(numChunks)), - } - - // read the chunk offsets - var offset uint64 - for i := 0; i < int(numChunks); i++ { - loc, read := binary.Uvarint(s.mem[chunkOffsetsPosition+offset : chunkOffsetsPosition+offset+binary.MaxVarintLen64]) - if read <= 0 { - return nil, fmt.Errorf("corrupted chunk offset during segment load") - } - fdvIter.chunkOffsets[i] = loc - offset += uint64(read) - } - - // set the data offset - fdvIter.dvDataLoc = fieldDvLocStart - - return fdvIter, nil -} - -func (di *docValueReader) loadDvChunk(chunkNumber uint64, s *SegmentBase) error { - // advance to the chunk where the docValues - // reside for the given docNum - destChunkDataLoc, curChunkEnd := di.dvDataLoc, di.dvDataLoc - start, end := readChunkBoundary(int(chunkNumber), di.chunkOffsets) - if start >= end { - di.curChunkHeader = di.curChunkHeader[:0] - di.curChunkData = nil - di.curChunkNum = chunkNumber - di.uncompressed = di.uncompressed[:0] - return nil - } - - destChunkDataLoc += start - curChunkEnd += end - - // read the number of docs reside in the chunk - numDocs, read := binary.Uvarint(s.mem[destChunkDataLoc : destChunkDataLoc+binary.MaxVarintLen64]) - if read <= 0 { - return fmt.Errorf("failed to read the chunk") - } - chunkMetaLoc := destChunkDataLoc + uint64(read) - - offset := uint64(0) - if cap(di.curChunkHeader) < int(numDocs) { - di.curChunkHeader = make([]MetaData, int(numDocs)) - } else { - di.curChunkHeader = di.curChunkHeader[:int(numDocs)] - } - for i := 
0; i < int(numDocs); i++ { - di.curChunkHeader[i].DocNum, read = binary.Uvarint(s.mem[chunkMetaLoc+offset : chunkMetaLoc+offset+binary.MaxVarintLen64]) - offset += uint64(read) - di.curChunkHeader[i].DocDvOffset, read = binary.Uvarint(s.mem[chunkMetaLoc+offset : chunkMetaLoc+offset+binary.MaxVarintLen64]) - offset += uint64(read) - } - - compressedDataLoc := chunkMetaLoc + offset - dataLength := curChunkEnd - compressedDataLoc - di.curChunkData = s.mem[compressedDataLoc : compressedDataLoc+dataLength] - di.curChunkNum = chunkNumber - di.uncompressed = di.uncompressed[:0] - return nil -} - -func (di *docValueReader) iterateAllDocValues(s *SegmentBase, visitor docNumTermsVisitor) error { - for i := 0; i < len(di.chunkOffsets); i++ { - err := di.loadDvChunk(uint64(i), s) - if err != nil { - return err - } - if di.curChunkData == nil || len(di.curChunkHeader) == 0 { - continue - } - - // uncompress the already loaded data - uncompressed, err := snappy.Decode(di.uncompressed[:cap(di.uncompressed)], di.curChunkData) - if err != nil { - return err - } - di.uncompressed = uncompressed - - start := uint64(0) - for _, entry := range di.curChunkHeader { - err = visitor(entry.DocNum, uncompressed[start:entry.DocDvOffset]) - if err != nil { - return err - } - - start = entry.DocDvOffset - } - } - - return nil -} - -func (di *docValueReader) visitDocValues(docNum uint64, - visitor index.DocValueVisitor) error { - // binary search the term locations for the docNum - start, end := di.getDocValueLocs(docNum) - if start == math.MaxUint64 || end == math.MaxUint64 || start == end { - return nil - } - - var uncompressed []byte - var err error - // use the uncompressed copy if available - if len(di.uncompressed) > 0 { - uncompressed = di.uncompressed - } else { - // uncompress the already loaded data - uncompressed, err = snappy.Decode(di.uncompressed[:cap(di.uncompressed)], di.curChunkData) - if err != nil { - return err - } - di.uncompressed = uncompressed - } - - // pick the terms 
for the given docNum - uncompressed = uncompressed[start:end] - for { - i := bytes.Index(uncompressed, termSeparatorSplitSlice) - if i < 0 { - break - } - - visitor(di.field, uncompressed[0:i]) - uncompressed = uncompressed[i+1:] - } - - return nil -} - -func (di *docValueReader) getDocValueLocs(docNum uint64) (uint64, uint64) { - i := sort.Search(len(di.curChunkHeader), func(i int) bool { - return di.curChunkHeader[i].DocNum >= docNum - }) - if i < len(di.curChunkHeader) && di.curChunkHeader[i].DocNum == docNum { - return ReadDocValueBoundary(i, di.curChunkHeader) - } - return math.MaxUint64, math.MaxUint64 -} - -// VisitDocValues is an implementation of the -// DocValueVisitable interface -func (s *SegmentBase) VisitDocValues(localDocNum uint64, fields []string, - visitor index.DocValueVisitor, dvsIn segment.DocVisitState) ( - segment.DocVisitState, error) { - dvs, ok := dvsIn.(*docVisitState) - if !ok || dvs == nil { - dvs = &docVisitState{} - } else { - if dvs.segment != s { - dvs.segment = s - dvs.dvrs = nil - } - } - - var fieldIDPlus1 uint16 - if dvs.dvrs == nil { - dvs.dvrs = make(map[uint16]*docValueReader, len(fields)) - for _, field := range fields { - if fieldIDPlus1, ok = s.fieldsMap[field]; !ok { - continue - } - fieldID := fieldIDPlus1 - 1 - if dvIter, exists := s.fieldDvReaders[fieldID]; exists && - dvIter != nil { - dvs.dvrs[fieldID] = dvIter.cloneInto(dvs.dvrs[fieldID]) - } - } - } - - // find the chunkNumber where the docValues are stored - docInChunk := localDocNum / uint64(s.chunkFactor) - var dvr *docValueReader - for _, field := range fields { - if fieldIDPlus1, ok = s.fieldsMap[field]; !ok { - continue - } - fieldID := fieldIDPlus1 - 1 - if dvr, ok = dvs.dvrs[fieldID]; ok && dvr != nil { - // check if the chunk is already loaded - if docInChunk != dvr.curChunkNumber() { - err := dvr.loadDvChunk(docInChunk, s) - if err != nil { - return dvs, err - } - } - - _ = dvr.visitDocValues(localDocNum, visitor) - } - } - return dvs, nil -} - -// 
VisitableDocValueFields returns the list of fields with -// persisted doc value terms ready to be visitable using the -// VisitDocumentFieldTerms method. -func (s *SegmentBase) VisitableDocValueFields() ([]string, error) { - return s.fieldDvNames, nil -} diff --git a/vendor/github.com/blevesearch/zapx/v11/enumerator.go b/vendor/github.com/blevesearch/zapx/v11/enumerator.go deleted file mode 100644 index 5531d2cf1..000000000 --- a/vendor/github.com/blevesearch/zapx/v11/enumerator.go +++ /dev/null @@ -1,126 +0,0 @@ -// Copyright (c) 2018 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package zap - -import ( - "bytes" - - "github.com/blevesearch/vellum" -) - -// enumerator provides an ordered traversal of multiple vellum -// iterators. Like JOIN of iterators, the enumerator produces a -// sequence of (key, iteratorIndex, value) tuples, sorted by key ASC, -// then iteratorIndex ASC, where the same key might be seen or -// repeated across multiple child iterators. 
-type enumerator struct { - itrs []vellum.Iterator - currKs [][]byte - currVs []uint64 - - lowK []byte - lowIdxs []int - lowCurr int -} - -// newEnumerator returns a new enumerator over the vellum Iterators -func newEnumerator(itrs []vellum.Iterator) (*enumerator, error) { - rv := &enumerator{ - itrs: itrs, - currKs: make([][]byte, len(itrs)), - currVs: make([]uint64, len(itrs)), - lowIdxs: make([]int, 0, len(itrs)), - } - for i, itr := range rv.itrs { - rv.currKs[i], rv.currVs[i] = itr.Current() - } - rv.updateMatches(false) - if rv.lowK == nil && len(rv.lowIdxs) == 0 { - return rv, vellum.ErrIteratorDone - } - return rv, nil -} - -// updateMatches maintains the low key matches based on the currKs -func (m *enumerator) updateMatches(skipEmptyKey bool) { - m.lowK = nil - m.lowIdxs = m.lowIdxs[:0] - m.lowCurr = 0 - - for i, key := range m.currKs { - if (key == nil && m.currVs[i] == 0) || // in case of empty iterator - (len(key) == 0 && skipEmptyKey) { // skip empty keys - continue - } - - cmp := bytes.Compare(key, m.lowK) - if cmp < 0 || len(m.lowIdxs) == 0 { - // reached a new low - m.lowK = key - m.lowIdxs = m.lowIdxs[:0] - m.lowIdxs = append(m.lowIdxs, i) - } else if cmp == 0 { - m.lowIdxs = append(m.lowIdxs, i) - } - } -} - -// Current returns the enumerator's current key, iterator-index, and -// value. If the enumerator is not pointing at a valid value (because -// Next returned an error previously), Current will return nil,0,0. -func (m *enumerator) Current() ([]byte, int, uint64) { - var i int - var v uint64 - if m.lowCurr < len(m.lowIdxs) { - i = m.lowIdxs[m.lowCurr] - v = m.currVs[i] - } - return m.lowK, i, v -} - -// Next advances the enumerator to the next key/iterator/value result, -// else vellum.ErrIteratorDone is returned. 
-func (m *enumerator) Next() error { - m.lowCurr += 1 - if m.lowCurr >= len(m.lowIdxs) { - // move all the current low iterators forwards - for _, vi := range m.lowIdxs { - err := m.itrs[vi].Next() - if err != nil && err != vellum.ErrIteratorDone { - return err - } - m.currKs[vi], m.currVs[vi] = m.itrs[vi].Current() - } - // can skip any empty keys encountered at this point - m.updateMatches(true) - } - if m.lowK == nil && len(m.lowIdxs) == 0 { - return vellum.ErrIteratorDone - } - return nil -} - -// Close all the underlying Iterators. The first error, if any, will -// be returned. -func (m *enumerator) Close() error { - var rv error - for _, itr := range m.itrs { - err := itr.Close() - if rv == nil { - rv = err - } - } - return rv -} diff --git a/vendor/github.com/blevesearch/zapx/v11/go.mod b/vendor/github.com/blevesearch/zapx/v11/go.mod deleted file mode 100644 index 394dbb6a5..000000000 --- a/vendor/github.com/blevesearch/zapx/v11/go.mod +++ /dev/null @@ -1,13 +0,0 @@ -module github.com/blevesearch/zapx/v11 - -go 1.12 - -require ( - github.com/RoaringBitmap/roaring v0.4.23 - github.com/blevesearch/bleve_index_api v1.0.0 - github.com/blevesearch/mmap-go v1.0.2 - github.com/blevesearch/scorch_segment_api/v2 v2.0.1 - github.com/blevesearch/vellum v1.0.3 - github.com/golang/snappy v0.0.1 - github.com/spf13/cobra v0.0.5 -) diff --git a/vendor/github.com/blevesearch/zapx/v11/go.sum b/vendor/github.com/blevesearch/zapx/v11/go.sum deleted file mode 100644 index 68e45348c..000000000 --- a/vendor/github.com/blevesearch/zapx/v11/go.sum +++ /dev/null @@ -1,73 +0,0 @@ -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/RoaringBitmap/roaring v0.4.23 h1:gpyfd12QohbqhFO4NVDUdoPOCXsyahYRQhINmlHxKeo= -github.com/RoaringBitmap/roaring v0.4.23/go.mod h1:D0gp8kJQgE1A4LQ5wFLggQEyvDi06Mq5mKs52e1TwOo= -github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= 
-github.com/blevesearch/bleve_index_api v1.0.0 h1:Ds3XeuTxjXCkG6pgIwWDRyooJKNIuOKemnN0N0IkhTU= -github.com/blevesearch/bleve_index_api v1.0.0/go.mod h1:fiwKS0xLEm+gBRgv5mumf0dhgFr2mDgZah1pqv1c1M4= -github.com/blevesearch/mmap-go v1.0.2 h1:JtMHb+FgQCTTYIhtMvimw15dJwu1Y5lrZDMOFXVWPk0= -github.com/blevesearch/mmap-go v1.0.2/go.mod h1:ol2qBqYaOUsGdm7aRMRrYGgPvnwLe6Y+7LMvAB5IbSA= -github.com/blevesearch/scorch_segment_api/v2 v2.0.1 h1:fd+hPtZ8GsbqPK1HslGp7Vhoik4arZteA/IsCEgOisw= -github.com/blevesearch/scorch_segment_api/v2 v2.0.1/go.mod h1:lq7yK2jQy1yQjtjTfU931aVqz7pYxEudHaDwOt1tXfU= -github.com/blevesearch/vellum v1.0.3 h1:U86G41A7CtXNzzpIJHM8lSTUqz1Mp8U870TkcdCzZc8= -github.com/blevesearch/vellum v1.0.3/go.mod h1:2u5ax02KeDuNWu4/C+hVQMD6uLN4txH1JbtpaDNLJRo= -github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= -github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/glycerine/go-unsnap-stream v0.0.0-20181221182339-f9677308dec2 h1:Ujru1hufTHVb++eG6OuNDKMxZnGIvF6o/u8q/8h2+I4= -github.com/glycerine/go-unsnap-stream v0.0.0-20181221182339-f9677308dec2/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE= -github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31 h1:gclg6gY70GLy3PbkQ1AERPfmLMMagS60DKF78eWwLn8= -github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24= 
-github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= -github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/gopherjs/gopherjs v0.0.0-20190910122728-9d188e94fb99 h1:twflg0XRTjwKpxb/jFExr4HGq6on2dEOmnL6FV+fgPw= -github.com/gopherjs/gopherjs v0.0.0-20190910122728-9d188e94fb99/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= -github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= -github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg= -github.com/mschoch/smat v0.2.0 h1:8imxQsjDm8yFEAVBe7azKmKSgzSkZXDuKkSq9374khM= -github.com/mschoch/smat v0.2.0/go.mod h1:kc9mz7DoBKqDyiRL7VZN8KvXQMWeTaVnttLRXOlotKw= -github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/philhofer/fwd v1.0.0 h1:UbZqGr5Y38ApvM/V/jEljVxwocdweyH+vmYvRPBnbqQ= -github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= 
-github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s= -github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= -github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= -github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= -github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/tinylib/msgp v1.1.0 h1:9fQd+ICuRIu/ue4vxJZu6/LzxN0HwMds2nq/0cFvxHU= -github.com/tinylib/msgp v1.1.0/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= -github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= -github.com/willf/bitset v1.1.10 h1:NotGKqX0KwQ72NUzqrjZq5ipPNDQex9lo3WpaS8L2sc= -github.com/willf/bitset v1.1.10/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= -github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= -golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181221143128-b4a75ba826a6/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a 
h1:aYOabOQFp6Vj6W1F80affTUvO9UxmJRx8K0gsfABByQ= -golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/blevesearch/zapx/v11/intcoder.go b/vendor/github.com/blevesearch/zapx/v11/intcoder.go deleted file mode 100644 index 571d06edb..000000000 --- a/vendor/github.com/blevesearch/zapx/v11/intcoder.go +++ /dev/null @@ -1,172 +0,0 @@ -// Copyright (c) 2017 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package zap - -import ( - "bytes" - "encoding/binary" - "io" -) - -type chunkedIntCoder struct { - final []byte - chunkSize uint64 - chunkBuf bytes.Buffer - chunkLens []uint64 - currChunk uint64 - - buf []byte -} - -// newChunkedIntCoder returns a new chunk int coder which packs data into -// chunks based on the provided chunkSize and supports up to the specified -// maxDocNum -func newChunkedIntCoder(chunkSize uint64, maxDocNum uint64) *chunkedIntCoder { - total := maxDocNum/chunkSize + 1 - rv := &chunkedIntCoder{ - chunkSize: chunkSize, - chunkLens: make([]uint64, total), - final: make([]byte, 0, 64), - } - - return rv -} - -// Reset lets you reuse this chunked int coder. buffers are reset and reused -// from previous use. you cannot change the chunk size or max doc num. -func (c *chunkedIntCoder) Reset() { - c.final = c.final[:0] - c.chunkBuf.Reset() - c.currChunk = 0 - for i := range c.chunkLens { - c.chunkLens[i] = 0 - } -} - -// Add encodes the provided integers into the correct chunk for the provided -// doc num. You MUST call Add() with increasing docNums. -func (c *chunkedIntCoder) Add(docNum uint64, vals ...uint64) error { - chunk := docNum / c.chunkSize - if chunk != c.currChunk { - // starting a new chunk - c.Close() - c.chunkBuf.Reset() - c.currChunk = chunk - } - - if len(c.buf) < binary.MaxVarintLen64 { - c.buf = make([]byte, binary.MaxVarintLen64) - } - - for _, val := range vals { - wb := binary.PutUvarint(c.buf, val) - _, err := c.chunkBuf.Write(c.buf[:wb]) - if err != nil { - return err - } - } - - return nil -} - -func (c *chunkedIntCoder) AddBytes(docNum uint64, buf []byte) error { - chunk := docNum / c.chunkSize - if chunk != c.currChunk { - // starting a new chunk - c.Close() - c.chunkBuf.Reset() - c.currChunk = chunk - } - - _, err := c.chunkBuf.Write(buf) - return err -} - -// Close indicates you are done calling Add() this allows the final chunk -// to be encoded. 
-func (c *chunkedIntCoder) Close() { - encodingBytes := c.chunkBuf.Bytes() - c.chunkLens[c.currChunk] = uint64(len(encodingBytes)) - c.final = append(c.final, encodingBytes...) - c.currChunk = uint64(cap(c.chunkLens)) // sentinel to detect double close -} - -// Write commits all the encoded chunked integers to the provided writer. -func (c *chunkedIntCoder) Write(w io.Writer) (int, error) { - bufNeeded := binary.MaxVarintLen64 * (1 + len(c.chunkLens)) - if len(c.buf) < bufNeeded { - c.buf = make([]byte, bufNeeded) - } - buf := c.buf - - // convert the chunk lengths into chunk offsets - chunkOffsets := modifyLengthsToEndOffsets(c.chunkLens) - - // write out the number of chunks & each chunk offsets - n := binary.PutUvarint(buf, uint64(len(chunkOffsets))) - for _, chunkOffset := range chunkOffsets { - n += binary.PutUvarint(buf[n:], chunkOffset) - } - - tw, err := w.Write(buf[:n]) - if err != nil { - return tw, err - } - - // write out the data - nw, err := w.Write(c.final) - tw += nw - if err != nil { - return tw, err - } - return tw, nil -} - -func (c *chunkedIntCoder) FinalSize() int { - return len(c.final) -} - -// modifyLengthsToEndOffsets converts the chunk length array -// to a chunk offset array. The readChunkBoundary -// will figure out the start and end of every chunk from -// these offsets. Starting offset of i'th index is stored -// in i-1'th position except for 0'th index and ending offset -// is stored at i'th index position. -// For 0'th element, starting position is always zero. 
-// eg: -// Lens -> 5 5 5 5 => 5 10 15 20 -// Lens -> 0 5 0 5 => 0 5 5 10 -// Lens -> 0 0 0 5 => 0 0 0 5 -// Lens -> 5 0 0 0 => 5 5 5 5 -// Lens -> 0 5 0 0 => 0 5 5 5 -// Lens -> 0 0 5 0 => 0 0 5 5 -func modifyLengthsToEndOffsets(lengths []uint64) []uint64 { - var runningOffset uint64 - var index, i int - for i = 1; i <= len(lengths); i++ { - runningOffset += lengths[i-1] - lengths[index] = runningOffset - index++ - } - return lengths -} - -func readChunkBoundary(chunk int, offsets []uint64) (uint64, uint64) { - var start uint64 - if chunk > 0 { - start = offsets[chunk-1] - } - return start, offsets[chunk] -} diff --git a/vendor/github.com/blevesearch/zapx/v11/memuvarint.go b/vendor/github.com/blevesearch/zapx/v11/memuvarint.go deleted file mode 100644 index 0c10c83a4..000000000 --- a/vendor/github.com/blevesearch/zapx/v11/memuvarint.go +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright (c) 2020 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package zap - -import ( - "fmt" -) - -type memUvarintReader struct { - C int // index of next byte to read from S - S []byte -} - -func newMemUvarintReader(s []byte) *memUvarintReader { - return &memUvarintReader{S: s} -} - -// Len returns the number of unread bytes. -func (r *memUvarintReader) Len() int { - n := len(r.S) - r.C - if n < 0 { - return 0 - } - return n -} - -// ReadUvarint reads an encoded uint64. The original code this was -// based on is at encoding/binary/ReadUvarint(). 
-func (r *memUvarintReader) ReadUvarint() (uint64, error) { - var x uint64 - var s uint - var C = r.C - var S = r.S - - for { - b := S[C] - C++ - - if b < 0x80 { - r.C = C - - // why 63? The original code had an 'i += 1' loop var and - // checked for i > 9 || i == 9 ...; but, we no longer - // check for the i var, but instead check here for s, - // which is incremented by 7. So, 7*9 == 63. - // - // why the "extra" >= check? The normal case is that s < - // 63, so we check this single >= guard first so that we - // hit the normal, nil-error return pathway sooner. - if s >= 63 && (s > 63 || s == 63 && b > 1) { - return 0, fmt.Errorf("memUvarintReader overflow") - } - - return x | uint64(b)< 0 { - storedIndexOffset, newDocNums, err = mergeStoredAndRemap(segments, drops, - fieldsMap, fieldsInv, fieldsSame, numDocs, cr, closeCh) - if err != nil { - return nil, 0, 0, 0, 0, nil, nil, nil, err - } - - dictLocs, docValueOffset, err = persistMergedRest(segments, drops, - fieldsInv, fieldsMap, fieldsSame, - newDocNums, numDocs, chunkFactor, cr, closeCh) - if err != nil { - return nil, 0, 0, 0, 0, nil, nil, nil, err - } - } else { - dictLocs = make([]uint64, len(fieldsInv)) - } - - fieldsIndexOffset, err = persistFields(fieldsInv, cr, dictLocs) - if err != nil { - return nil, 0, 0, 0, 0, nil, nil, nil, err - } - - return newDocNums, numDocs, storedIndexOffset, fieldsIndexOffset, docValueOffset, dictLocs, fieldsInv, fieldsMap, nil -} - -// mapFields takes the fieldsInv list and returns a map of fieldName -// to fieldID+1 -func mapFields(fields []string) map[string]uint16 { - rv := make(map[string]uint16, len(fields)) - for i, fieldName := range fields { - rv[fieldName] = uint16(i) + 1 - } - return rv -} - -// computeNewDocCount determines how many documents will be in the newly -// merged segment when obsoleted docs are dropped -func computeNewDocCount(segments []*SegmentBase, drops []*roaring.Bitmap) uint64 { - var newDocCount uint64 - for segI, segment := range segments { - 
newDocCount += segment.numDocs - if drops[segI] != nil { - newDocCount -= drops[segI].GetCardinality() - } - } - return newDocCount -} - -func persistMergedRest(segments []*SegmentBase, dropsIn []*roaring.Bitmap, - fieldsInv []string, fieldsMap map[string]uint16, fieldsSame bool, - newDocNumsIn [][]uint64, newSegDocCount uint64, chunkFactor uint32, - w *CountHashWriter, closeCh chan struct{}) ([]uint64, uint64, error) { - var bufMaxVarintLen64 []byte = make([]byte, binary.MaxVarintLen64) - var bufLoc []uint64 - - var postings *PostingsList - var postItr *PostingsIterator - - rv := make([]uint64, len(fieldsInv)) - fieldDvLocsStart := make([]uint64, len(fieldsInv)) - fieldDvLocsEnd := make([]uint64, len(fieldsInv)) - - tfEncoder := newChunkedIntCoder(uint64(chunkFactor), newSegDocCount-1) - locEncoder := newChunkedIntCoder(uint64(chunkFactor), newSegDocCount-1) - - var vellumBuf bytes.Buffer - newVellum, err := vellum.New(&vellumBuf, nil) - if err != nil { - return nil, 0, err - } - - newRoaring := roaring.NewBitmap() - - // for each field - for fieldID, fieldName := range fieldsInv { - // collect FST iterators from all active segments for this field - var newDocNums [][]uint64 - var drops []*roaring.Bitmap - var dicts []*Dictionary - var itrs []vellum.Iterator - - var segmentsInFocus []*SegmentBase - - for segmentI, segment := range segments { - // check for the closure in meantime - if isClosed(closeCh) { - return nil, 0, seg.ErrClosed - } - - dict, err2 := segment.dictionary(fieldName) - if err2 != nil { - return nil, 0, err2 - } - if dict != nil && dict.fst != nil { - itr, err2 := dict.fst.Iterator(nil, nil) - if err2 != nil && err2 != vellum.ErrIteratorDone { - return nil, 0, err2 - } - if itr != nil { - newDocNums = append(newDocNums, newDocNumsIn[segmentI]) - if dropsIn[segmentI] != nil && !dropsIn[segmentI].IsEmpty() { - drops = append(drops, dropsIn[segmentI]) - } else { - drops = append(drops, nil) - } - dicts = append(dicts, dict) - itrs = append(itrs, 
itr) - segmentsInFocus = append(segmentsInFocus, segment) - } - } - } - - var prevTerm []byte - - newRoaring.Clear() - - var lastDocNum, lastFreq, lastNorm uint64 - - // determines whether to use "1-hit" encoding optimization - // when a term appears in only 1 doc, with no loc info, - // has freq of 1, and the docNum fits into 31-bits - use1HitEncoding := func(termCardinality uint64) (bool, uint64, uint64) { - if termCardinality == uint64(1) && locEncoder.FinalSize() <= 0 { - docNum := uint64(newRoaring.Minimum()) - if under32Bits(docNum) && docNum == lastDocNum && lastFreq == 1 { - return true, docNum, lastNorm - } - } - return false, 0, 0 - } - - finishTerm := func(term []byte) error { - tfEncoder.Close() - locEncoder.Close() - - postingsOffset, err := writePostings(newRoaring, - tfEncoder, locEncoder, use1HitEncoding, w, bufMaxVarintLen64) - if err != nil { - return err - } - - if postingsOffset > 0 { - err = newVellum.Insert(term, postingsOffset) - if err != nil { - return err - } - } - - newRoaring.Clear() - - tfEncoder.Reset() - locEncoder.Reset() - - lastDocNum = 0 - lastFreq = 0 - lastNorm = 0 - - return nil - } - - enumerator, err := newEnumerator(itrs) - - for err == nil { - term, itrI, postingsOffset := enumerator.Current() - - if !bytes.Equal(prevTerm, term) { - // check for the closure in meantime - if isClosed(closeCh) { - return nil, 0, seg.ErrClosed - } - - // if the term changed, write out the info collected - // for the previous term - err = finishTerm(prevTerm) - if err != nil { - return nil, 0, err - } - } - - postings, err = dicts[itrI].postingsListFromOffset( - postingsOffset, drops[itrI], postings) - if err != nil { - return nil, 0, err - } - - postItr = postings.iterator(true, true, true, postItr) - - if fieldsSame { - // can optimize by copying freq/norm/loc bytes directly - lastDocNum, lastFreq, lastNorm, err = mergeTermFreqNormLocsByCopying( - term, postItr, newDocNums[itrI], newRoaring, - tfEncoder, locEncoder) - } else { - lastDocNum, 
lastFreq, lastNorm, bufLoc, err = mergeTermFreqNormLocs( - fieldsMap, term, postItr, newDocNums[itrI], newRoaring, - tfEncoder, locEncoder, bufLoc) - } - if err != nil { - return nil, 0, err - } - - prevTerm = prevTerm[:0] // copy to prevTerm in case Next() reuses term mem - prevTerm = append(prevTerm, term...) - - err = enumerator.Next() - } - if err != vellum.ErrIteratorDone { - return nil, 0, err - } - - err = finishTerm(prevTerm) - if err != nil { - return nil, 0, err - } - - dictOffset := uint64(w.Count()) - - err = newVellum.Close() - if err != nil { - return nil, 0, err - } - vellumData := vellumBuf.Bytes() - - // write out the length of the vellum data - n := binary.PutUvarint(bufMaxVarintLen64, uint64(len(vellumData))) - _, err = w.Write(bufMaxVarintLen64[:n]) - if err != nil { - return nil, 0, err - } - - // write this vellum to disk - _, err = w.Write(vellumData) - if err != nil { - return nil, 0, err - } - - rv[fieldID] = dictOffset - - // get the field doc value offset (start) - fieldDvLocsStart[fieldID] = uint64(w.Count()) - - // update the field doc values - fdvEncoder := newChunkedContentCoder(uint64(chunkFactor), newSegDocCount-1, w, true) - - fdvReadersAvailable := false - var dvIterClone *docValueReader - for segmentI, segment := range segmentsInFocus { - // check for the closure in meantime - if isClosed(closeCh) { - return nil, 0, seg.ErrClosed - } - - fieldIDPlus1 := uint16(segment.fieldsMap[fieldName]) - if dvIter, exists := segment.fieldDvReaders[fieldIDPlus1-1]; exists && - dvIter != nil { - fdvReadersAvailable = true - dvIterClone = dvIter.cloneInto(dvIterClone) - err = dvIterClone.iterateAllDocValues(segment, func(docNum uint64, terms []byte) error { - if newDocNums[segmentI][docNum] == docDropped { - return nil - } - err := fdvEncoder.Add(newDocNums[segmentI][docNum], terms) - if err != nil { - return err - } - return nil - }) - if err != nil { - return nil, 0, err - } - } - } - - if fdvReadersAvailable { - err = fdvEncoder.Close() - if 
err != nil { - return nil, 0, err - } - - // persist the doc value details for this field - _, err = fdvEncoder.Write() - if err != nil { - return nil, 0, err - } - - // get the field doc value offset (end) - fieldDvLocsEnd[fieldID] = uint64(w.Count()) - } else { - fieldDvLocsStart[fieldID] = fieldNotUninverted - fieldDvLocsEnd[fieldID] = fieldNotUninverted - } - - // reset vellum buffer and vellum builder - vellumBuf.Reset() - err = newVellum.Reset(&vellumBuf) - if err != nil { - return nil, 0, err - } - } - - fieldDvLocsOffset := uint64(w.Count()) - - buf := bufMaxVarintLen64 - for i := 0; i < len(fieldDvLocsStart); i++ { - n := binary.PutUvarint(buf, fieldDvLocsStart[i]) - _, err := w.Write(buf[:n]) - if err != nil { - return nil, 0, err - } - n = binary.PutUvarint(buf, fieldDvLocsEnd[i]) - _, err = w.Write(buf[:n]) - if err != nil { - return nil, 0, err - } - } - - return rv, fieldDvLocsOffset, nil -} - -func mergeTermFreqNormLocs(fieldsMap map[string]uint16, term []byte, postItr *PostingsIterator, - newDocNums []uint64, newRoaring *roaring.Bitmap, - tfEncoder *chunkedIntCoder, locEncoder *chunkedIntCoder, bufLoc []uint64) ( - lastDocNum uint64, lastFreq uint64, lastNorm uint64, bufLocOut []uint64, err error) { - next, err := postItr.Next() - for next != nil && err == nil { - hitNewDocNum := newDocNums[next.Number()] - if hitNewDocNum == docDropped { - return 0, 0, 0, nil, fmt.Errorf("see hit with dropped docNum") - } - - newRoaring.Add(uint32(hitNewDocNum)) - - nextFreq := next.Frequency() - nextNorm := uint64(math.Float32bits(float32(next.Norm()))) - - locs := next.Locations() - - err = tfEncoder.Add(hitNewDocNum, - encodeFreqHasLocs(nextFreq, len(locs) > 0), nextNorm) - if err != nil { - return 0, 0, 0, nil, err - } - - if len(locs) > 0 { - numBytesLocs := 0 - for _, loc := range locs { - ap := loc.ArrayPositions() - numBytesLocs += totalUvarintBytes(uint64(fieldsMap[loc.Field()]-1), - loc.Pos(), loc.Start(), loc.End(), uint64(len(ap)), ap) - } - - err = 
locEncoder.Add(hitNewDocNum, uint64(numBytesLocs)) - if err != nil { - return 0, 0, 0, nil, err - } - - for _, loc := range locs { - ap := loc.ArrayPositions() - if cap(bufLoc) < 5+len(ap) { - bufLoc = make([]uint64, 0, 5+len(ap)) - } - args := bufLoc[0:5] - args[0] = uint64(fieldsMap[loc.Field()] - 1) - args[1] = loc.Pos() - args[2] = loc.Start() - args[3] = loc.End() - args[4] = uint64(len(ap)) - args = append(args, ap...) - err = locEncoder.Add(hitNewDocNum, args...) - if err != nil { - return 0, 0, 0, nil, err - } - } - } - - lastDocNum = hitNewDocNum - lastFreq = nextFreq - lastNorm = nextNorm - - next, err = postItr.Next() - } - - return lastDocNum, lastFreq, lastNorm, bufLoc, err -} - -func mergeTermFreqNormLocsByCopying(term []byte, postItr *PostingsIterator, - newDocNums []uint64, newRoaring *roaring.Bitmap, - tfEncoder *chunkedIntCoder, locEncoder *chunkedIntCoder) ( - lastDocNum uint64, lastFreq uint64, lastNorm uint64, err error) { - nextDocNum, nextFreq, nextNorm, nextFreqNormBytes, nextLocBytes, err := - postItr.nextBytes() - for err == nil && len(nextFreqNormBytes) > 0 { - hitNewDocNum := newDocNums[nextDocNum] - if hitNewDocNum == docDropped { - return 0, 0, 0, fmt.Errorf("see hit with dropped doc num") - } - - newRoaring.Add(uint32(hitNewDocNum)) - err = tfEncoder.AddBytes(hitNewDocNum, nextFreqNormBytes) - if err != nil { - return 0, 0, 0, err - } - - if len(nextLocBytes) > 0 { - err = locEncoder.AddBytes(hitNewDocNum, nextLocBytes) - if err != nil { - return 0, 0, 0, err - } - } - - lastDocNum = hitNewDocNum - lastFreq = nextFreq - lastNorm = nextNorm - - nextDocNum, nextFreq, nextNorm, nextFreqNormBytes, nextLocBytes, err = - postItr.nextBytes() - } - - return lastDocNum, lastFreq, lastNorm, err -} - -func writePostings(postings *roaring.Bitmap, tfEncoder, locEncoder *chunkedIntCoder, - use1HitEncoding func(uint64) (bool, uint64, uint64), - w *CountHashWriter, bufMaxVarintLen64 []byte) ( - offset uint64, err error) { - termCardinality := 
postings.GetCardinality() - if termCardinality <= 0 { - return 0, nil - } - - if use1HitEncoding != nil { - encodeAs1Hit, docNum1Hit, normBits1Hit := use1HitEncoding(termCardinality) - if encodeAs1Hit { - return FSTValEncode1Hit(docNum1Hit, normBits1Hit), nil - } - } - - tfOffset := uint64(w.Count()) - _, err = tfEncoder.Write(w) - if err != nil { - return 0, err - } - - locOffset := uint64(w.Count()) - _, err = locEncoder.Write(w) - if err != nil { - return 0, err - } - - postingsOffset := uint64(w.Count()) - - n := binary.PutUvarint(bufMaxVarintLen64, tfOffset) - _, err = w.Write(bufMaxVarintLen64[:n]) - if err != nil { - return 0, err - } - - n = binary.PutUvarint(bufMaxVarintLen64, locOffset) - _, err = w.Write(bufMaxVarintLen64[:n]) - if err != nil { - return 0, err - } - - _, err = writeRoaringWithLen(postings, w, bufMaxVarintLen64) - if err != nil { - return 0, err - } - - return postingsOffset, nil -} - -type varintEncoder func(uint64) (int, error) - -func mergeStoredAndRemap(segments []*SegmentBase, drops []*roaring.Bitmap, - fieldsMap map[string]uint16, fieldsInv []string, fieldsSame bool, newSegDocCount uint64, - w *CountHashWriter, closeCh chan struct{}) (uint64, [][]uint64, error) { - var rv [][]uint64 // The remapped or newDocNums for each segment. 
- - var newDocNum uint64 - - var curr int - var data, compressed []byte - var metaBuf bytes.Buffer - varBuf := make([]byte, binary.MaxVarintLen64) - metaEncode := func(val uint64) (int, error) { - wb := binary.PutUvarint(varBuf, val) - return metaBuf.Write(varBuf[:wb]) - } - - vals := make([][][]byte, len(fieldsInv)) - typs := make([][]byte, len(fieldsInv)) - poss := make([][][]uint64, len(fieldsInv)) - - var posBuf []uint64 - - docNumOffsets := make([]uint64, newSegDocCount) - - vdc := visitDocumentCtxPool.Get().(*visitDocumentCtx) - defer visitDocumentCtxPool.Put(vdc) - - // for each segment - for segI, segment := range segments { - // check for the closure in meantime - if isClosed(closeCh) { - return 0, nil, seg.ErrClosed - } - - segNewDocNums := make([]uint64, segment.numDocs) - - dropsI := drops[segI] - - // optimize when the field mapping is the same across all - // segments and there are no deletions, via byte-copying - // of stored docs bytes directly to the writer - if fieldsSame && (dropsI == nil || dropsI.GetCardinality() == 0) { - err := segment.copyStoredDocs(newDocNum, docNumOffsets, w) - if err != nil { - return 0, nil, err - } - - for i := uint64(0); i < segment.numDocs; i++ { - segNewDocNums[i] = newDocNum - newDocNum++ - } - rv = append(rv, segNewDocNums) - - continue - } - - // for each doc num - for docNum := uint64(0); docNum < segment.numDocs; docNum++ { - // TODO: roaring's API limits docNums to 32-bits? 
- if dropsI != nil && dropsI.Contains(uint32(docNum)) { - segNewDocNums[docNum] = docDropped - continue - } - - segNewDocNums[docNum] = newDocNum - - curr = 0 - metaBuf.Reset() - data = data[:0] - - posTemp := posBuf - - // collect all the data - for i := 0; i < len(fieldsInv); i++ { - vals[i] = vals[i][:0] - typs[i] = typs[i][:0] - poss[i] = poss[i][:0] - } - err := segment.visitStoredFields(vdc, docNum, func(field string, typ byte, value []byte, pos []uint64) bool { - fieldID := int(fieldsMap[field]) - 1 - vals[fieldID] = append(vals[fieldID], value) - typs[fieldID] = append(typs[fieldID], typ) - - // copy array positions to preserve them beyond the scope of this callback - var curPos []uint64 - if len(pos) > 0 { - if cap(posTemp) < len(pos) { - posBuf = make([]uint64, len(pos)*len(fieldsInv)) - posTemp = posBuf - } - curPos = posTemp[0:len(pos)] - copy(curPos, pos) - posTemp = posTemp[len(pos):] - } - poss[fieldID] = append(poss[fieldID], curPos) - - return true - }) - if err != nil { - return 0, nil, err - } - - // _id field special case optimizes ExternalID() lookups - idFieldVal := vals[uint16(0)][0] - _, err = metaEncode(uint64(len(idFieldVal))) - if err != nil { - return 0, nil, err - } - - // now walk the non-"_id" fields in order - for fieldID := 1; fieldID < len(fieldsInv); fieldID++ { - storedFieldValues := vals[fieldID] - - stf := typs[fieldID] - spf := poss[fieldID] - - var err2 error - curr, data, err2 = persistStoredFieldValues(fieldID, - storedFieldValues, stf, spf, curr, metaEncode, data) - if err2 != nil { - return 0, nil, err2 - } - } - - metaBytes := metaBuf.Bytes() - - compressed = snappy.Encode(compressed[:cap(compressed)], data) - - // record where we're about to start writing - docNumOffsets[newDocNum] = uint64(w.Count()) - - // write out the meta len and compressed data len - _, err = writeUvarints(w, - uint64(len(metaBytes)), - uint64(len(idFieldVal)+len(compressed))) - if err != nil { - return 0, nil, err - } - // now write the meta - _, 
err = w.Write(metaBytes) - if err != nil { - return 0, nil, err - } - // now write the _id field val (counted as part of the 'compressed' data) - _, err = w.Write(idFieldVal) - if err != nil { - return 0, nil, err - } - // now write the compressed data - _, err = w.Write(compressed) - if err != nil { - return 0, nil, err - } - - newDocNum++ - } - - rv = append(rv, segNewDocNums) - } - - // return value is the start of the stored index - storedIndexOffset := uint64(w.Count()) - - // now write out the stored doc index - for _, docNumOffset := range docNumOffsets { - err := binary.Write(w, binary.BigEndian, docNumOffset) - if err != nil { - return 0, nil, err - } - } - - return storedIndexOffset, rv, nil -} - -// copyStoredDocs writes out a segment's stored doc info, optimized by -// using a single Write() call for the entire set of bytes. The -// newDocNumOffsets is filled with the new offsets for each doc. -func (s *SegmentBase) copyStoredDocs(newDocNum uint64, newDocNumOffsets []uint64, - w *CountHashWriter) error { - if s.numDocs <= 0 { - return nil - } - - indexOffset0, storedOffset0, _, _, _ := - s.getDocStoredOffsets(0) // the segment's first doc - - indexOffsetN, storedOffsetN, readN, metaLenN, dataLenN := - s.getDocStoredOffsets(s.numDocs - 1) // the segment's last doc - - storedOffset0New := uint64(w.Count()) - - storedBytes := s.mem[storedOffset0 : storedOffsetN+readN+metaLenN+dataLenN] - _, err := w.Write(storedBytes) - if err != nil { - return err - } - - // remap the storedOffset's for the docs into new offsets relative - // to storedOffset0New, filling the given docNumOffsetsOut array - for indexOffset := indexOffset0; indexOffset <= indexOffsetN; indexOffset += 8 { - storedOffset := binary.BigEndian.Uint64(s.mem[indexOffset : indexOffset+8]) - storedOffsetNew := storedOffset - storedOffset0 + storedOffset0New - newDocNumOffsets[newDocNum] = storedOffsetNew - newDocNum += 1 - } - - return nil -} - -// mergeFields builds a unified list of fields used 
across all the -// input segments, and computes whether the fields are the same across -// segments (which depends on fields to be sorted in the same way -// across segments) -func mergeFields(segments []*SegmentBase) (bool, []string) { - fieldsSame := true - - var segment0Fields []string - if len(segments) > 0 { - segment0Fields = segments[0].Fields() - } - - fieldsExist := map[string]struct{}{} - for _, segment := range segments { - fields := segment.Fields() - for fieldi, field := range fields { - fieldsExist[field] = struct{}{} - if len(segment0Fields) != len(fields) || segment0Fields[fieldi] != field { - fieldsSame = false - } - } - } - - rv := make([]string, 0, len(fieldsExist)) - // ensure _id stays first - rv = append(rv, "_id") - for k := range fieldsExist { - if k != "_id" { - rv = append(rv, k) - } - } - - sort.Strings(rv[1:]) // leave _id as first - - return fieldsSame, rv -} - -func isClosed(closeCh chan struct{}) bool { - select { - case <-closeCh: - return true - default: - return false - } -} diff --git a/vendor/github.com/blevesearch/zapx/v11/new.go b/vendor/github.com/blevesearch/zapx/v11/new.go deleted file mode 100644 index 4491422aa..000000000 --- a/vendor/github.com/blevesearch/zapx/v11/new.go +++ /dev/null @@ -1,817 +0,0 @@ -// Copyright (c) 2018 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package zap - -import ( - "bytes" - "encoding/binary" - "math" - "sort" - "sync" - - "github.com/RoaringBitmap/roaring" - index "github.com/blevesearch/bleve_index_api" - segment "github.com/blevesearch/scorch_segment_api/v2" - "github.com/blevesearch/vellum" - "github.com/golang/snappy" -) - -var NewSegmentBufferNumResultsBump int = 100 -var NewSegmentBufferNumResultsFactor float64 = 1.0 -var NewSegmentBufferAvgBytesPerDocFactor float64 = 1.0 - -// ValidateDocFields can be set by applications to perform additional checks -// on fields in a document being added to a new segment, by default it does -// nothing. -// This API is experimental and may be removed at any time. -var ValidateDocFields = func(field index.Field) error { - return nil -} - -var defaultChunkFactor uint32 = 1024 - -// New creates an in-memory zap-encoded SegmentBase from a set of Documents -func (z *ZapPlugin) New(results []index.Document) ( - segment.Segment, uint64, error) { - return z.newWithChunkFactor(results, defaultChunkFactor) -} - -func (*ZapPlugin) newWithChunkFactor(results []index.Document, - chunkFactor uint32) (segment.Segment, uint64, error) { - s := interimPool.Get().(*interim) - - var br bytes.Buffer - if s.lastNumDocs > 0 { - // use previous results to initialize the buf with an estimate - // size, but note that the interim instance comes from a - // global interimPool, so multiple scorch instances indexing - // different docs can lead to low quality estimates - estimateAvgBytesPerDoc := int(float64(s.lastOutSize/s.lastNumDocs) * - NewSegmentBufferNumResultsFactor) - estimateNumResults := int(float64(len(results)+NewSegmentBufferNumResultsBump) * - NewSegmentBufferAvgBytesPerDocFactor) - br.Grow(estimateAvgBytesPerDoc * estimateNumResults) - } - - s.results = results - s.chunkFactor = chunkFactor - s.w = NewCountHashWriter(&br) - - storedIndexOffset, fieldsIndexOffset, fdvIndexOffset, dictOffsets, - err := s.convert() - if err != nil { - return nil, uint64(0), err - } - - sb, 
err := InitSegmentBase(br.Bytes(), s.w.Sum32(), chunkFactor, - s.FieldsMap, s.FieldsInv, uint64(len(results)), - storedIndexOffset, fieldsIndexOffset, fdvIndexOffset, dictOffsets) - - if err == nil && s.reset() == nil { - s.lastNumDocs = len(results) - s.lastOutSize = len(br.Bytes()) - interimPool.Put(s) - } - - return sb, uint64(len(br.Bytes())), err -} - -var interimPool = sync.Pool{New: func() interface{} { return &interim{} }} - -// interim holds temporary working data used while converting from -// analysis results to a zap-encoded segment -type interim struct { - results []index.Document - - chunkFactor uint32 - - w *CountHashWriter - - // FieldsMap adds 1 to field id to avoid zero value issues - // name -> field id + 1 - FieldsMap map[string]uint16 - - // FieldsInv is the inverse of FieldsMap - // field id -> name - FieldsInv []string - - // Term dictionaries for each field - // field id -> term -> postings list id + 1 - Dicts []map[string]uint64 - - // Terms for each field, where terms are sorted ascending - // field id -> []term - DictKeys [][]string - - // Fields whose IncludeDocValues is true - // field id -> bool - IncludeDocValues []bool - - // postings id -> bitmap of docNums - Postings []*roaring.Bitmap - - // postings id -> freq/norm's, one for each docNum in postings - FreqNorms [][]interimFreqNorm - freqNormsBacking []interimFreqNorm - - // postings id -> locs, one for each freq - Locs [][]interimLoc - locsBacking []interimLoc - - numTermsPerPostingsList []int // key is postings list id - numLocsPerPostingsList []int // key is postings list id - - builder *vellum.Builder - builderBuf bytes.Buffer - - metaBuf bytes.Buffer - - tmp0 []byte - tmp1 []byte - - lastNumDocs int - lastOutSize int -} - -func (s *interim) reset() (err error) { - s.results = nil - s.chunkFactor = 0 - s.w = nil - s.FieldsMap = nil - s.FieldsInv = nil - for i := range s.Dicts { - s.Dicts[i] = nil - } - s.Dicts = s.Dicts[:0] - for i := range s.DictKeys { - s.DictKeys[i] = 
s.DictKeys[i][:0] - } - s.DictKeys = s.DictKeys[:0] - for i := range s.IncludeDocValues { - s.IncludeDocValues[i] = false - } - s.IncludeDocValues = s.IncludeDocValues[:0] - for _, idn := range s.Postings { - idn.Clear() - } - s.Postings = s.Postings[:0] - s.FreqNorms = s.FreqNorms[:0] - for i := range s.freqNormsBacking { - s.freqNormsBacking[i] = interimFreqNorm{} - } - s.freqNormsBacking = s.freqNormsBacking[:0] - s.Locs = s.Locs[:0] - for i := range s.locsBacking { - s.locsBacking[i] = interimLoc{} - } - s.locsBacking = s.locsBacking[:0] - s.numTermsPerPostingsList = s.numTermsPerPostingsList[:0] - s.numLocsPerPostingsList = s.numLocsPerPostingsList[:0] - s.builderBuf.Reset() - if s.builder != nil { - err = s.builder.Reset(&s.builderBuf) - } - s.metaBuf.Reset() - s.tmp0 = s.tmp0[:0] - s.tmp1 = s.tmp1[:0] - s.lastNumDocs = 0 - s.lastOutSize = 0 - - return err -} - -func (s *interim) grabBuf(size int) []byte { - buf := s.tmp0 - if cap(buf) < size { - buf = make([]byte, size) - s.tmp0 = buf - } - return buf[0:size] -} - -type interimStoredField struct { - vals [][]byte - typs []byte - arrayposs [][]uint64 // array positions -} - -type interimFreqNorm struct { - freq uint64 - norm float32 - numLocs int -} - -type interimLoc struct { - fieldID uint16 - pos uint64 - start uint64 - end uint64 - arrayposs []uint64 -} - -func (s *interim) convert() (uint64, uint64, uint64, []uint64, error) { - s.FieldsMap = map[string]uint16{} - - s.getOrDefineField("_id") // _id field is fieldID 0 - - for _, result := range s.results { - result.VisitComposite(func(field index.CompositeField) { - s.getOrDefineField(field.Name()) - }) - result.VisitFields(func(field index.Field) { - s.getOrDefineField(field.Name()) - }) - } - - sort.Strings(s.FieldsInv[1:]) // keep _id as first field - - for fieldID, fieldName := range s.FieldsInv { - s.FieldsMap[fieldName] = uint16(fieldID + 1) - } - - if cap(s.IncludeDocValues) >= len(s.FieldsInv) { - s.IncludeDocValues = 
s.IncludeDocValues[:len(s.FieldsInv)] - } else { - s.IncludeDocValues = make([]bool, len(s.FieldsInv)) - } - - s.prepareDicts() - - for _, dict := range s.DictKeys { - sort.Strings(dict) - } - - s.processDocuments() - - storedIndexOffset, err := s.writeStoredFields() - if err != nil { - return 0, 0, 0, nil, err - } - - var fdvIndexOffset uint64 - var dictOffsets []uint64 - - if len(s.results) > 0 { - fdvIndexOffset, dictOffsets, err = s.writeDicts() - if err != nil { - return 0, 0, 0, nil, err - } - } else { - dictOffsets = make([]uint64, len(s.FieldsInv)) - } - - fieldsIndexOffset, err := persistFields(s.FieldsInv, s.w, dictOffsets) - if err != nil { - return 0, 0, 0, nil, err - } - - return storedIndexOffset, fieldsIndexOffset, fdvIndexOffset, dictOffsets, nil -} - -func (s *interim) getOrDefineField(fieldName string) int { - fieldIDPlus1, exists := s.FieldsMap[fieldName] - if !exists { - fieldIDPlus1 = uint16(len(s.FieldsInv) + 1) - s.FieldsMap[fieldName] = fieldIDPlus1 - s.FieldsInv = append(s.FieldsInv, fieldName) - - s.Dicts = append(s.Dicts, make(map[string]uint64)) - - n := len(s.DictKeys) - if n < cap(s.DictKeys) { - s.DictKeys = s.DictKeys[:n+1] - s.DictKeys[n] = s.DictKeys[n][:0] - } else { - s.DictKeys = append(s.DictKeys, []string(nil)) - } - } - - return int(fieldIDPlus1 - 1) -} - -// fill Dicts and DictKeys from analysis results -func (s *interim) prepareDicts() { - var pidNext int - - var totTFs int - var totLocs int - - visitField := func(field index.Field) { - fieldID := uint16(s.getOrDefineField(field.Name())) - - dict := s.Dicts[fieldID] - dictKeys := s.DictKeys[fieldID] - - tfs := field.AnalyzedTokenFrequencies() - for term, tf := range tfs { - pidPlus1, exists := dict[term] - if !exists { - pidNext++ - pidPlus1 = uint64(pidNext) - - dict[term] = pidPlus1 - dictKeys = append(dictKeys, term) - - s.numTermsPerPostingsList = append(s.numTermsPerPostingsList, 0) - s.numLocsPerPostingsList = append(s.numLocsPerPostingsList, 0) - } - - pid := 
pidPlus1 - 1 - - s.numTermsPerPostingsList[pid] += 1 - s.numLocsPerPostingsList[pid] += len(tf.Locations) - - totLocs += len(tf.Locations) - } - - totTFs += len(tfs) - - s.DictKeys[fieldID] = dictKeys - } - - for _, result := range s.results { - // walk each composite field - result.VisitComposite(func(field index.CompositeField) { - visitField(field) - }) - - // walk each field - result.VisitFields(visitField) - } - - numPostingsLists := pidNext - - if cap(s.Postings) >= numPostingsLists { - s.Postings = s.Postings[:numPostingsLists] - } else { - postings := make([]*roaring.Bitmap, numPostingsLists) - copy(postings, s.Postings[:cap(s.Postings)]) - for i := 0; i < numPostingsLists; i++ { - if postings[i] == nil { - postings[i] = roaring.New() - } - } - s.Postings = postings - } - - if cap(s.FreqNorms) >= numPostingsLists { - s.FreqNorms = s.FreqNorms[:numPostingsLists] - } else { - s.FreqNorms = make([][]interimFreqNorm, numPostingsLists) - } - - if cap(s.freqNormsBacking) >= totTFs { - s.freqNormsBacking = s.freqNormsBacking[:totTFs] - } else { - s.freqNormsBacking = make([]interimFreqNorm, totTFs) - } - - freqNormsBacking := s.freqNormsBacking - for pid, numTerms := range s.numTermsPerPostingsList { - s.FreqNorms[pid] = freqNormsBacking[0:0] - freqNormsBacking = freqNormsBacking[numTerms:] - } - - if cap(s.Locs) >= numPostingsLists { - s.Locs = s.Locs[:numPostingsLists] - } else { - s.Locs = make([][]interimLoc, numPostingsLists) - } - - if cap(s.locsBacking) >= totLocs { - s.locsBacking = s.locsBacking[:totLocs] - } else { - s.locsBacking = make([]interimLoc, totLocs) - } - - locsBacking := s.locsBacking - for pid, numLocs := range s.numLocsPerPostingsList { - s.Locs[pid] = locsBacking[0:0] - locsBacking = locsBacking[numLocs:] - } -} - -func (s *interim) processDocuments() { - numFields := len(s.FieldsInv) - reuseFieldLens := make([]int, numFields) - reuseFieldTFs := make([]index.TokenFrequencies, numFields) - - for docNum, result := range s.results { - for i 
:= 0; i < numFields; i++ { // clear these for reuse - reuseFieldLens[i] = 0 - reuseFieldTFs[i] = nil - } - - s.processDocument(uint64(docNum), result, - reuseFieldLens, reuseFieldTFs) - } -} - -func (s *interim) processDocument(docNum uint64, - result index.Document, - fieldLens []int, fieldTFs []index.TokenFrequencies) { - visitField := func(field index.Field) { - fieldID := uint16(s.getOrDefineField(field.Name())) - fieldLens[fieldID] += field.AnalyzedLength() - - existingFreqs := fieldTFs[fieldID] - if existingFreqs != nil { - existingFreqs.MergeAll(field.Name(), field.AnalyzedTokenFrequencies()) - } else { - fieldTFs[fieldID] = field.AnalyzedTokenFrequencies() - } - } - - // walk each composite field - result.VisitComposite(func(field index.CompositeField) { - visitField(field) - }) - - // walk each field - result.VisitFields(visitField) - - // now that it's been rolled up into fieldTFs, walk that - for fieldID, tfs := range fieldTFs { - dict := s.Dicts[fieldID] - norm := float32(1.0 / math.Sqrt(float64(fieldLens[fieldID]))) - - for term, tf := range tfs { - pid := dict[term] - 1 - bs := s.Postings[pid] - bs.Add(uint32(docNum)) - - s.FreqNorms[pid] = append(s.FreqNorms[pid], - interimFreqNorm{ - freq: uint64(tf.Frequency()), - norm: norm, - numLocs: len(tf.Locations), - }) - - if len(tf.Locations) > 0 { - locs := s.Locs[pid] - - for _, loc := range tf.Locations { - var locf = uint16(fieldID) - if loc.Field != "" { - locf = uint16(s.getOrDefineField(loc.Field)) - } - var arrayposs []uint64 - if len(loc.ArrayPositions) > 0 { - arrayposs = loc.ArrayPositions - } - locs = append(locs, interimLoc{ - fieldID: locf, - pos: uint64(loc.Position), - start: uint64(loc.Start), - end: uint64(loc.End), - arrayposs: arrayposs, - }) - } - - s.Locs[pid] = locs - } - } - } -} - -func (s *interim) writeStoredFields() ( - storedIndexOffset uint64, err error) { - varBuf := make([]byte, binary.MaxVarintLen64) - metaEncode := func(val uint64) (int, error) { - wb := 
binary.PutUvarint(varBuf, val) - return s.metaBuf.Write(varBuf[:wb]) - } - - data, compressed := s.tmp0[:0], s.tmp1[:0] - defer func() { s.tmp0, s.tmp1 = data, compressed }() - - // keyed by docNum - docStoredOffsets := make([]uint64, len(s.results)) - - // keyed by fieldID, for the current doc in the loop - docStoredFields := map[uint16]interimStoredField{} - - for docNum, result := range s.results { - for fieldID := range docStoredFields { // reset for next doc - delete(docStoredFields, fieldID) - } - - var validationErr error - result.VisitFields(func(field index.Field) { - fieldID := uint16(s.getOrDefineField(field.Name())) - - if field.Options().IsStored() { - isf := docStoredFields[fieldID] - isf.vals = append(isf.vals, field.Value()) - isf.typs = append(isf.typs, field.EncodedFieldType()) - isf.arrayposs = append(isf.arrayposs, field.ArrayPositions()) - docStoredFields[fieldID] = isf - } - - if field.Options().IncludeDocValues() { - s.IncludeDocValues[fieldID] = true - } - - err := ValidateDocFields(field) - if err != nil && validationErr == nil { - validationErr = err - } - }) - if validationErr != nil { - return 0, validationErr - } - - var curr int - - s.metaBuf.Reset() - data = data[:0] - - // _id field special case optimizes ExternalID() lookups - idFieldVal := docStoredFields[uint16(0)].vals[0] - _, err = metaEncode(uint64(len(idFieldVal))) - if err != nil { - return 0, err - } - - // handle non-"_id" fields - for fieldID := 1; fieldID < len(s.FieldsInv); fieldID++ { - isf, exists := docStoredFields[uint16(fieldID)] - if exists { - curr, data, err = persistStoredFieldValues( - fieldID, isf.vals, isf.typs, isf.arrayposs, - curr, metaEncode, data) - if err != nil { - return 0, err - } - } - } - - metaBytes := s.metaBuf.Bytes() - - compressed = snappy.Encode(compressed[:cap(compressed)], data) - - docStoredOffsets[docNum] = uint64(s.w.Count()) - - _, err := writeUvarints(s.w, - uint64(len(metaBytes)), - uint64(len(idFieldVal)+len(compressed))) - if err != 
nil { - return 0, err - } - - _, err = s.w.Write(metaBytes) - if err != nil { - return 0, err - } - - _, err = s.w.Write(idFieldVal) - if err != nil { - return 0, err - } - - _, err = s.w.Write(compressed) - if err != nil { - return 0, err - } - } - - storedIndexOffset = uint64(s.w.Count()) - - for _, docStoredOffset := range docStoredOffsets { - err = binary.Write(s.w, binary.BigEndian, docStoredOffset) - if err != nil { - return 0, err - } - } - - return storedIndexOffset, nil -} - -func (s *interim) writeDicts() (fdvIndexOffset uint64, dictOffsets []uint64, err error) { - dictOffsets = make([]uint64, len(s.FieldsInv)) - - fdvOffsetsStart := make([]uint64, len(s.FieldsInv)) - fdvOffsetsEnd := make([]uint64, len(s.FieldsInv)) - - buf := s.grabBuf(binary.MaxVarintLen64) - - tfEncoder := newChunkedIntCoder(uint64(s.chunkFactor), uint64(len(s.results)-1)) - locEncoder := newChunkedIntCoder(uint64(s.chunkFactor), uint64(len(s.results)-1)) - fdvEncoder := newChunkedContentCoder(uint64(s.chunkFactor), uint64(len(s.results)-1), s.w, false) - - var docTermMap [][]byte - - if s.builder == nil { - s.builder, err = vellum.New(&s.builderBuf, nil) - if err != nil { - return 0, nil, err - } - } - - for fieldID, terms := range s.DictKeys { - if cap(docTermMap) < len(s.results) { - docTermMap = make([][]byte, len(s.results)) - } else { - docTermMap = docTermMap[0:len(s.results)] - for docNum := range docTermMap { // reset the docTermMap - docTermMap[docNum] = docTermMap[docNum][:0] - } - } - - dict := s.Dicts[fieldID] - - for _, term := range terms { // terms are already sorted - pid := dict[term] - 1 - - postingsBS := s.Postings[pid] - - freqNorms := s.FreqNorms[pid] - freqNormOffset := 0 - - locs := s.Locs[pid] - locOffset := 0 - - postingsItr := postingsBS.Iterator() - for postingsItr.HasNext() { - docNum := uint64(postingsItr.Next()) - - freqNorm := freqNorms[freqNormOffset] - - err = tfEncoder.Add(docNum, - encodeFreqHasLocs(freqNorm.freq, freqNorm.numLocs > 0), - 
uint64(math.Float32bits(freqNorm.norm))) - if err != nil { - return 0, nil, err - } - - if freqNorm.numLocs > 0 { - numBytesLocs := 0 - for _, loc := range locs[locOffset : locOffset+freqNorm.numLocs] { - numBytesLocs += totalUvarintBytes( - uint64(loc.fieldID), loc.pos, loc.start, loc.end, - uint64(len(loc.arrayposs)), loc.arrayposs) - } - - err = locEncoder.Add(docNum, uint64(numBytesLocs)) - if err != nil { - return 0, nil, err - } - - for _, loc := range locs[locOffset : locOffset+freqNorm.numLocs] { - err = locEncoder.Add(docNum, - uint64(loc.fieldID), loc.pos, loc.start, loc.end, - uint64(len(loc.arrayposs))) - if err != nil { - return 0, nil, err - } - - err = locEncoder.Add(docNum, loc.arrayposs...) - if err != nil { - return 0, nil, err - } - } - - locOffset += freqNorm.numLocs - } - - freqNormOffset++ - - docTermMap[docNum] = append( - append(docTermMap[docNum], term...), - termSeparator) - } - - tfEncoder.Close() - locEncoder.Close() - - postingsOffset, err := - writePostings(postingsBS, tfEncoder, locEncoder, nil, s.w, buf) - if err != nil { - return 0, nil, err - } - - if postingsOffset > uint64(0) { - err = s.builder.Insert([]byte(term), postingsOffset) - if err != nil { - return 0, nil, err - } - } - - tfEncoder.Reset() - locEncoder.Reset() - } - - err = s.builder.Close() - if err != nil { - return 0, nil, err - } - - // record where this dictionary starts - dictOffsets[fieldID] = uint64(s.w.Count()) - - vellumData := s.builderBuf.Bytes() - - // write out the length of the vellum data - n := binary.PutUvarint(buf, uint64(len(vellumData))) - _, err = s.w.Write(buf[:n]) - if err != nil { - return 0, nil, err - } - - // write this vellum to disk - _, err = s.w.Write(vellumData) - if err != nil { - return 0, nil, err - } - - // reset vellum for reuse - s.builderBuf.Reset() - - err = s.builder.Reset(&s.builderBuf) - if err != nil { - return 0, nil, err - } - - // write the field doc values - if s.IncludeDocValues[fieldID] { - for docNum, docTerms := range 
docTermMap { - if len(docTerms) > 0 { - err = fdvEncoder.Add(uint64(docNum), docTerms) - if err != nil { - return 0, nil, err - } - } - } - err = fdvEncoder.Close() - if err != nil { - return 0, nil, err - } - - fdvOffsetsStart[fieldID] = uint64(s.w.Count()) - - _, err = fdvEncoder.Write() - if err != nil { - return 0, nil, err - } - - fdvOffsetsEnd[fieldID] = uint64(s.w.Count()) - - fdvEncoder.Reset() - } else { - fdvOffsetsStart[fieldID] = fieldNotUninverted - fdvOffsetsEnd[fieldID] = fieldNotUninverted - } - } - - fdvIndexOffset = uint64(s.w.Count()) - - for i := 0; i < len(fdvOffsetsStart); i++ { - n := binary.PutUvarint(buf, fdvOffsetsStart[i]) - _, err := s.w.Write(buf[:n]) - if err != nil { - return 0, nil, err - } - n = binary.PutUvarint(buf, fdvOffsetsEnd[i]) - _, err = s.w.Write(buf[:n]) - if err != nil { - return 0, nil, err - } - } - - return fdvIndexOffset, dictOffsets, nil -} - -// returns the total # of bytes needed to encode the given uint64's -// into binary.PutUVarint() encoding -func totalUvarintBytes(a, b, c, d, e uint64, more []uint64) (n int) { - n = numUvarintBytes(a) - n += numUvarintBytes(b) - n += numUvarintBytes(c) - n += numUvarintBytes(d) - n += numUvarintBytes(e) - for _, v := range more { - n += numUvarintBytes(v) - } - return n -} - -// returns # of bytes needed to encode x in binary.PutUvarint() encoding -func numUvarintBytes(x uint64) (n int) { - for x >= 0x80 { - x >>= 7 - n++ - } - return n + 1 -} diff --git a/vendor/github.com/blevesearch/zapx/v11/posting.go b/vendor/github.com/blevesearch/zapx/v11/posting.go deleted file mode 100644 index b7125e7c8..000000000 --- a/vendor/github.com/blevesearch/zapx/v11/posting.go +++ /dev/null @@ -1,908 +0,0 @@ -// Copyright (c) 2017 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package zap - -import ( - "encoding/binary" - "fmt" - "math" - "reflect" - - "github.com/RoaringBitmap/roaring" - segment "github.com/blevesearch/scorch_segment_api/v2" -) - -var reflectStaticSizePostingsList int -var reflectStaticSizePostingsIterator int -var reflectStaticSizePosting int -var reflectStaticSizeLocation int - -func init() { - var pl PostingsList - reflectStaticSizePostingsList = int(reflect.TypeOf(pl).Size()) - var pi PostingsIterator - reflectStaticSizePostingsIterator = int(reflect.TypeOf(pi).Size()) - var p Posting - reflectStaticSizePosting = int(reflect.TypeOf(p).Size()) - var l Location - reflectStaticSizeLocation = int(reflect.TypeOf(l).Size()) -} - -// FST or vellum value (uint64) encoding is determined by the top two -// highest-order or most significant bits... -// -// encoding : MSB -// name : 63 62 61...to...bit #0 (LSB) -// ----------+---+---+--------------------------------------------------- -// general : 0 | 0 | 62-bits of postingsOffset. -// ~ : 0 | 1 | reserved for future. -// 1-hit : 1 | 0 | 31-bits of positive float31 norm | 31-bits docNum. -// ~ : 1 | 1 | reserved for future. -// -// Encoding "general" is able to handle all cases, where the -// postingsOffset points to more information about the postings for -// the term. -// -// Encoding "1-hit" is used to optimize a commonly seen case when a -// term has only a single hit. For example, a term in the _id field -// will have only 1 hit. The "1-hit" encoding is used for a term -// in a field when... 
-// -// - term vector info is disabled for that field; -// - and, the term appears in only a single doc for that field; -// - and, the term's freq is exactly 1 in that single doc for that field; -// - and, the docNum must fit into 31-bits; -// -// Otherwise, the "general" encoding is used instead. -// -// In the "1-hit" encoding, the field in that single doc may have -// other terms, which is supported in the "1-hit" encoding by the -// positive float31 norm. - -const FSTValEncodingMask = uint64(0xc000000000000000) -const FSTValEncodingGeneral = uint64(0x0000000000000000) -const FSTValEncoding1Hit = uint64(0x8000000000000000) - -func FSTValEncode1Hit(docNum uint64, normBits uint64) uint64 { - return FSTValEncoding1Hit | ((mask31Bits & normBits) << 31) | (mask31Bits & docNum) -} - -func FSTValDecode1Hit(v uint64) (docNum uint64, normBits uint64) { - return (mask31Bits & v), (mask31Bits & (v >> 31)) -} - -const mask31Bits = uint64(0x000000007fffffff) - -func under32Bits(x uint64) bool { - return x <= mask31Bits -} - -const DocNum1HitFinished = math.MaxUint64 - -var NormBits1Hit = uint64(math.Float32bits(float32(1))) - -// PostingsList is an in-memory representation of a postings list -type PostingsList struct { - sb *SegmentBase - postingsOffset uint64 - freqOffset uint64 - locOffset uint64 - postings *roaring.Bitmap - except *roaring.Bitmap - - // when normBits1Hit != 0, then this postings list came from a - // 1-hit encoding, and only the docNum1Hit & normBits1Hit apply - docNum1Hit uint64 - normBits1Hit uint64 -} - -// represents an immutable, empty postings list -var emptyPostingsList = &PostingsList{} - -func (p *PostingsList) Size() int { - sizeInBytes := reflectStaticSizePostingsList + SizeOfPtr - - if p.except != nil { - sizeInBytes += int(p.except.GetSizeInBytes()) - } - - return sizeInBytes -} - -func (p *PostingsList) OrInto(receiver *roaring.Bitmap) { - if p.normBits1Hit != 0 { - receiver.Add(uint32(p.docNum1Hit)) - return - } - - if p.postings != nil { - 
receiver.Or(p.postings) - } -} - -// Iterator returns an iterator for this postings list -func (p *PostingsList) Iterator(includeFreq, includeNorm, includeLocs bool, - prealloc segment.PostingsIterator) segment.PostingsIterator { - if p.normBits1Hit == 0 && p.postings == nil { - return emptyPostingsIterator - } - - var preallocPI *PostingsIterator - pi, ok := prealloc.(*PostingsIterator) - if ok && pi != nil { - preallocPI = pi - } - if preallocPI == emptyPostingsIterator { - preallocPI = nil - } - - return p.iterator(includeFreq, includeNorm, includeLocs, preallocPI) -} - -func (p *PostingsList) iterator(includeFreq, includeNorm, includeLocs bool, - rv *PostingsIterator) *PostingsIterator { - if rv == nil { - rv = &PostingsIterator{} - } else { - freqNormReader := rv.freqNormReader - if freqNormReader != nil { - freqNormReader.Reset([]byte(nil)) - } - - locReader := rv.locReader - if locReader != nil { - locReader.Reset([]byte(nil)) - } - - freqChunkOffsets := rv.freqChunkOffsets[:0] - locChunkOffsets := rv.locChunkOffsets[:0] - - nextLocs := rv.nextLocs[:0] - nextSegmentLocs := rv.nextSegmentLocs[:0] - - buf := rv.buf - - *rv = PostingsIterator{} // clear the struct - - rv.freqNormReader = freqNormReader - rv.locReader = locReader - - rv.freqChunkOffsets = freqChunkOffsets - rv.locChunkOffsets = locChunkOffsets - - rv.nextLocs = nextLocs - rv.nextSegmentLocs = nextSegmentLocs - - rv.buf = buf - } - - rv.postings = p - rv.includeFreqNorm = includeFreq || includeNorm || includeLocs - rv.includeLocs = includeLocs - - if p.normBits1Hit != 0 { - // "1-hit" encoding - rv.docNum1Hit = p.docNum1Hit - rv.normBits1Hit = p.normBits1Hit - - if p.except != nil && p.except.Contains(uint32(rv.docNum1Hit)) { - rv.docNum1Hit = DocNum1HitFinished - } - - return rv - } - - // "general" encoding, check if empty - if p.postings == nil { - return rv - } - - var n uint64 - var read int - - // prepare the freq chunk details - if rv.includeFreqNorm { - var numFreqChunks uint64 - 
numFreqChunks, read = binary.Uvarint(p.sb.mem[p.freqOffset+n : p.freqOffset+n+binary.MaxVarintLen64]) - n += uint64(read) - if cap(rv.freqChunkOffsets) >= int(numFreqChunks) { - rv.freqChunkOffsets = rv.freqChunkOffsets[:int(numFreqChunks)] - } else { - rv.freqChunkOffsets = make([]uint64, int(numFreqChunks)) - } - for i := 0; i < int(numFreqChunks); i++ { - rv.freqChunkOffsets[i], read = binary.Uvarint(p.sb.mem[p.freqOffset+n : p.freqOffset+n+binary.MaxVarintLen64]) - n += uint64(read) - } - rv.freqChunkStart = p.freqOffset + n - } - - // prepare the loc chunk details - if rv.includeLocs { - n = 0 - var numLocChunks uint64 - numLocChunks, read = binary.Uvarint(p.sb.mem[p.locOffset+n : p.locOffset+n+binary.MaxVarintLen64]) - n += uint64(read) - if cap(rv.locChunkOffsets) >= int(numLocChunks) { - rv.locChunkOffsets = rv.locChunkOffsets[:int(numLocChunks)] - } else { - rv.locChunkOffsets = make([]uint64, int(numLocChunks)) - } - for i := 0; i < int(numLocChunks); i++ { - rv.locChunkOffsets[i], read = binary.Uvarint(p.sb.mem[p.locOffset+n : p.locOffset+n+binary.MaxVarintLen64]) - n += uint64(read) - } - rv.locChunkStart = p.locOffset + n - } - - rv.all = p.postings.Iterator() - if p.except != nil { - rv.ActualBM = roaring.AndNot(p.postings, p.except) - rv.Actual = rv.ActualBM.Iterator() - } else { - rv.ActualBM = p.postings - rv.Actual = rv.all // Optimize to use same iterator for all & Actual. 
- } - - return rv -} - -// Count returns the number of items on this postings list -func (p *PostingsList) Count() uint64 { - var n, e uint64 - if p.normBits1Hit != 0 { - n = 1 - if p.except != nil && p.except.Contains(uint32(p.docNum1Hit)) { - e = 1 - } - } else if p.postings != nil { - n = p.postings.GetCardinality() - if p.except != nil { - e = p.postings.AndCardinality(p.except) - } - } - return n - e -} - -func (rv *PostingsList) read(postingsOffset uint64, d *Dictionary) error { - rv.postingsOffset = postingsOffset - - // handle "1-hit" encoding special case - if rv.postingsOffset&FSTValEncodingMask == FSTValEncoding1Hit { - return rv.init1Hit(postingsOffset) - } - - // read the location of the freq/norm details - var n uint64 - var read int - - rv.freqOffset, read = binary.Uvarint(d.sb.mem[postingsOffset+n : postingsOffset+binary.MaxVarintLen64]) - n += uint64(read) - - rv.locOffset, read = binary.Uvarint(d.sb.mem[postingsOffset+n : postingsOffset+n+binary.MaxVarintLen64]) - n += uint64(read) - - var postingsLen uint64 - postingsLen, read = binary.Uvarint(d.sb.mem[postingsOffset+n : postingsOffset+n+binary.MaxVarintLen64]) - n += uint64(read) - - roaringBytes := d.sb.mem[postingsOffset+n : postingsOffset+n+postingsLen] - - if rv.postings == nil { - rv.postings = roaring.NewBitmap() - } - _, err := rv.postings.FromBuffer(roaringBytes) - if err != nil { - return fmt.Errorf("error loading roaring bitmap: %v", err) - } - - return nil -} - -func (rv *PostingsList) init1Hit(fstVal uint64) error { - docNum, normBits := FSTValDecode1Hit(fstVal) - - rv.docNum1Hit = docNum - rv.normBits1Hit = normBits - - return nil -} - -// PostingsIterator provides a way to iterate through the postings list -type PostingsIterator struct { - postings *PostingsList - all roaring.IntPeekable - Actual roaring.IntPeekable - ActualBM *roaring.Bitmap - - currChunk uint32 - currChunkFreqNorm []byte - currChunkLoc []byte - - freqNormReader *memUvarintReader - locReader *memUvarintReader - - 
freqChunkOffsets []uint64 - freqChunkStart uint64 - - locChunkOffsets []uint64 - locChunkStart uint64 - - next Posting // reused across Next() calls - nextLocs []Location // reused across Next() calls - nextSegmentLocs []segment.Location // reused across Next() calls - - docNum1Hit uint64 - normBits1Hit uint64 - - buf []byte - - includeFreqNorm bool - includeLocs bool -} - -var emptyPostingsIterator = &PostingsIterator{} - -func (i *PostingsIterator) Size() int { - sizeInBytes := reflectStaticSizePostingsIterator + SizeOfPtr + - len(i.currChunkFreqNorm) + - len(i.currChunkLoc) + - len(i.freqChunkOffsets)*SizeOfUint64 + - len(i.locChunkOffsets)*SizeOfUint64 + - i.next.Size() - - for _, entry := range i.nextLocs { - sizeInBytes += entry.Size() - } - - return sizeInBytes -} - -func (i *PostingsIterator) loadChunk(chunk int) error { - if i.includeFreqNorm { - if chunk >= len(i.freqChunkOffsets) { - return fmt.Errorf("tried to load freq chunk that doesn't exist %d/(%d)", - chunk, len(i.freqChunkOffsets)) - } - - end, start := i.freqChunkStart, i.freqChunkStart - s, e := readChunkBoundary(chunk, i.freqChunkOffsets) - start += s - end += e - i.currChunkFreqNorm = i.postings.sb.mem[start:end] - if i.freqNormReader == nil { - i.freqNormReader = newMemUvarintReader(i.currChunkFreqNorm) - } else { - i.freqNormReader.Reset(i.currChunkFreqNorm) - } - } - - if i.includeLocs { - if chunk >= len(i.locChunkOffsets) { - return fmt.Errorf("tried to load loc chunk that doesn't exist %d/(%d)", - chunk, len(i.locChunkOffsets)) - } - - end, start := i.locChunkStart, i.locChunkStart - s, e := readChunkBoundary(chunk, i.locChunkOffsets) - start += s - end += e - i.currChunkLoc = i.postings.sb.mem[start:end] - if i.locReader == nil { - i.locReader = newMemUvarintReader(i.currChunkLoc) - } else { - i.locReader.Reset(i.currChunkLoc) - } - } - - i.currChunk = uint32(chunk) - return nil -} - -func (i *PostingsIterator) readFreqNormHasLocs() (uint64, uint64, bool, error) { - if i.normBits1Hit != 
0 { - return 1, i.normBits1Hit, false, nil - } - - freqHasLocs, err := i.freqNormReader.ReadUvarint() - if err != nil { - return 0, 0, false, fmt.Errorf("error reading frequency: %v", err) - } - - freq, hasLocs := decodeFreqHasLocs(freqHasLocs) - - normBits, err := i.freqNormReader.ReadUvarint() - if err != nil { - return 0, 0, false, fmt.Errorf("error reading norm: %v", err) - } - - return freq, normBits, hasLocs, nil -} - -func (i *PostingsIterator) skipFreqNormReadHasLocs() (bool, error) { - if i.normBits1Hit != 0 { - return false, nil - } - - freqHasLocs, err := i.freqNormReader.ReadUvarint() - if err != nil { - return false, fmt.Errorf("error reading freqHasLocs: %v", err) - } - - i.freqNormReader.SkipUvarint() // Skip normBits. - - return freqHasLocs&0x01 != 0, nil // See decodeFreqHasLocs() / hasLocs. -} - -func encodeFreqHasLocs(freq uint64, hasLocs bool) uint64 { - rv := freq << 1 - if hasLocs { - rv = rv | 0x01 // 0'th LSB encodes whether there are locations - } - return rv -} - -func decodeFreqHasLocs(freqHasLocs uint64) (uint64, bool) { - freq := freqHasLocs >> 1 - hasLocs := freqHasLocs&0x01 != 0 - return freq, hasLocs -} - -// readLocation processes all the integers on the stream representing a single -// location. 
-func (i *PostingsIterator) readLocation(l *Location) error { - // read off field - fieldID, err := i.locReader.ReadUvarint() - if err != nil { - return fmt.Errorf("error reading location field: %v", err) - } - // read off pos - pos, err := i.locReader.ReadUvarint() - if err != nil { - return fmt.Errorf("error reading location pos: %v", err) - } - // read off start - start, err := i.locReader.ReadUvarint() - if err != nil { - return fmt.Errorf("error reading location start: %v", err) - } - // read off end - end, err := i.locReader.ReadUvarint() - if err != nil { - return fmt.Errorf("error reading location end: %v", err) - } - // read off num array pos - numArrayPos, err := i.locReader.ReadUvarint() - if err != nil { - return fmt.Errorf("error reading location num array pos: %v", err) - } - - l.field = i.postings.sb.fieldsInv[fieldID] - l.pos = pos - l.start = start - l.end = end - - if cap(l.ap) < int(numArrayPos) { - l.ap = make([]uint64, int(numArrayPos)) - } else { - l.ap = l.ap[:int(numArrayPos)] - } - - // read off array positions - for k := 0; k < int(numArrayPos); k++ { - ap, err := i.locReader.ReadUvarint() - if err != nil { - return fmt.Errorf("error reading array position: %v", err) - } - - l.ap[k] = ap - } - - return nil -} - -// Next returns the next posting on the postings list, or nil at the end -func (i *PostingsIterator) Next() (segment.Posting, error) { - return i.nextAtOrAfter(0) -} - -// Advance returns the posting at the specified docNum or it is not present -// the next posting, or if the end is reached, nil -func (i *PostingsIterator) Advance(docNum uint64) (segment.Posting, error) { - return i.nextAtOrAfter(docNum) -} - -// Next returns the next posting on the postings list, or nil at the end -func (i *PostingsIterator) nextAtOrAfter(atOrAfter uint64) (segment.Posting, error) { - docNum, exists, err := i.nextDocNumAtOrAfter(atOrAfter) - if err != nil || !exists { - return nil, err - } - - i.next = Posting{} // clear the struct - rv := &i.next 
- rv.docNum = docNum - - if !i.includeFreqNorm { - return rv, nil - } - - var normBits uint64 - var hasLocs bool - - rv.freq, normBits, hasLocs, err = i.readFreqNormHasLocs() - if err != nil { - return nil, err - } - - rv.norm = math.Float32frombits(uint32(normBits)) - - if i.includeLocs && hasLocs { - // prepare locations into reused slices, where we assume - // rv.freq >= "number of locs", since in a composite field, - // some component fields might have their IncludeTermVector - // flags disabled while other component fields are enabled - if cap(i.nextLocs) >= int(rv.freq) { - i.nextLocs = i.nextLocs[0:rv.freq] - } else { - i.nextLocs = make([]Location, rv.freq, rv.freq*2) - } - if cap(i.nextSegmentLocs) < int(rv.freq) { - i.nextSegmentLocs = make([]segment.Location, rv.freq, rv.freq*2) - } - rv.locs = i.nextSegmentLocs[:0] - - numLocsBytes, err := i.locReader.ReadUvarint() - if err != nil { - return nil, fmt.Errorf("error reading location numLocsBytes: %v", err) - } - - j := 0 - startBytesRemaining := i.locReader.Len() // # bytes remaining in the locReader - for startBytesRemaining-i.locReader.Len() < int(numLocsBytes) { - err := i.readLocation(&i.nextLocs[j]) - if err != nil { - return nil, err - } - rv.locs = append(rv.locs, &i.nextLocs[j]) - j++ - } - } - - return rv, nil -} - -var freqHasLocs1Hit = encodeFreqHasLocs(1, false) - -// nextBytes returns the docNum and the encoded freq & loc bytes for -// the next posting -func (i *PostingsIterator) nextBytes() ( - docNumOut uint64, freq uint64, normBits uint64, - bytesFreqNorm []byte, bytesLoc []byte, err error) { - docNum, exists, err := i.nextDocNumAtOrAfter(0) - if err != nil || !exists { - return 0, 0, 0, nil, nil, err - } - - if i.normBits1Hit != 0 { - if i.buf == nil { - i.buf = make([]byte, binary.MaxVarintLen64*2) - } - n := binary.PutUvarint(i.buf, freqHasLocs1Hit) - n += binary.PutUvarint(i.buf[n:], i.normBits1Hit) - return docNum, uint64(1), i.normBits1Hit, i.buf[:n], nil, nil - } - - startFreqNorm 
:= len(i.currChunkFreqNorm) - i.freqNormReader.Len() - - var hasLocs bool - - freq, normBits, hasLocs, err = i.readFreqNormHasLocs() - if err != nil { - return 0, 0, 0, nil, nil, err - } - - endFreqNorm := len(i.currChunkFreqNorm) - i.freqNormReader.Len() - bytesFreqNorm = i.currChunkFreqNorm[startFreqNorm:endFreqNorm] - - if hasLocs { - startLoc := len(i.currChunkLoc) - i.locReader.Len() - - numLocsBytes, err := i.locReader.ReadUvarint() - if err != nil { - return 0, 0, 0, nil, nil, - fmt.Errorf("error reading location nextBytes numLocs: %v", err) - } - - // skip over all the location bytes - i.locReader.SkipBytes(int(numLocsBytes)) - - endLoc := len(i.currChunkLoc) - i.locReader.Len() - bytesLoc = i.currChunkLoc[startLoc:endLoc] - } - - return docNum, freq, normBits, bytesFreqNorm, bytesLoc, nil -} - -// nextDocNum returns the next docNum on the postings list, and also -// sets up the currChunk / loc related fields of the iterator. -func (i *PostingsIterator) nextDocNumAtOrAfter(atOrAfter uint64) (uint64, bool, error) { - if i.normBits1Hit != 0 { - if i.docNum1Hit == DocNum1HitFinished { - return 0, false, nil - } - if i.docNum1Hit < atOrAfter { - // advanced past our 1-hit - i.docNum1Hit = DocNum1HitFinished // consume our 1-hit docNum - return 0, false, nil - } - docNum := i.docNum1Hit - i.docNum1Hit = DocNum1HitFinished // consume our 1-hit docNum - return docNum, true, nil - } - - if i.Actual == nil || !i.Actual.HasNext() { - return 0, false, nil - } - - if i.postings == nil || i.postings.postings == i.ActualBM { - return i.nextDocNumAtOrAfterClean(atOrAfter) - } - - i.Actual.AdvanceIfNeeded(uint32(atOrAfter)) - - if !i.Actual.HasNext() { - // couldn't find anything - return 0, false, nil - } - - n := i.Actual.Next() - allN := i.all.Next() - - nChunk := n / i.postings.sb.chunkFactor - - // when allN becomes >= to here, then allN is in the same chunk as nChunk. 
- allNReachesNChunk := nChunk * i.postings.sb.chunkFactor - - // n is the next actual hit (excluding some postings), and - // allN is the next hit in the full postings, and - // if they don't match, move 'all' forwards until they do - for allN != n { - // we've reached same chunk, so move the freq/norm/loc decoders forward - if i.includeFreqNorm && allN >= allNReachesNChunk { - err := i.currChunkNext(nChunk) - if err != nil { - return 0, false, err - } - } - - allN = i.all.Next() - } - - if i.includeFreqNorm && (i.currChunk != nChunk || i.currChunkFreqNorm == nil) { - err := i.loadChunk(int(nChunk)) - if err != nil { - return 0, false, fmt.Errorf("error loading chunk: %v", err) - } - } - - return uint64(n), true, nil -} - -// optimization when the postings list is "clean" (e.g., no updates & -// no deletions) where the all bitmap is the same as the actual bitmap -func (i *PostingsIterator) nextDocNumAtOrAfterClean( - atOrAfter uint64) (uint64, bool, error) { - if !i.includeFreqNorm { - i.Actual.AdvanceIfNeeded(uint32(atOrAfter)) - - if !i.Actual.HasNext() { - return 0, false, nil // couldn't find anything - } - - return uint64(i.Actual.Next()), true, nil - } - - // freq-norm's needed, so maintain freq-norm chunk reader - sameChunkNexts := 0 // # of times we called Next() in the same chunk - n := i.Actual.Next() - nChunk := n / i.postings.sb.chunkFactor - - for uint64(n) < atOrAfter && i.Actual.HasNext() { - n = i.Actual.Next() - - nChunkPrev := nChunk - nChunk = n / i.postings.sb.chunkFactor - - if nChunk != nChunkPrev { - sameChunkNexts = 0 - } else { - sameChunkNexts += 1 - } - } - - if uint64(n) < atOrAfter { - // couldn't find anything - return 0, false, nil - } - - for j := 0; j < sameChunkNexts; j++ { - err := i.currChunkNext(nChunk) - if err != nil { - return 0, false, fmt.Errorf("error optimized currChunkNext: %v", err) - } - } - - if i.currChunk != nChunk || i.currChunkFreqNorm == nil { - err := i.loadChunk(int(nChunk)) - if err != nil { - return 0, false, 
fmt.Errorf("error loading chunk: %v", err) - } - } - - return uint64(n), true, nil -} - -func (i *PostingsIterator) currChunkNext(nChunk uint32) error { - if i.currChunk != nChunk || i.currChunkFreqNorm == nil { - err := i.loadChunk(int(nChunk)) - if err != nil { - return fmt.Errorf("error loading chunk: %v", err) - } - } - - // read off freq/offsets even though we don't care about them - hasLocs, err := i.skipFreqNormReadHasLocs() - if err != nil { - return err - } - - if i.includeLocs && hasLocs { - numLocsBytes, err := i.locReader.ReadUvarint() - if err != nil { - return fmt.Errorf("error reading location numLocsBytes: %v", err) - } - - // skip over all the location bytes - i.locReader.SkipBytes(int(numLocsBytes)) - } - - return nil -} - -// DocNum1Hit returns the docNum and true if this is "1-hit" optimized -// and the docNum is available. -func (p *PostingsIterator) DocNum1Hit() (uint64, bool) { - if p.normBits1Hit != 0 && p.docNum1Hit != DocNum1HitFinished { - return p.docNum1Hit, true - } - return 0, false -} - -// ActualBitmap returns the underlying actual bitmap -// which can be used up the stack for optimizations -func (p *PostingsIterator) ActualBitmap() *roaring.Bitmap { - return p.ActualBM -} - -// ReplaceActual replaces the ActualBM with the provided -// bitmap -func (p *PostingsIterator) ReplaceActual(abm *roaring.Bitmap) { - p.ActualBM = abm - p.Actual = abm.Iterator() -} - -// PostingsIteratorFromBitmap constructs a PostingsIterator given an -// "actual" bitmap. -func PostingsIteratorFromBitmap(bm *roaring.Bitmap, - includeFreqNorm, includeLocs bool) (segment.PostingsIterator, error) { - return &PostingsIterator{ - ActualBM: bm, - Actual: bm.Iterator(), - includeFreqNorm: includeFreqNorm, - includeLocs: includeLocs, - }, nil -} - -// PostingsIteratorFrom1Hit constructs a PostingsIterator given a -// 1-hit docNum. 
-func PostingsIteratorFrom1Hit(docNum1Hit uint64, - includeFreqNorm, includeLocs bool) (segment.PostingsIterator, error) { - return &PostingsIterator{ - docNum1Hit: docNum1Hit, - normBits1Hit: NormBits1Hit, - includeFreqNorm: includeFreqNorm, - includeLocs: includeLocs, - }, nil -} - -// Posting is a single entry in a postings list -type Posting struct { - docNum uint64 - freq uint64 - norm float32 - locs []segment.Location -} - -func (p *Posting) Size() int { - sizeInBytes := reflectStaticSizePosting - - for _, entry := range p.locs { - sizeInBytes += entry.Size() - } - - return sizeInBytes -} - -// Number returns the document number of this posting in this segment -func (p *Posting) Number() uint64 { - return p.docNum -} - -// Frequency returns the frequencies of occurrence of this term in this doc/field -func (p *Posting) Frequency() uint64 { - return p.freq -} - -// Norm returns the normalization factor for this posting -func (p *Posting) Norm() float64 { - return float64(p.norm) -} - -// Locations returns the location information for each occurrence -func (p *Posting) Locations() []segment.Location { - return p.locs -} - -// Location represents the location of a single occurrence -type Location struct { - field string - pos uint64 - start uint64 - end uint64 - ap []uint64 -} - -func (l *Location) Size() int { - return reflectStaticSizeLocation + - len(l.field) + - len(l.ap)*SizeOfUint64 -} - -// Field returns the name of the field (useful in composite fields to know -// which original field the value came from) -func (l *Location) Field() string { - return l.field -} - -// Start returns the start byte offset of this occurrence -func (l *Location) Start() uint64 { - return l.start -} - -// End returns the end byte offset of this occurrence -func (l *Location) End() uint64 { - return l.end -} - -// Pos returns the 1-based phrase position of this occurrence -func (l *Location) Pos() uint64 { - return l.pos -} - -// ArrayPositions returns the array position vector 
associated with this occurrence -func (l *Location) ArrayPositions() []uint64 { - return l.ap -} diff --git a/vendor/github.com/blevesearch/zapx/v11/read.go b/vendor/github.com/blevesearch/zapx/v11/read.go deleted file mode 100644 index e47d4c6ab..000000000 --- a/vendor/github.com/blevesearch/zapx/v11/read.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright (c) 2017 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package zap - -import "encoding/binary" - -func (s *SegmentBase) getDocStoredMetaAndCompressed(docNum uint64) ([]byte, []byte) { - _, storedOffset, n, metaLen, dataLen := s.getDocStoredOffsets(docNum) - - meta := s.mem[storedOffset+n : storedOffset+n+metaLen] - data := s.mem[storedOffset+n+metaLen : storedOffset+n+metaLen+dataLen] - - return meta, data -} - -func (s *SegmentBase) getDocStoredOffsets(docNum uint64) ( - uint64, uint64, uint64, uint64, uint64) { - indexOffset := s.storedIndexOffset + (8 * docNum) - - storedOffset := binary.BigEndian.Uint64(s.mem[indexOffset : indexOffset+8]) - - var n uint64 - - metaLen, read := binary.Uvarint(s.mem[storedOffset : storedOffset+binary.MaxVarintLen64]) - n += uint64(read) - - dataLen, read := binary.Uvarint(s.mem[storedOffset+n : storedOffset+n+binary.MaxVarintLen64]) - n += uint64(read) - - return indexOffset, storedOffset, n, metaLen, dataLen -} diff --git a/vendor/github.com/blevesearch/zapx/v11/segment.go b/vendor/github.com/blevesearch/zapx/v11/segment.go deleted file mode 100644 
index 7995de48c..000000000 --- a/vendor/github.com/blevesearch/zapx/v11/segment.go +++ /dev/null @@ -1,570 +0,0 @@ -// Copyright (c) 2017 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package zap - -import ( - "bytes" - "encoding/binary" - "fmt" - "io" - "os" - "sync" - "unsafe" - - "github.com/RoaringBitmap/roaring" - mmap "github.com/blevesearch/mmap-go" - segment "github.com/blevesearch/scorch_segment_api/v2" - "github.com/blevesearch/vellum" - "github.com/golang/snappy" -) - -var reflectStaticSizeSegmentBase int - -func init() { - var sb SegmentBase - reflectStaticSizeSegmentBase = int(unsafe.Sizeof(sb)) -} - -// Open returns a zap impl of a segment -func (*ZapPlugin) Open(path string) (segment.Segment, error) { - f, err := os.Open(path) - if err != nil { - return nil, err - } - mm, err := mmap.Map(f, mmap.RDONLY, 0) - if err != nil { - // mmap failed, try to close the file - _ = f.Close() - return nil, err - } - - rv := &Segment{ - SegmentBase: SegmentBase{ - mem: mm[0 : len(mm)-FooterSize], - fieldsMap: make(map[string]uint16), - fieldDvReaders: make(map[uint16]*docValueReader), - fieldFSTs: make(map[uint16]*vellum.FST), - }, - f: f, - mm: mm, - path: path, - refs: 1, - } - rv.SegmentBase.updateSize() - - err = rv.loadConfig() - if err != nil { - _ = rv.Close() - return nil, err - } - - err = rv.loadFields() - if err != nil { - _ = rv.Close() - return nil, err - } - - err = rv.loadDvReaders() - if err != nil { - _ = rv.Close() - 
return nil, err - } - - return rv, nil -} - -// SegmentBase is a memory only, read-only implementation of the -// segment.Segment interface, using zap's data representation. -type SegmentBase struct { - mem []byte - memCRC uint32 - chunkFactor uint32 - fieldsMap map[string]uint16 // fieldName -> fieldID+1 - fieldsInv []string // fieldID -> fieldName - numDocs uint64 - storedIndexOffset uint64 - fieldsIndexOffset uint64 - docValueOffset uint64 - dictLocs []uint64 - fieldDvReaders map[uint16]*docValueReader // naive chunk cache per field - fieldDvNames []string // field names cached in fieldDvReaders - size uint64 - - m sync.Mutex - fieldFSTs map[uint16]*vellum.FST -} - -func (sb *SegmentBase) Size() int { - return int(sb.size) -} - -func (sb *SegmentBase) updateSize() { - sizeInBytes := reflectStaticSizeSegmentBase + - cap(sb.mem) - - // fieldsMap - for k := range sb.fieldsMap { - sizeInBytes += (len(k) + SizeOfString) + SizeOfUint16 - } - - // fieldsInv, dictLocs - for _, entry := range sb.fieldsInv { - sizeInBytes += len(entry) + SizeOfString - } - sizeInBytes += len(sb.dictLocs) * SizeOfUint64 - - // fieldDvReaders - for _, v := range sb.fieldDvReaders { - sizeInBytes += SizeOfUint16 + SizeOfPtr - if v != nil { - sizeInBytes += v.size() - } - } - - sb.size = uint64(sizeInBytes) -} - -func (sb *SegmentBase) AddRef() {} -func (sb *SegmentBase) DecRef() (err error) { return nil } -func (sb *SegmentBase) Close() (err error) { return nil } - -// Segment implements a persisted segment.Segment interface, by -// embedding an mmap()'ed SegmentBase. -type Segment struct { - SegmentBase - - f *os.File - mm mmap.MMap - path string - version uint32 - crc uint32 - - m sync.Mutex // Protects the fields that follow. 
- refs int64 -} - -func (s *Segment) Size() int { - // 8 /* size of file pointer */ - // 4 /* size of version -> uint32 */ - // 4 /* size of crc -> uint32 */ - sizeOfUints := 16 - - sizeInBytes := (len(s.path) + SizeOfString) + sizeOfUints - - // mutex, refs -> int64 - sizeInBytes += 16 - - // do not include the mmap'ed part - return sizeInBytes + s.SegmentBase.Size() - cap(s.mem) -} - -func (s *Segment) AddRef() { - s.m.Lock() - s.refs++ - s.m.Unlock() -} - -func (s *Segment) DecRef() (err error) { - s.m.Lock() - s.refs-- - if s.refs == 0 { - err = s.closeActual() - } - s.m.Unlock() - return err -} - -func (s *Segment) loadConfig() error { - crcOffset := len(s.mm) - 4 - s.crc = binary.BigEndian.Uint32(s.mm[crcOffset : crcOffset+4]) - - verOffset := crcOffset - 4 - s.version = binary.BigEndian.Uint32(s.mm[verOffset : verOffset+4]) - if s.version != Version { - return fmt.Errorf("unsupported version %d", s.version) - } - - chunkOffset := verOffset - 4 - s.chunkFactor = binary.BigEndian.Uint32(s.mm[chunkOffset : chunkOffset+4]) - - docValueOffset := chunkOffset - 8 - s.docValueOffset = binary.BigEndian.Uint64(s.mm[docValueOffset : docValueOffset+8]) - - fieldsIndexOffset := docValueOffset - 8 - s.fieldsIndexOffset = binary.BigEndian.Uint64(s.mm[fieldsIndexOffset : fieldsIndexOffset+8]) - - storedIndexOffset := fieldsIndexOffset - 8 - s.storedIndexOffset = binary.BigEndian.Uint64(s.mm[storedIndexOffset : storedIndexOffset+8]) - - numDocsOffset := storedIndexOffset - 8 - s.numDocs = binary.BigEndian.Uint64(s.mm[numDocsOffset : numDocsOffset+8]) - return nil -} - -func (s *SegmentBase) loadFields() error { - // NOTE for now we assume the fields index immediately precedes - // the footer, and if this changes, need to adjust accordingly (or - // store explicit length), where s.mem was sliced from s.mm in Open(). 
- fieldsIndexEnd := uint64(len(s.mem)) - - // iterate through fields index - var fieldID uint64 - for s.fieldsIndexOffset+(8*fieldID) < fieldsIndexEnd { - addr := binary.BigEndian.Uint64(s.mem[s.fieldsIndexOffset+(8*fieldID) : s.fieldsIndexOffset+(8*fieldID)+8]) - - dictLoc, read := binary.Uvarint(s.mem[addr:fieldsIndexEnd]) - n := uint64(read) - s.dictLocs = append(s.dictLocs, dictLoc) - - var nameLen uint64 - nameLen, read = binary.Uvarint(s.mem[addr+n : fieldsIndexEnd]) - n += uint64(read) - - name := string(s.mem[addr+n : addr+n+nameLen]) - s.fieldsInv = append(s.fieldsInv, name) - s.fieldsMap[name] = uint16(fieldID + 1) - - fieldID++ - } - return nil -} - -// Dictionary returns the term dictionary for the specified field -func (s *SegmentBase) Dictionary(field string) (segment.TermDictionary, error) { - dict, err := s.dictionary(field) - if err == nil && dict == nil { - return emptyDictionary, nil - } - return dict, err -} - -func (sb *SegmentBase) dictionary(field string) (rv *Dictionary, err error) { - fieldIDPlus1 := sb.fieldsMap[field] - if fieldIDPlus1 > 0 { - rv = &Dictionary{ - sb: sb, - field: field, - fieldID: fieldIDPlus1 - 1, - } - - dictStart := sb.dictLocs[rv.fieldID] - if dictStart > 0 { - var ok bool - sb.m.Lock() - if rv.fst, ok = sb.fieldFSTs[rv.fieldID]; !ok { - // read the length of the vellum data - vellumLen, read := binary.Uvarint(sb.mem[dictStart : dictStart+binary.MaxVarintLen64]) - fstBytes := sb.mem[dictStart+uint64(read) : dictStart+uint64(read)+vellumLen] - rv.fst, err = vellum.Load(fstBytes) - if err != nil { - sb.m.Unlock() - return nil, fmt.Errorf("dictionary field %s vellum err: %v", field, err) - } - - sb.fieldFSTs[rv.fieldID] = rv.fst - } - - sb.m.Unlock() - rv.fstReader, err = rv.fst.Reader() - if err != nil { - return nil, fmt.Errorf("dictionary field %s vellum reader err: %v", field, err) - } - } - } - - return rv, nil -} - -// visitDocumentCtx holds data structures that are reusable across -// multiple VisitDocument() 
calls to avoid memory allocations -type visitDocumentCtx struct { - buf []byte - reader bytes.Reader - arrayPos []uint64 -} - -var visitDocumentCtxPool = sync.Pool{ - New: func() interface{} { - reuse := &visitDocumentCtx{} - return reuse - }, -} - -// VisitStoredFields invokes the StoredFieldValueVisitor for each stored field -// for the specified doc number -func (s *SegmentBase) VisitStoredFields(num uint64, visitor segment.StoredFieldValueVisitor) error { - vdc := visitDocumentCtxPool.Get().(*visitDocumentCtx) - defer visitDocumentCtxPool.Put(vdc) - return s.visitStoredFields(vdc, num, visitor) -} - -func (s *SegmentBase) visitStoredFields(vdc *visitDocumentCtx, num uint64, - visitor segment.StoredFieldValueVisitor) error { - // first make sure this is a valid number in this segment - if num < s.numDocs { - meta, compressed := s.getDocStoredMetaAndCompressed(num) - - vdc.reader.Reset(meta) - - // handle _id field special case - idFieldValLen, err := binary.ReadUvarint(&vdc.reader) - if err != nil { - return err - } - idFieldVal := compressed[:idFieldValLen] - - keepGoing := visitor("_id", byte('t'), idFieldVal, nil) - if !keepGoing { - visitDocumentCtxPool.Put(vdc) - return nil - } - - // handle non-"_id" fields - compressed = compressed[idFieldValLen:] - - uncompressed, err := snappy.Decode(vdc.buf[:cap(vdc.buf)], compressed) - if err != nil { - return err - } - - for keepGoing { - field, err := binary.ReadUvarint(&vdc.reader) - if err == io.EOF { - break - } - if err != nil { - return err - } - typ, err := binary.ReadUvarint(&vdc.reader) - if err != nil { - return err - } - offset, err := binary.ReadUvarint(&vdc.reader) - if err != nil { - return err - } - l, err := binary.ReadUvarint(&vdc.reader) - if err != nil { - return err - } - numap, err := binary.ReadUvarint(&vdc.reader) - if err != nil { - return err - } - var arrayPos []uint64 - if numap > 0 { - if cap(vdc.arrayPos) < int(numap) { - vdc.arrayPos = make([]uint64, numap) - } - arrayPos = 
vdc.arrayPos[:numap] - for i := 0; i < int(numap); i++ { - ap, err := binary.ReadUvarint(&vdc.reader) - if err != nil { - return err - } - arrayPos[i] = ap - } - } - - value := uncompressed[offset : offset+l] - keepGoing = visitor(s.fieldsInv[field], byte(typ), value, arrayPos) - } - - vdc.buf = uncompressed - } - return nil -} - -// DocID returns the value of the _id field for the given docNum -func (s *SegmentBase) DocID(num uint64) ([]byte, error) { - if num >= s.numDocs { - return nil, nil - } - - vdc := visitDocumentCtxPool.Get().(*visitDocumentCtx) - - meta, compressed := s.getDocStoredMetaAndCompressed(num) - - vdc.reader.Reset(meta) - - // handle _id field special case - idFieldValLen, err := binary.ReadUvarint(&vdc.reader) - if err != nil { - return nil, err - } - idFieldVal := compressed[:idFieldValLen] - - visitDocumentCtxPool.Put(vdc) - - return idFieldVal, nil -} - -// Count returns the number of documents in this segment. -func (s *SegmentBase) Count() uint64 { - return s.numDocs -} - -// DocNumbers returns a bitset corresponding to the doc numbers of all the -// provided _id strings -func (s *SegmentBase) DocNumbers(ids []string) (*roaring.Bitmap, error) { - rv := roaring.New() - - if len(s.fieldsMap) > 0 { - idDict, err := s.dictionary("_id") - if err != nil { - return nil, err - } - - postingsList := emptyPostingsList - - sMax, err := idDict.fst.GetMaxKey() - if err != nil { - return nil, err - } - sMaxStr := string(sMax) - filteredIds := make([]string, 0, len(ids)) - for _, id := range ids { - if id <= sMaxStr { - filteredIds = append(filteredIds, id) - } - } - - for _, id := range filteredIds { - postingsList, err = idDict.postingsList([]byte(id), nil, postingsList) - if err != nil { - return nil, err - } - postingsList.OrInto(rv) - } - } - - return rv, nil -} - -// Fields returns the field names used in this segment -func (s *SegmentBase) Fields() []string { - return s.fieldsInv -} - -// Path returns the path of this segment on disk -func (s 
*Segment) Path() string { - return s.path -} - -// Close releases all resources associated with this segment -func (s *Segment) Close() (err error) { - return s.DecRef() -} - -func (s *Segment) closeActual() (err error) { - if s.mm != nil { - err = s.mm.Unmap() - } - // try to close file even if unmap failed - if s.f != nil { - err2 := s.f.Close() - if err == nil { - // try to return first error - err = err2 - } - } - return -} - -// some helpers i started adding for the command-line utility - -// Data returns the underlying mmaped data slice -func (s *Segment) Data() []byte { - return s.mm -} - -// CRC returns the CRC value stored in the file footer -func (s *Segment) CRC() uint32 { - return s.crc -} - -// Version returns the file version in the file footer -func (s *Segment) Version() uint32 { - return s.version -} - -// ChunkFactor returns the chunk factor in the file footer -func (s *Segment) ChunkFactor() uint32 { - return s.chunkFactor -} - -// FieldsIndexOffset returns the fields index offset in the file footer -func (s *Segment) FieldsIndexOffset() uint64 { - return s.fieldsIndexOffset -} - -// StoredIndexOffset returns the stored value index offset in the file footer -func (s *Segment) StoredIndexOffset() uint64 { - return s.storedIndexOffset -} - -// DocValueOffset returns the docValue offset in the file footer -func (s *Segment) DocValueOffset() uint64 { - return s.docValueOffset -} - -// NumDocs returns the number of documents in the file footer -func (s *Segment) NumDocs() uint64 { - return s.numDocs -} - -// DictAddr is a helper function to compute the file offset where the -// dictionary is stored for the specified field. 
-func (s *Segment) DictAddr(field string) (uint64, error) { - fieldIDPlus1, ok := s.fieldsMap[field] - if !ok { - return 0, fmt.Errorf("no such field '%s'", field) - } - - return s.dictLocs[fieldIDPlus1-1], nil -} - -func (s *SegmentBase) loadDvReaders() error { - if s.docValueOffset == fieldNotUninverted || s.numDocs == 0 { - return nil - } - - var read uint64 - for fieldID, field := range s.fieldsInv { - var fieldLocStart, fieldLocEnd uint64 - var n int - fieldLocStart, n = binary.Uvarint(s.mem[s.docValueOffset+read : s.docValueOffset+read+binary.MaxVarintLen64]) - if n <= 0 { - return fmt.Errorf("loadDvReaders: failed to read the docvalue offset start for field %d", fieldID) - } - read += uint64(n) - fieldLocEnd, n = binary.Uvarint(s.mem[s.docValueOffset+read : s.docValueOffset+read+binary.MaxVarintLen64]) - if n <= 0 { - return fmt.Errorf("loadDvReaders: failed to read the docvalue offset end for field %d", fieldID) - } - read += uint64(n) - - fieldDvReader, err := s.loadFieldDocValueReader(field, fieldLocStart, fieldLocEnd) - if err != nil { - return err - } - if fieldDvReader != nil { - s.fieldDvReaders[uint16(fieldID)] = fieldDvReader - s.fieldDvNames = append(s.fieldDvNames, field) - } - } - - return nil -} diff --git a/vendor/github.com/blevesearch/zapx/v11/sizes.go b/vendor/github.com/blevesearch/zapx/v11/sizes.go deleted file mode 100644 index 34166ea33..000000000 --- a/vendor/github.com/blevesearch/zapx/v11/sizes.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright (c) 2020 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package zap - -import ( - "reflect" -) - -func init() { - var b bool - SizeOfBool = int(reflect.TypeOf(b).Size()) - var f32 float32 - SizeOfFloat32 = int(reflect.TypeOf(f32).Size()) - var f64 float64 - SizeOfFloat64 = int(reflect.TypeOf(f64).Size()) - var i int - SizeOfInt = int(reflect.TypeOf(i).Size()) - var m map[int]int - SizeOfMap = int(reflect.TypeOf(m).Size()) - var ptr *int - SizeOfPtr = int(reflect.TypeOf(ptr).Size()) - var slice []int - SizeOfSlice = int(reflect.TypeOf(slice).Size()) - var str string - SizeOfString = int(reflect.TypeOf(str).Size()) - var u8 uint8 - SizeOfUint8 = int(reflect.TypeOf(u8).Size()) - var u16 uint16 - SizeOfUint16 = int(reflect.TypeOf(u16).Size()) - var u32 uint32 - SizeOfUint32 = int(reflect.TypeOf(u32).Size()) - var u64 uint64 - SizeOfUint64 = int(reflect.TypeOf(u64).Size()) -} - -var SizeOfBool int -var SizeOfFloat32 int -var SizeOfFloat64 int -var SizeOfInt int -var SizeOfMap int -var SizeOfPtr int -var SizeOfSlice int -var SizeOfString int -var SizeOfUint8 int -var SizeOfUint16 int -var SizeOfUint32 int -var SizeOfUint64 int diff --git a/vendor/github.com/blevesearch/zapx/v11/write.go b/vendor/github.com/blevesearch/zapx/v11/write.go deleted file mode 100644 index cddaedd00..000000000 --- a/vendor/github.com/blevesearch/zapx/v11/write.go +++ /dev/null @@ -1,145 +0,0 @@ -// Copyright (c) 2017 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package zap - -import ( - "encoding/binary" - "io" - - "github.com/RoaringBitmap/roaring" -) - -// writes out the length of the roaring bitmap in bytes as varint -// then writes out the roaring bitmap itself -func writeRoaringWithLen(r *roaring.Bitmap, w io.Writer, - reuseBufVarint []byte) (int, error) { - buf, err := r.ToBytes() - if err != nil { - return 0, err - } - - var tw int - - // write out the length - n := binary.PutUvarint(reuseBufVarint, uint64(len(buf))) - nw, err := w.Write(reuseBufVarint[:n]) - tw += nw - if err != nil { - return tw, err - } - - // write out the roaring bytes - nw, err = w.Write(buf) - tw += nw - if err != nil { - return tw, err - } - - return tw, nil -} - -func persistFields(fieldsInv []string, w *CountHashWriter, dictLocs []uint64) (uint64, error) { - var rv uint64 - var fieldsOffsets []uint64 - - for fieldID, fieldName := range fieldsInv { - // record start of this field - fieldsOffsets = append(fieldsOffsets, uint64(w.Count())) - - // write out the dict location and field name length - _, err := writeUvarints(w, dictLocs[fieldID], uint64(len(fieldName))) - if err != nil { - return 0, err - } - - // write out the field name - _, err = w.Write([]byte(fieldName)) - if err != nil { - return 0, err - } - } - - // now write out the fields index - rv = uint64(w.Count()) - for fieldID := range fieldsInv { - err := binary.Write(w, binary.BigEndian, fieldsOffsets[fieldID]) - if err != nil { - return 0, err - } - } - - return rv, nil -} - -// FooterSize is the size of the footer record in bytes -// crc + ver + chunk + field offset + stored offset + num docs + docValueOffset -const FooterSize = 4 + 4 + 4 + 8 + 8 + 8 + 8 - -func persistFooter(numDocs, storedIndexOffset, fieldsIndexOffset, docValueOffset uint64, - chunkFactor uint32, crcBeforeFooter uint32, writerIn io.Writer) error { - w := NewCountHashWriter(writerIn) - w.crc = 
crcBeforeFooter - - // write out the number of docs - err := binary.Write(w, binary.BigEndian, numDocs) - if err != nil { - return err - } - // write out the stored field index location: - err = binary.Write(w, binary.BigEndian, storedIndexOffset) - if err != nil { - return err - } - // write out the field index location - err = binary.Write(w, binary.BigEndian, fieldsIndexOffset) - if err != nil { - return err - } - // write out the fieldDocValue location - err = binary.Write(w, binary.BigEndian, docValueOffset) - if err != nil { - return err - } - // write out 32-bit chunk factor - err = binary.Write(w, binary.BigEndian, chunkFactor) - if err != nil { - return err - } - // write out 32-bit version - err = binary.Write(w, binary.BigEndian, Version) - if err != nil { - return err - } - // write out CRC-32 of everything upto but not including this CRC - err = binary.Write(w, binary.BigEndian, w.crc) - if err != nil { - return err - } - return nil -} - -func writeUvarints(w io.Writer, vals ...uint64) (tw int, err error) { - buf := make([]byte, binary.MaxVarintLen64) - for _, val := range vals { - n := binary.PutUvarint(buf, val) - var nw int - nw, err = w.Write(buf[:n]) - tw += nw - if err != nil { - return tw, err - } - } - return tw, err -} diff --git a/vendor/github.com/blevesearch/zapx/v11/zap.md b/vendor/github.com/blevesearch/zapx/v11/zap.md deleted file mode 100644 index d74dc548b..000000000 --- a/vendor/github.com/blevesearch/zapx/v11/zap.md +++ /dev/null @@ -1,177 +0,0 @@ -# ZAP File Format - -## Legend - -### Sections - - |========| - | | section - |========| - -### Fixed-size fields - - |--------| |----| |--| |-| - | | uint64 | | uint32 | | uint16 | | uint8 - |--------| |----| |--| |-| - -### Varints - - |~~~~~~~~| - | | varint(up to uint64) - |~~~~~~~~| - -### Arbitrary-length fields - - |--------...---| - | | arbitrary-length field (string, vellum, roaring bitmap) - |--------...---| - -### Chunked data - - [--------] - [ ] - [--------] - -## Overview - 
-Footer section describes the configuration of particular ZAP file. The format of footer is version-dependent, so it is necessary to check `V` field before the parsing. - - |==================================================| - | Stored Fields | - |==================================================| - |-----> | Stored Fields Index | - | |==================================================| - | | Dictionaries + Postings + DocValues | - | |==================================================| - | |---> | DocValues Index | - | | |==================================================| - | | | Fields | - | | |==================================================| - | | |-> | Fields Index | - | | | |========|========|========|========|====|====|====| - | | | | D# | SF | F | FDV | CF | V | CC | (Footer) - | | | |========|====|===|====|===|====|===|====|====|====| - | | | | | | - |-+-+-----------------| | | - | |--------------------------| | - |-------------------------------------| - - D#. Number of Docs. - SF. Stored Fields Index Offset. - F. Field Index Offset. - FDV. Field DocValue Offset. - CF. Chunk Factor. - V. Version. - CC. CRC32. - -## Stored Fields - -Stored Fields Index is `D#` consecutive 64-bit unsigned integers - offsets, where relevant Stored Fields Data records are located. - - 0 [SF] [SF + D# * 8] - | Stored Fields | Stored Fields Index | - |================================|==================================| - | | | - | |--------------------| ||--------|--------|. . .|--------|| - | |-> | Stored Fields Data | || 0 | 1 | | D# - 1 || - | | |--------------------| ||--------|----|---|. . .|--------|| - | | | | | - |===|============================|==============|===================| - | | - |-------------------------------------------| - -Stored Fields Data is an arbitrary size record, which consists of metadata and [Snappy](https://github.com/golang/snappy)-compressed data. 
- - Stored Fields Data - |~~~~~~~~|~~~~~~~~|~~~~~~~~...~~~~~~~~|~~~~~~~~...~~~~~~~~| - | MDS | CDS | MD | CD | - |~~~~~~~~|~~~~~~~~|~~~~~~~~...~~~~~~~~|~~~~~~~~...~~~~~~~~| - - MDS. Metadata size. - CDS. Compressed data size. - MD. Metadata. - CD. Snappy-compressed data. - -## Fields - -Fields Index section located between addresses `F` and `len(file) - len(footer)` and consist of `uint64` values (`F1`, `F2`, ...) which are offsets to records in Fields section. We have `F# = (len(file) - len(footer) - F) / sizeof(uint64)` fields. - - - (...) [F] [F + F#] - | Fields | Fields Index. | - |================================|================================| - | | | - | |~~~~~~~~|~~~~~~~~|---...---|||--------|--------|...|--------|| - ||->| Dict | Length | Name ||| 0 | 1 | | F# - 1 || - || |~~~~~~~~|~~~~~~~~|---...---|||--------|----|---|...|--------|| - || | | | - ||===============================|==============|=================| - | | - |----------------------------------------------| - - -## Dictionaries + Postings - -Each of fields has its own dictionary, encoded in [Vellum](https://github.com/couchbase/vellum) format. Dictionary consists of pairs `(term, offset)`, where `offset` indicates the position of postings (list of documents) for this particular term. - - |================================================================|- Dictionaries + - | | Postings + - | | DocValues - | Freq/Norm (chunked) | - | [~~~~~~|~~~~~~~~~~~~~~~~~~~~~~~~~~~~~] | - | |->[ Freq | Norm (float32 under varint) ] | - | | [~~~~~~|~~~~~~~~~~~~~~~~~~~~~~~~~~~~~] | - | | | - | |------------------------------------------------------------| | - | Location Details (chunked) | | - | [~~~~~~|~~~~~|~~~~~~~|~~~~~|~~~~~~|~~~~~~~~|~~~~~] | | - | |->[ Size | Pos | Start | End | Arr# | ArrPos | ... 
] | | - | | [~~~~~~|~~~~~|~~~~~~~|~~~~~|~~~~~~|~~~~~~~~|~~~~~] | | - | | | | - | |----------------------| | | - | Postings List | | | - | |~~~~~~~~|~~~~~|~~|~~~~~~~~|-----------...--| | | - | |->| F/N | LD | Length | ROARING BITMAP | | | - | | |~~~~~|~~|~~~~~~~~|~~~~~~~~|-----------...--| | | - | | |----------------------------------------------| | - | |--------------------------------------| | - | Dictionary | | - | |~~~~~~~~|--------------------------|-...-| | - | |->| Length | VELLUM DATA : (TERM -> OFFSET) | | - | | |~~~~~~~~|----------------------------...-| | - | | | - |======|=========================================================|- DocValues Index - | | | - |======|=========================================================|- Fields - | | | - | |~~~~|~~~|~~~~~~~~|---...---| | - | | Dict | Length | Name | | - | |~~~~~~~~|~~~~~~~~|---...---| | - | | - |================================================================| - -## DocValues - -DocValues Index is `F#` pairs of varints, one pair per field. Each pair of varints indicates start and end point of DocValues slice. - - |================================================================| - | |------...--| | - | |->| DocValues |<-| | - | | |------...--| | | - |==|=================|===========================================|- DocValues Index - ||~|~~~~~~~~~|~~~~~~~|~~| |~~~~~~~~~~~~~~|~~~~~~~~~~~~|| - || DV1 START | DV1 STOP | . . . . . | DV(F#) START | DV(F#) END || - ||~~~~~~~~~~~|~~~~~~~~~~| |~~~~~~~~~~~~~~|~~~~~~~~~~~~|| - |================================================================| - -DocValues is chunked Snappy-compressed values for each document and field. - - [~~~~~~~~~~~~~~~|~~~~~~|~~~~~~~~~|-...-|~~~~~~|~~~~~~~~~|--------------------...-] - [ Doc# in Chunk | Doc1 | Offset1 | ... | DocN | OffsetN | SNAPPY COMPRESSED DATA ] - [~~~~~~~~~~~~~~~|~~~~~~|~~~~~~~~~|-...-|~~~~~~|~~~~~~~~~|--------------------...-] - -Last 16 bytes are description of chunks. 
- - |~~~~~~~~~~~~...~|----------------|----------------| - | Chunk Sizes | Chunk Size Arr | Chunk# | - |~~~~~~~~~~~~...~|----------------|----------------| diff --git a/vendor/github.com/blevesearch/zapx/v12/.golangci.yml b/vendor/github.com/blevesearch/zapx/v12/.golangci.yml deleted file mode 100644 index f0f2f6067..000000000 --- a/vendor/github.com/blevesearch/zapx/v12/.golangci.yml +++ /dev/null @@ -1,29 +0,0 @@ -linters: - # please, do not use `enable-all`: it's deprecated and will be removed soon. - # inverted configuration with `enable-all` and `disable` is not scalable during updates of golangci-lint - disable-all: true - enable: - - bodyclose - - deadcode - - depguard - - dupl - - errcheck - - gofmt - - goimports - - goprintffuncname - - gosec - - gosimple - - govet - - ineffassign - - interfacer - - misspell - - nakedret - - nolintlint - - rowserrcheck - - staticcheck - - structcheck - - typecheck - - unused - - varcheck - - whitespace - diff --git a/vendor/github.com/blevesearch/zapx/v12/LICENSE b/vendor/github.com/blevesearch/zapx/v12/LICENSE deleted file mode 100644 index 7a4a3ea24..000000000 --- a/vendor/github.com/blevesearch/zapx/v12/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. \ No newline at end of file diff --git a/vendor/github.com/blevesearch/zapx/v12/README.md b/vendor/github.com/blevesearch/zapx/v12/README.md deleted file mode 100644 index 4cbf1a145..000000000 --- a/vendor/github.com/blevesearch/zapx/v12/README.md +++ /dev/null @@ -1,163 +0,0 @@ -# zapx file format - -The zapx module is fork of [zap](https://github.com/blevesearch/zap) module which maintains file format compatibility, but removes dependency on bleve, and instead depends only on the indepenent interface modules: - -- [bleve_index_api](https://github.com/blevesearch/scorch_segment_api) -- [scorch_segment_api](https://github.com/blevesearch/scorch_segment_api) - -Advanced ZAP File Format Documentation is [here](zap.md). - -The file is written in the reverse order that we typically access data. This helps us write in one pass since later sections of the file require file offsets of things we've already written. 
- -Current usage: - -- mmap the entire file -- crc-32 bytes and version are in fixed position at end of the file -- reading remainder of footer could be version specific -- remainder of footer gives us: - - 3 important offsets (docValue , fields index and stored data index) - - 2 important values (number of docs and chunk factor) -- field data is processed once and memoized onto the heap so that we never have to go back to disk for it -- access to stored data by doc number means first navigating to the stored data index, then accessing a fixed position offset into that slice, which gives us the actual address of the data. the first bytes of that section tell us the size of data so that we know where it ends. -- access to all other indexed data follows the following pattern: - - first know the field name -> convert to id - - next navigate to term dictionary for that field - - some operations stop here and do dictionary ops - - next use dictionary to navigate to posting list for a specific term - - walk posting list - - if necessary, walk posting details as we go - - if location info is desired, consult location bitmap to see if it is there - -## stored fields section - -- for each document - - preparation phase: - - produce a slice of metadata bytes and data bytes - - produce these slices in field id order - - field value is appended to the data slice - - metadata slice is varint encoded with the following values for each field value - - field id (uint16) - - field type (byte) - - field value start offset in uncompressed data slice (uint64) - - field value length (uint64) - - field number of array positions (uint64) - - one additional value for each array position (uint64) - - compress the data slice using snappy - - file writing phase: - - remember the start offset for this document - - write out meta data length (varint uint64) - - write out compressed data length (varint uint64) - - write out the metadata bytes - - write out the compressed data bytes - -## stored 
fields idx - -- for each document - - write start offset (remembered from previous section) of stored data (big endian uint64) - -With this index and a known document number, we have direct access to all the stored field data. - -## posting details (freq/norm) section - -- for each posting list - - produce a slice containing multiple consecutive chunks (each chunk is varint stream) - - produce a slice remembering offsets of where each chunk starts - - preparation phase: - - for each hit in the posting list - - if this hit is in next chunk close out encoding of last chunk and record offset start of next - - encode term frequency (uint64) - - encode norm factor (float32) - - file writing phase: - - remember start position for this posting list details - - write out number of chunks that follow (varint uint64) - - write out length of each chunk (each a varint uint64) - - write out the byte slice containing all the chunk data - -If you know the doc number you're interested in, this format lets you jump to the correct chunk (docNum/chunkFactor) directly and then seek within that chunk until you find it. 
- -## posting details (location) section - -- for each posting list - - produce a slice containing multiple consecutive chunks (each chunk is varint stream) - - produce a slice remembering offsets of where each chunk starts - - preparation phase: - - for each hit in the posting list - - if this hit is in next chunk close out encoding of last chunk and record offset start of next - - encode field (uint16) - - encode field pos (uint64) - - encode field start (uint64) - - encode field end (uint64) - - encode number of array positions to follow (uint64) - - encode each array position (each uint64) - - file writing phase: - - remember start position for this posting list details - - write out number of chunks that follow (varint uint64) - - write out length of each chunk (each a varint uint64) - - write out the byte slice containing all the chunk data - -If you know the doc number you're interested in, this format lets you jump to the correct chunk (docNum/chunkFactor) directly and then seek within that chunk until you find it. 
- -## postings list section - -- for each posting list - - preparation phase: - - encode roaring bitmap posting list to bytes (so we know the length) - - file writing phase: - - remember the start position for this posting list - - write freq/norm details offset (remembered from previous, as varint uint64) - - write location details offset (remembered from previous, as varint uint64) - - write length of encoded roaring bitmap - - write the serialized roaring bitmap data - -## dictionary - -- for each field - - preparation phase: - - encode vellum FST with dictionary data pointing to file offset of posting list (remembered from previous) - - file writing phase: - - remember the start position of this persistDictionary - - write length of vellum data (varint uint64) - - write out vellum data - -## fields section - -- for each field - - file writing phase: - - remember start offset for each field - - write dictionary address (remembered from previous) (varint uint64) - - write length of field name (varint uint64) - - write field name bytes - -## fields idx - -- for each field - - file writing phase: - - write big endian uint64 of start offset for each field - -NOTE: currently we don't know or record the length of this fields index. Instead we rely on the fact that we know it immediately precedes a footer of known size. 
- -## fields DocValue - -- for each field - - preparation phase: - - produce a slice containing multiple consecutive chunks, where each chunk is composed of a meta section followed by compressed columnar field data - - produce a slice remembering the length of each chunk - - file writing phase: - - remember the start position of this first field DocValue offset in the footer - - write out number of chunks that follow (varint uint64) - - write out length of each chunk (each a varint uint64) - - write out the byte slice containing all the chunk data - -NOTE: currently the meta header inside each chunk gives clue to the location offsets and size of the data pertaining to a given docID and any -read operation leverage that meta information to extract the document specific data from the file. - -## footer - -- file writing phase - - write number of docs (big endian uint64) - - write stored field index location (big endian uint64) - - write field index location (big endian uint64) - - write field docValue location (big endian uint64) - - write out chunk factor (big endian uint32) - - write out version (big endian uint32) - - write out file CRC of everything preceding this (big endian uint32) diff --git a/vendor/github.com/blevesearch/zapx/v12/build.go b/vendor/github.com/blevesearch/zapx/v12/build.go deleted file mode 100644 index eec4dde08..000000000 --- a/vendor/github.com/blevesearch/zapx/v12/build.go +++ /dev/null @@ -1,156 +0,0 @@ -// Copyright (c) 2017 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package zap - -import ( - "bufio" - "math" - "os" - - "github.com/blevesearch/vellum" -) - -const Version uint32 = 12 - -const Type string = "zap" - -const fieldNotUninverted = math.MaxUint64 - -func (sb *SegmentBase) Persist(path string) error { - return PersistSegmentBase(sb, path) -} - -// PersistSegmentBase persists SegmentBase in the zap file format. -func PersistSegmentBase(sb *SegmentBase, path string) error { - flag := os.O_RDWR | os.O_CREATE - - f, err := os.OpenFile(path, flag, 0600) - if err != nil { - return err - } - - cleanup := func() { - _ = f.Close() - _ = os.Remove(path) - } - - br := bufio.NewWriter(f) - - _, err = br.Write(sb.mem) - if err != nil { - cleanup() - return err - } - - err = persistFooter(sb.numDocs, sb.storedIndexOffset, sb.fieldsIndexOffset, sb.docValueOffset, - sb.chunkMode, sb.memCRC, br) - if err != nil { - cleanup() - return err - } - - err = br.Flush() - if err != nil { - cleanup() - return err - } - - err = f.Sync() - if err != nil { - cleanup() - return err - } - - err = f.Close() - if err != nil { - cleanup() - return err - } - - return nil -} - -func persistStoredFieldValues(fieldID int, - storedFieldValues [][]byte, stf []byte, spf [][]uint64, - curr int, metaEncode varintEncoder, data []byte) ( - int, []byte, error) { - for i := 0; i < len(storedFieldValues); i++ { - // encode field - _, err := metaEncode(uint64(fieldID)) - if err != nil { - return 0, nil, err - } - // encode type - _, err = metaEncode(uint64(stf[i])) - if err != nil { - return 0, nil, err - } - // encode start offset - _, err = metaEncode(uint64(curr)) - if err != nil { - return 0, nil, err - } - // end len - _, err = metaEncode(uint64(len(storedFieldValues[i]))) - if err != nil { - return 0, nil, err - } - // encode number of array pos - _, err = metaEncode(uint64(len(spf[i]))) - if err != nil { - return 0, nil, err - } - // encode all array 
positions - for _, pos := range spf[i] { - _, err = metaEncode(pos) - if err != nil { - return 0, nil, err - } - } - - data = append(data, storedFieldValues[i]...) - curr += len(storedFieldValues[i]) - } - - return curr, data, nil -} - -func InitSegmentBase(mem []byte, memCRC uint32, chunkMode uint32, - fieldsMap map[string]uint16, fieldsInv []string, numDocs uint64, - storedIndexOffset uint64, fieldsIndexOffset uint64, docValueOffset uint64, - dictLocs []uint64) (*SegmentBase, error) { - sb := &SegmentBase{ - mem: mem, - memCRC: memCRC, - chunkMode: chunkMode, - fieldsMap: fieldsMap, - fieldsInv: fieldsInv, - numDocs: numDocs, - storedIndexOffset: storedIndexOffset, - fieldsIndexOffset: fieldsIndexOffset, - docValueOffset: docValueOffset, - dictLocs: dictLocs, - fieldDvReaders: make(map[uint16]*docValueReader), - fieldFSTs: make(map[uint16]*vellum.FST), - } - sb.updateSize() - - err := sb.loadDvReaders() - if err != nil { - return nil, err - } - - return sb, nil -} diff --git a/vendor/github.com/blevesearch/zapx/v12/chunk.go b/vendor/github.com/blevesearch/zapx/v12/chunk.go deleted file mode 100644 index fe9f398da..000000000 --- a/vendor/github.com/blevesearch/zapx/v12/chunk.go +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright (c) 2019 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package zap - -import ( - "fmt" -) - -// LegacyChunkMode was the original chunk mode (always chunk size 1024) -// this mode is still used for chunking doc values. 
-var LegacyChunkMode uint32 = 1024 - -// DefaultChunkMode is the most recent improvement to chunking and should -// be used by default. -var DefaultChunkMode uint32 = 1025 - -func getChunkSize(chunkMode uint32, cardinality uint64, maxDocs uint64) (uint64, error) { - switch { - // any chunkMode <= 1024 will always chunk with chunkSize=chunkMode - case chunkMode <= 1024: - // legacy chunk size - return uint64(chunkMode), nil - - case chunkMode == 1025: - // attempt at simple improvement - // theory - the point of chunking is to put a bound on the maximum number of - // calls to Next() needed to find a random document. ie, you should be able - // to do one jump to the correct chunk, and then walk through at most - // chunk-size items - // previously 1024 was chosen as the chunk size, but this is particularly - // wasteful for low cardinality terms. the observation is that if there - // are less than 1024 items, why not put them all in one chunk, - // this way you'll still achieve the same goal of visiting at most - // chunk-size items. 
- // no attempt is made to tweak any other case - if cardinality <= 1024 { - return maxDocs, nil - } - return 1024, nil - } - return 0, fmt.Errorf("unknown chunk mode %d", chunkMode) -} diff --git a/vendor/github.com/blevesearch/zapx/v12/go.mod b/vendor/github.com/blevesearch/zapx/v12/go.mod deleted file mode 100644 index e6f734cfd..000000000 --- a/vendor/github.com/blevesearch/zapx/v12/go.mod +++ /dev/null @@ -1,13 +0,0 @@ -module github.com/blevesearch/zapx/v12 - -go 1.12 - -require ( - github.com/RoaringBitmap/roaring v0.4.23 - github.com/blevesearch/bleve_index_api v1.0.0 - github.com/blevesearch/mmap-go v1.0.2 - github.com/blevesearch/scorch_segment_api/v2 v2.0.1 - github.com/blevesearch/vellum v1.0.3 - github.com/golang/snappy v0.0.1 - github.com/spf13/cobra v0.0.5 -) diff --git a/vendor/github.com/blevesearch/zapx/v12/go.sum b/vendor/github.com/blevesearch/zapx/v12/go.sum deleted file mode 100644 index 68e45348c..000000000 --- a/vendor/github.com/blevesearch/zapx/v12/go.sum +++ /dev/null @@ -1,73 +0,0 @@ -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/RoaringBitmap/roaring v0.4.23 h1:gpyfd12QohbqhFO4NVDUdoPOCXsyahYRQhINmlHxKeo= -github.com/RoaringBitmap/roaring v0.4.23/go.mod h1:D0gp8kJQgE1A4LQ5wFLggQEyvDi06Mq5mKs52e1TwOo= -github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= -github.com/blevesearch/bleve_index_api v1.0.0 h1:Ds3XeuTxjXCkG6pgIwWDRyooJKNIuOKemnN0N0IkhTU= -github.com/blevesearch/bleve_index_api v1.0.0/go.mod h1:fiwKS0xLEm+gBRgv5mumf0dhgFr2mDgZah1pqv1c1M4= -github.com/blevesearch/mmap-go v1.0.2 h1:JtMHb+FgQCTTYIhtMvimw15dJwu1Y5lrZDMOFXVWPk0= -github.com/blevesearch/mmap-go v1.0.2/go.mod h1:ol2qBqYaOUsGdm7aRMRrYGgPvnwLe6Y+7LMvAB5IbSA= -github.com/blevesearch/scorch_segment_api/v2 v2.0.1 h1:fd+hPtZ8GsbqPK1HslGp7Vhoik4arZteA/IsCEgOisw= -github.com/blevesearch/scorch_segment_api/v2 v2.0.1/go.mod 
h1:lq7yK2jQy1yQjtjTfU931aVqz7pYxEudHaDwOt1tXfU= -github.com/blevesearch/vellum v1.0.3 h1:U86G41A7CtXNzzpIJHM8lSTUqz1Mp8U870TkcdCzZc8= -github.com/blevesearch/vellum v1.0.3/go.mod h1:2u5ax02KeDuNWu4/C+hVQMD6uLN4txH1JbtpaDNLJRo= -github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= -github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/glycerine/go-unsnap-stream v0.0.0-20181221182339-f9677308dec2 h1:Ujru1hufTHVb++eG6OuNDKMxZnGIvF6o/u8q/8h2+I4= -github.com/glycerine/go-unsnap-stream v0.0.0-20181221182339-f9677308dec2/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE= -github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31 h1:gclg6gY70GLy3PbkQ1AERPfmLMMagS60DKF78eWwLn8= -github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24= -github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= -github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/gopherjs/gopherjs v0.0.0-20190910122728-9d188e94fb99 h1:twflg0XRTjwKpxb/jFExr4HGq6on2dEOmnL6FV+fgPw= -github.com/gopherjs/gopherjs v0.0.0-20190910122728-9d188e94fb99/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/inconshreveable/mousetrap v1.0.0 
h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= -github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= -github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg= -github.com/mschoch/smat v0.2.0 h1:8imxQsjDm8yFEAVBe7azKmKSgzSkZXDuKkSq9374khM= -github.com/mschoch/smat v0.2.0/go.mod h1:kc9mz7DoBKqDyiRL7VZN8KvXQMWeTaVnttLRXOlotKw= -github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/philhofer/fwd v1.0.0 h1:UbZqGr5Y38ApvM/V/jEljVxwocdweyH+vmYvRPBnbqQ= -github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= -github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s= -github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= -github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= -github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= -github.com/spf13/pflag v1.0.3/go.mod 
h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/tinylib/msgp v1.1.0 h1:9fQd+ICuRIu/ue4vxJZu6/LzxN0HwMds2nq/0cFvxHU= -github.com/tinylib/msgp v1.1.0/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= -github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= -github.com/willf/bitset v1.1.10 h1:NotGKqX0KwQ72NUzqrjZq5ipPNDQex9lo3WpaS8L2sc= -github.com/willf/bitset v1.1.10/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= -github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= -golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181221143128-b4a75ba826a6/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a h1:aYOabOQFp6Vj6W1F80affTUvO9UxmJRx8K0gsfABByQ= -golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= -gopkg.in/yaml.v2 
v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/blevesearch/zapx/v12/intDecoder.go b/vendor/github.com/blevesearch/zapx/v12/intDecoder.go deleted file mode 100644 index e96809314..000000000 --- a/vendor/github.com/blevesearch/zapx/v12/intDecoder.go +++ /dev/null @@ -1,109 +0,0 @@ -// Copyright (c) 2019 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package zap - -import ( - "encoding/binary" - "fmt" -) - -type chunkedIntDecoder struct { - startOffset uint64 - dataStartOffset uint64 - chunkOffsets []uint64 - curChunkBytes []byte - data []byte - r *memUvarintReader -} - -func newChunkedIntDecoder(buf []byte, offset uint64) *chunkedIntDecoder { - rv := &chunkedIntDecoder{startOffset: offset, data: buf} - var n, numChunks uint64 - var read int - if offset == termNotEncoded { - numChunks = 0 - } else { - numChunks, read = binary.Uvarint(buf[offset+n : offset+n+binary.MaxVarintLen64]) - } - - n += uint64(read) - if cap(rv.chunkOffsets) >= int(numChunks) { - rv.chunkOffsets = rv.chunkOffsets[:int(numChunks)] - } else { - rv.chunkOffsets = make([]uint64, int(numChunks)) - } - for i := 0; i < int(numChunks); i++ { - rv.chunkOffsets[i], read = binary.Uvarint(buf[offset+n : offset+n+binary.MaxVarintLen64]) - n += uint64(read) - } - rv.dataStartOffset = offset + n - return rv -} - -func (d *chunkedIntDecoder) loadChunk(chunk int) error { - if d.startOffset == termNotEncoded { - d.r = 
newMemUvarintReader([]byte(nil)) - return nil - } - - if chunk >= len(d.chunkOffsets) { - return fmt.Errorf("tried to load freq chunk that doesn't exist %d/(%d)", - chunk, len(d.chunkOffsets)) - } - - end, start := d.dataStartOffset, d.dataStartOffset - s, e := readChunkBoundary(chunk, d.chunkOffsets) - start += s - end += e - d.curChunkBytes = d.data[start:end] - if d.r == nil { - d.r = newMemUvarintReader(d.curChunkBytes) - } else { - d.r.Reset(d.curChunkBytes) - } - - return nil -} - -func (d *chunkedIntDecoder) reset() { - d.startOffset = 0 - d.dataStartOffset = 0 - d.chunkOffsets = d.chunkOffsets[:0] - d.curChunkBytes = d.curChunkBytes[:0] - d.data = d.data[:0] - if d.r != nil { - d.r.Reset([]byte(nil)) - } -} - -func (d *chunkedIntDecoder) isNil() bool { - return d.curChunkBytes == nil -} - -func (d *chunkedIntDecoder) readUvarint() (uint64, error) { - return d.r.ReadUvarint() -} - -func (d *chunkedIntDecoder) SkipUvarint() { - d.r.SkipUvarint() -} - -func (d *chunkedIntDecoder) SkipBytes(count int) { - d.r.SkipBytes(count) -} - -func (d *chunkedIntDecoder) Len() int { - return d.r.Len() -} diff --git a/vendor/github.com/blevesearch/zapx/v12/intcoder.go b/vendor/github.com/blevesearch/zapx/v12/intcoder.go deleted file mode 100644 index 7682593e9..000000000 --- a/vendor/github.com/blevesearch/zapx/v12/intcoder.go +++ /dev/null @@ -1,203 +0,0 @@ -// Copyright (c) 2017 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package zap - -import ( - "bytes" - "encoding/binary" - "io" - "math" -) - -const termNotEncoded = math.MaxUint64 - -type chunkedIntCoder struct { - final []byte - chunkSize uint64 - chunkBuf bytes.Buffer - chunkLens []uint64 - currChunk uint64 - - buf []byte -} - -// newChunkedIntCoder returns a new chunk int coder which packs data into -// chunks based on the provided chunkSize and supports up to the specified -// maxDocNum -func newChunkedIntCoder(chunkSize uint64, maxDocNum uint64) *chunkedIntCoder { - total := maxDocNum/chunkSize + 1 - rv := &chunkedIntCoder{ - chunkSize: chunkSize, - chunkLens: make([]uint64, total), - final: make([]byte, 0, 64), - } - - return rv -} - -// Reset lets you reuse this chunked int coder. buffers are reset and reused -// from previous use. you cannot change the chunk size or max doc num. -func (c *chunkedIntCoder) Reset() { - c.final = c.final[:0] - c.chunkBuf.Reset() - c.currChunk = 0 - for i := range c.chunkLens { - c.chunkLens[i] = 0 - } -} - -// SetChunkSize changes the chunk size. It is only valid to do so -// with a new chunkedIntCoder, or immediately after calling Reset() -func (c *chunkedIntCoder) SetChunkSize(chunkSize uint64, maxDocNum uint64) { - total := int(maxDocNum/chunkSize + 1) - c.chunkSize = chunkSize - if cap(c.chunkLens) < total { - c.chunkLens = make([]uint64, total) - } else { - c.chunkLens = c.chunkLens[:total] - } -} - -// Add encodes the provided integers into the correct chunk for the provided -// doc num. You MUST call Add() with increasing docNums. 
-func (c *chunkedIntCoder) Add(docNum uint64, vals ...uint64) error { - chunk := docNum / c.chunkSize - if chunk != c.currChunk { - // starting a new chunk - c.Close() - c.chunkBuf.Reset() - c.currChunk = chunk - } - - if len(c.buf) < binary.MaxVarintLen64 { - c.buf = make([]byte, binary.MaxVarintLen64) - } - - for _, val := range vals { - wb := binary.PutUvarint(c.buf, val) - _, err := c.chunkBuf.Write(c.buf[:wb]) - if err != nil { - return err - } - } - - return nil -} - -func (c *chunkedIntCoder) AddBytes(docNum uint64, buf []byte) error { - chunk := docNum / c.chunkSize - if chunk != c.currChunk { - // starting a new chunk - c.Close() - c.chunkBuf.Reset() - c.currChunk = chunk - } - - _, err := c.chunkBuf.Write(buf) - return err -} - -// Close indicates you are done calling Add() this allows the final chunk -// to be encoded. -func (c *chunkedIntCoder) Close() { - encodingBytes := c.chunkBuf.Bytes() - c.chunkLens[c.currChunk] = uint64(len(encodingBytes)) - c.final = append(c.final, encodingBytes...) - c.currChunk = uint64(cap(c.chunkLens)) // sentinel to detect double close -} - -// Write commits all the encoded chunked integers to the provided writer. 
-func (c *chunkedIntCoder) Write(w io.Writer) (int, error) { - bufNeeded := binary.MaxVarintLen64 * (1 + len(c.chunkLens)) - if len(c.buf) < bufNeeded { - c.buf = make([]byte, bufNeeded) - } - buf := c.buf - - // convert the chunk lengths into chunk offsets - chunkOffsets := modifyLengthsToEndOffsets(c.chunkLens) - - // write out the number of chunks & each chunk offsets - n := binary.PutUvarint(buf, uint64(len(chunkOffsets))) - for _, chunkOffset := range chunkOffsets { - n += binary.PutUvarint(buf[n:], chunkOffset) - } - - tw, err := w.Write(buf[:n]) - if err != nil { - return tw, err - } - - // write out the data - nw, err := w.Write(c.final) - tw += nw - if err != nil { - return tw, err - } - return tw, nil -} - -// writeAt commits all the encoded chunked integers to the provided writer -// and returns the starting offset, total bytes written and an error -func (c *chunkedIntCoder) writeAt(w io.Writer) (uint64, int, error) { - startOffset := uint64(termNotEncoded) - if len(c.final) <= 0 { - return startOffset, 0, nil - } - - if chw := w.(*CountHashWriter); chw != nil { - startOffset = uint64(chw.Count()) - } - - tw, err := c.Write(w) - return startOffset, tw, err -} - -func (c *chunkedIntCoder) FinalSize() int { - return len(c.final) -} - -// modifyLengthsToEndOffsets converts the chunk length array -// to a chunk offset array. The readChunkBoundary -// will figure out the start and end of every chunk from -// these offsets. Starting offset of i'th index is stored -// in i-1'th position except for 0'th index and ending offset -// is stored at i'th index position. -// For 0'th element, starting position is always zero. 
-// eg: -// Lens -> 5 5 5 5 => 5 10 15 20 -// Lens -> 0 5 0 5 => 0 5 5 10 -// Lens -> 0 0 0 5 => 0 0 0 5 -// Lens -> 5 0 0 0 => 5 5 5 5 -// Lens -> 0 5 0 0 => 0 5 5 5 -// Lens -> 0 0 5 0 => 0 0 5 5 -func modifyLengthsToEndOffsets(lengths []uint64) []uint64 { - var runningOffset uint64 - var index, i int - for i = 1; i <= len(lengths); i++ { - runningOffset += lengths[i-1] - lengths[index] = runningOffset - index++ - } - return lengths -} - -func readChunkBoundary(chunk int, offsets []uint64) (uint64, uint64) { - var start uint64 - if chunk > 0 { - start = offsets[chunk-1] - } - return start, offsets[chunk] -} diff --git a/vendor/github.com/blevesearch/zapx/v12/memuvarint.go b/vendor/github.com/blevesearch/zapx/v12/memuvarint.go deleted file mode 100644 index 0c10c83a4..000000000 --- a/vendor/github.com/blevesearch/zapx/v12/memuvarint.go +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright (c) 2020 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package zap - -import ( - "fmt" -) - -type memUvarintReader struct { - C int // index of next byte to read from S - S []byte -} - -func newMemUvarintReader(s []byte) *memUvarintReader { - return &memUvarintReader{S: s} -} - -// Len returns the number of unread bytes. -func (r *memUvarintReader) Len() int { - n := len(r.S) - r.C - if n < 0 { - return 0 - } - return n -} - -// ReadUvarint reads an encoded uint64. The original code this was -// based on is at encoding/binary/ReadUvarint(). 
-func (r *memUvarintReader) ReadUvarint() (uint64, error) { - var x uint64 - var s uint - var C = r.C - var S = r.S - - for { - b := S[C] - C++ - - if b < 0x80 { - r.C = C - - // why 63? The original code had an 'i += 1' loop var and - // checked for i > 9 || i == 9 ...; but, we no longer - // check for the i var, but instead check here for s, - // which is incremented by 7. So, 7*9 == 63. - // - // why the "extra" >= check? The normal case is that s < - // 63, so we check this single >= guard first so that we - // hit the normal, nil-error return pathway sooner. - if s >= 63 && (s > 63 || s == 63 && b > 1) { - return 0, fmt.Errorf("memUvarintReader overflow") - } - - return x | uint64(b)< 0 { - storedIndexOffset, newDocNums, err = mergeStoredAndRemap(segments, drops, - fieldsMap, fieldsInv, fieldsSame, numDocs, cr, closeCh) - if err != nil { - return nil, 0, 0, 0, 0, nil, nil, nil, err - } - - dictLocs, docValueOffset, err = persistMergedRest(segments, drops, - fieldsInv, fieldsMap, fieldsSame, - newDocNums, numDocs, chunkMode, cr, closeCh) - if err != nil { - return nil, 0, 0, 0, 0, nil, nil, nil, err - } - } else { - dictLocs = make([]uint64, len(fieldsInv)) - } - - fieldsIndexOffset, err = persistFields(fieldsInv, cr, dictLocs) - if err != nil { - return nil, 0, 0, 0, 0, nil, nil, nil, err - } - - return newDocNums, numDocs, storedIndexOffset, fieldsIndexOffset, docValueOffset, dictLocs, fieldsInv, fieldsMap, nil -} - -// mapFields takes the fieldsInv list and returns a map of fieldName -// to fieldID+1 -func mapFields(fields []string) map[string]uint16 { - rv := make(map[string]uint16, len(fields)) - for i, fieldName := range fields { - rv[fieldName] = uint16(i) + 1 - } - return rv -} - -// computeNewDocCount determines how many documents will be in the newly -// merged segment when obsoleted docs are dropped -func computeNewDocCount(segments []*SegmentBase, drops []*roaring.Bitmap) uint64 { - var newDocCount uint64 - for segI, segment := range segments { - 
newDocCount += segment.numDocs - if drops[segI] != nil { - newDocCount -= drops[segI].GetCardinality() - } - } - return newDocCount -} - -func persistMergedRest(segments []*SegmentBase, dropsIn []*roaring.Bitmap, - fieldsInv []string, fieldsMap map[string]uint16, fieldsSame bool, - newDocNumsIn [][]uint64, newSegDocCount uint64, chunkMode uint32, - w *CountHashWriter, closeCh chan struct{}) ([]uint64, uint64, error) { - var bufMaxVarintLen64 []byte = make([]byte, binary.MaxVarintLen64) - var bufLoc []uint64 - - var postings *PostingsList - var postItr *PostingsIterator - - rv := make([]uint64, len(fieldsInv)) - fieldDvLocsStart := make([]uint64, len(fieldsInv)) - fieldDvLocsEnd := make([]uint64, len(fieldsInv)) - - // these int coders are initialized with chunk size 1024 - // however this will be reset to the correct chunk size - // while processing each individual field-term section - tfEncoder := newChunkedIntCoder(1024, newSegDocCount-1) - locEncoder := newChunkedIntCoder(1024, newSegDocCount-1) - - var vellumBuf bytes.Buffer - newVellum, err := vellum.New(&vellumBuf, nil) - if err != nil { - return nil, 0, err - } - - newRoaring := roaring.NewBitmap() - - // for each field - for fieldID, fieldName := range fieldsInv { - // collect FST iterators from all active segments for this field - var newDocNums [][]uint64 - var drops []*roaring.Bitmap - var dicts []*Dictionary - var itrs []vellum.Iterator - - var segmentsInFocus []*SegmentBase - - for segmentI, segment := range segments { - // check for the closure in meantime - if isClosed(closeCh) { - return nil, 0, seg.ErrClosed - } - - dict, err2 := segment.dictionary(fieldName) - if err2 != nil { - return nil, 0, err2 - } - if dict != nil && dict.fst != nil { - itr, err2 := dict.fst.Iterator(nil, nil) - if err2 != nil && err2 != vellum.ErrIteratorDone { - return nil, 0, err2 - } - if itr != nil { - newDocNums = append(newDocNums, newDocNumsIn[segmentI]) - if dropsIn[segmentI] != nil && !dropsIn[segmentI].IsEmpty() { 
- drops = append(drops, dropsIn[segmentI]) - } else { - drops = append(drops, nil) - } - dicts = append(dicts, dict) - itrs = append(itrs, itr) - segmentsInFocus = append(segmentsInFocus, segment) - } - } - } - - var prevTerm []byte - - newRoaring.Clear() - - var lastDocNum, lastFreq, lastNorm uint64 - - // determines whether to use "1-hit" encoding optimization - // when a term appears in only 1 doc, with no loc info, - // has freq of 1, and the docNum fits into 31-bits - use1HitEncoding := func(termCardinality uint64) (bool, uint64, uint64) { - if termCardinality == uint64(1) && locEncoder.FinalSize() <= 0 { - docNum := uint64(newRoaring.Minimum()) - if under32Bits(docNum) && docNum == lastDocNum && lastFreq == 1 { - return true, docNum, lastNorm - } - } - return false, 0, 0 - } - - finishTerm := func(term []byte) error { - tfEncoder.Close() - locEncoder.Close() - - postingsOffset, err := writePostings(newRoaring, - tfEncoder, locEncoder, use1HitEncoding, w, bufMaxVarintLen64) - if err != nil { - return err - } - - if postingsOffset > 0 { - err = newVellum.Insert(term, postingsOffset) - if err != nil { - return err - } - } - - newRoaring.Clear() - - tfEncoder.Reset() - locEncoder.Reset() - - lastDocNum = 0 - lastFreq = 0 - lastNorm = 0 - - return nil - } - - enumerator, err := newEnumerator(itrs) - - for err == nil { - term, itrI, postingsOffset := enumerator.Current() - - if !bytes.Equal(prevTerm, term) { - // check for the closure in meantime - if isClosed(closeCh) { - return nil, 0, seg.ErrClosed - } - - // if the term changed, write out the info collected - // for the previous term - err = finishTerm(prevTerm) - if err != nil { - return nil, 0, err - } - } - if !bytes.Equal(prevTerm, term) || prevTerm == nil { - // compute cardinality of field-term in new seg - var newCard uint64 - lowItrIdxs, lowItrVals := enumerator.GetLowIdxsAndValues() - for i, idx := range lowItrIdxs { - pl, err := dicts[idx].postingsListFromOffset(lowItrVals[i], drops[idx], nil) - if 
err != nil { - return nil, 0, err - } - newCard += pl.Count() - } - // compute correct chunk size with this - chunkSize, err := getChunkSize(chunkMode, newCard, newSegDocCount) - if err != nil { - return nil, 0, err - } - // update encoders chunk - tfEncoder.SetChunkSize(chunkSize, newSegDocCount-1) - locEncoder.SetChunkSize(chunkSize, newSegDocCount-1) - } - - postings, err = dicts[itrI].postingsListFromOffset( - postingsOffset, drops[itrI], postings) - if err != nil { - return nil, 0, err - } - - postItr = postings.iterator(true, true, true, postItr) - - // can no longer optimize by copying, since chunk factor could have changed - lastDocNum, lastFreq, lastNorm, bufLoc, err = mergeTermFreqNormLocs( - fieldsMap, term, postItr, newDocNums[itrI], newRoaring, - tfEncoder, locEncoder, bufLoc) - - if err != nil { - return nil, 0, err - } - - prevTerm = prevTerm[:0] // copy to prevTerm in case Next() reuses term mem - prevTerm = append(prevTerm, term...) - - err = enumerator.Next() - } - if err != vellum.ErrIteratorDone { - return nil, 0, err - } - - err = finishTerm(prevTerm) - if err != nil { - return nil, 0, err - } - - dictOffset := uint64(w.Count()) - - err = newVellum.Close() - if err != nil { - return nil, 0, err - } - vellumData := vellumBuf.Bytes() - - // write out the length of the vellum data - n := binary.PutUvarint(bufMaxVarintLen64, uint64(len(vellumData))) - _, err = w.Write(bufMaxVarintLen64[:n]) - if err != nil { - return nil, 0, err - } - - // write this vellum to disk - _, err = w.Write(vellumData) - if err != nil { - return nil, 0, err - } - - rv[fieldID] = dictOffset - - // get the field doc value offset (start) - fieldDvLocsStart[fieldID] = uint64(w.Count()) - - // update the field doc values - // NOTE: doc values continue to use legacy chunk mode - chunkSize, err := getChunkSize(LegacyChunkMode, 0, 0) - if err != nil { - return nil, 0, err - } - fdvEncoder := newChunkedContentCoder(chunkSize, newSegDocCount-1, w, true) - - fdvReadersAvailable := 
false - var dvIterClone *docValueReader - for segmentI, segment := range segmentsInFocus { - // check for the closure in meantime - if isClosed(closeCh) { - return nil, 0, seg.ErrClosed - } - - fieldIDPlus1 := uint16(segment.fieldsMap[fieldName]) - if dvIter, exists := segment.fieldDvReaders[fieldIDPlus1-1]; exists && - dvIter != nil { - fdvReadersAvailable = true - dvIterClone = dvIter.cloneInto(dvIterClone) - err = dvIterClone.iterateAllDocValues(segment, func(docNum uint64, terms []byte) error { - if newDocNums[segmentI][docNum] == docDropped { - return nil - } - err := fdvEncoder.Add(newDocNums[segmentI][docNum], terms) - if err != nil { - return err - } - return nil - }) - if err != nil { - return nil, 0, err - } - } - } - - if fdvReadersAvailable { - err = fdvEncoder.Close() - if err != nil { - return nil, 0, err - } - - // persist the doc value details for this field - _, err = fdvEncoder.Write() - if err != nil { - return nil, 0, err - } - - // get the field doc value offset (end) - fieldDvLocsEnd[fieldID] = uint64(w.Count()) - } else { - fieldDvLocsStart[fieldID] = fieldNotUninverted - fieldDvLocsEnd[fieldID] = fieldNotUninverted - } - - // reset vellum buffer and vellum builder - vellumBuf.Reset() - err = newVellum.Reset(&vellumBuf) - if err != nil { - return nil, 0, err - } - } - - fieldDvLocsOffset := uint64(w.Count()) - - buf := bufMaxVarintLen64 - for i := 0; i < len(fieldDvLocsStart); i++ { - n := binary.PutUvarint(buf, fieldDvLocsStart[i]) - _, err := w.Write(buf[:n]) - if err != nil { - return nil, 0, err - } - n = binary.PutUvarint(buf, fieldDvLocsEnd[i]) - _, err = w.Write(buf[:n]) - if err != nil { - return nil, 0, err - } - } - - return rv, fieldDvLocsOffset, nil -} - -func mergeTermFreqNormLocs(fieldsMap map[string]uint16, term []byte, postItr *PostingsIterator, - newDocNums []uint64, newRoaring *roaring.Bitmap, - tfEncoder *chunkedIntCoder, locEncoder *chunkedIntCoder, bufLoc []uint64) ( - lastDocNum uint64, lastFreq uint64, lastNorm uint64, 
bufLocOut []uint64, err error) { - next, err := postItr.Next() - for next != nil && err == nil { - hitNewDocNum := newDocNums[next.Number()] - if hitNewDocNum == docDropped { - return 0, 0, 0, nil, fmt.Errorf("see hit with dropped docNum") - } - - newRoaring.Add(uint32(hitNewDocNum)) - - nextFreq := next.Frequency() - nextNorm := uint64(math.Float32bits(float32(next.Norm()))) - - locs := next.Locations() - - err = tfEncoder.Add(hitNewDocNum, - encodeFreqHasLocs(nextFreq, len(locs) > 0), nextNorm) - if err != nil { - return 0, 0, 0, nil, err - } - - if len(locs) > 0 { - numBytesLocs := 0 - for _, loc := range locs { - ap := loc.ArrayPositions() - numBytesLocs += totalUvarintBytes(uint64(fieldsMap[loc.Field()]-1), - loc.Pos(), loc.Start(), loc.End(), uint64(len(ap)), ap) - } - - err = locEncoder.Add(hitNewDocNum, uint64(numBytesLocs)) - if err != nil { - return 0, 0, 0, nil, err - } - - for _, loc := range locs { - ap := loc.ArrayPositions() - if cap(bufLoc) < 5+len(ap) { - bufLoc = make([]uint64, 0, 5+len(ap)) - } - args := bufLoc[0:5] - args[0] = uint64(fieldsMap[loc.Field()] - 1) - args[1] = loc.Pos() - args[2] = loc.Start() - args[3] = loc.End() - args[4] = uint64(len(ap)) - args = append(args, ap...) - err = locEncoder.Add(hitNewDocNum, args...) 
- if err != nil { - return 0, 0, 0, nil, err - } - } - } - - lastDocNum = hitNewDocNum - lastFreq = nextFreq - lastNorm = nextNorm - - next, err = postItr.Next() - } - - return lastDocNum, lastFreq, lastNorm, bufLoc, err -} - -func writePostings(postings *roaring.Bitmap, tfEncoder, locEncoder *chunkedIntCoder, - use1HitEncoding func(uint64) (bool, uint64, uint64), - w *CountHashWriter, bufMaxVarintLen64 []byte) ( - offset uint64, err error) { - termCardinality := postings.GetCardinality() - if termCardinality <= 0 { - return 0, nil - } - - if use1HitEncoding != nil { - encodeAs1Hit, docNum1Hit, normBits1Hit := use1HitEncoding(termCardinality) - if encodeAs1Hit { - return FSTValEncode1Hit(docNum1Hit, normBits1Hit), nil - } - } - - var tfOffset uint64 - tfOffset, _, err = tfEncoder.writeAt(w) - if err != nil { - return 0, err - } - - var locOffset uint64 - locOffset, _, err = locEncoder.writeAt(w) - if err != nil { - return 0, err - } - - postingsOffset := uint64(w.Count()) - - n := binary.PutUvarint(bufMaxVarintLen64, tfOffset) - _, err = w.Write(bufMaxVarintLen64[:n]) - if err != nil { - return 0, err - } - - n = binary.PutUvarint(bufMaxVarintLen64, locOffset) - _, err = w.Write(bufMaxVarintLen64[:n]) - if err != nil { - return 0, err - } - - _, err = writeRoaringWithLen(postings, w, bufMaxVarintLen64) - if err != nil { - return 0, err - } - - return postingsOffset, nil -} - -type varintEncoder func(uint64) (int, error) - -func mergeStoredAndRemap(segments []*SegmentBase, drops []*roaring.Bitmap, - fieldsMap map[string]uint16, fieldsInv []string, fieldsSame bool, newSegDocCount uint64, - w *CountHashWriter, closeCh chan struct{}) (uint64, [][]uint64, error) { - var rv [][]uint64 // The remapped or newDocNums for each segment. 
- - var newDocNum uint64 - - var curr int - var data, compressed []byte - var metaBuf bytes.Buffer - varBuf := make([]byte, binary.MaxVarintLen64) - metaEncode := func(val uint64) (int, error) { - wb := binary.PutUvarint(varBuf, val) - return metaBuf.Write(varBuf[:wb]) - } - - vals := make([][][]byte, len(fieldsInv)) - typs := make([][]byte, len(fieldsInv)) - poss := make([][][]uint64, len(fieldsInv)) - - var posBuf []uint64 - - docNumOffsets := make([]uint64, newSegDocCount) - - vdc := visitDocumentCtxPool.Get().(*visitDocumentCtx) - defer visitDocumentCtxPool.Put(vdc) - - // for each segment - for segI, segment := range segments { - // check for the closure in meantime - if isClosed(closeCh) { - return 0, nil, seg.ErrClosed - } - - segNewDocNums := make([]uint64, segment.numDocs) - - dropsI := drops[segI] - - // optimize when the field mapping is the same across all - // segments and there are no deletions, via byte-copying - // of stored docs bytes directly to the writer - if fieldsSame && (dropsI == nil || dropsI.GetCardinality() == 0) { - err := segment.copyStoredDocs(newDocNum, docNumOffsets, w) - if err != nil { - return 0, nil, err - } - - for i := uint64(0); i < segment.numDocs; i++ { - segNewDocNums[i] = newDocNum - newDocNum++ - } - rv = append(rv, segNewDocNums) - - continue - } - - // for each doc num - for docNum := uint64(0); docNum < segment.numDocs; docNum++ { - // TODO: roaring's API limits docNums to 32-bits? 
- if dropsI != nil && dropsI.Contains(uint32(docNum)) { - segNewDocNums[docNum] = docDropped - continue - } - - segNewDocNums[docNum] = newDocNum - - curr = 0 - metaBuf.Reset() - data = data[:0] - - posTemp := posBuf - - // collect all the data - for i := 0; i < len(fieldsInv); i++ { - vals[i] = vals[i][:0] - typs[i] = typs[i][:0] - poss[i] = poss[i][:0] - } - err := segment.visitStoredFields(vdc, docNum, func(field string, typ byte, value []byte, pos []uint64) bool { - fieldID := int(fieldsMap[field]) - 1 - vals[fieldID] = append(vals[fieldID], value) - typs[fieldID] = append(typs[fieldID], typ) - - // copy array positions to preserve them beyond the scope of this callback - var curPos []uint64 - if len(pos) > 0 { - if cap(posTemp) < len(pos) { - posBuf = make([]uint64, len(pos)*len(fieldsInv)) - posTemp = posBuf - } - curPos = posTemp[0:len(pos)] - copy(curPos, pos) - posTemp = posTemp[len(pos):] - } - poss[fieldID] = append(poss[fieldID], curPos) - - return true - }) - if err != nil { - return 0, nil, err - } - - // _id field special case optimizes ExternalID() lookups - idFieldVal := vals[uint16(0)][0] - _, err = metaEncode(uint64(len(idFieldVal))) - if err != nil { - return 0, nil, err - } - - // now walk the non-"_id" fields in order - for fieldID := 1; fieldID < len(fieldsInv); fieldID++ { - storedFieldValues := vals[fieldID] - - stf := typs[fieldID] - spf := poss[fieldID] - - var err2 error - curr, data, err2 = persistStoredFieldValues(fieldID, - storedFieldValues, stf, spf, curr, metaEncode, data) - if err2 != nil { - return 0, nil, err2 - } - } - - metaBytes := metaBuf.Bytes() - - compressed = snappy.Encode(compressed[:cap(compressed)], data) - - // record where we're about to start writing - docNumOffsets[newDocNum] = uint64(w.Count()) - - // write out the meta len and compressed data len - _, err = writeUvarints(w, - uint64(len(metaBytes)), - uint64(len(idFieldVal)+len(compressed))) - if err != nil { - return 0, nil, err - } - // now write the meta - _, 
err = w.Write(metaBytes) - if err != nil { - return 0, nil, err - } - // now write the _id field val (counted as part of the 'compressed' data) - _, err = w.Write(idFieldVal) - if err != nil { - return 0, nil, err - } - // now write the compressed data - _, err = w.Write(compressed) - if err != nil { - return 0, nil, err - } - - newDocNum++ - } - - rv = append(rv, segNewDocNums) - } - - // return value is the start of the stored index - storedIndexOffset := uint64(w.Count()) - - // now write out the stored doc index - for _, docNumOffset := range docNumOffsets { - err := binary.Write(w, binary.BigEndian, docNumOffset) - if err != nil { - return 0, nil, err - } - } - - return storedIndexOffset, rv, nil -} - -// copyStoredDocs writes out a segment's stored doc info, optimized by -// using a single Write() call for the entire set of bytes. The -// newDocNumOffsets is filled with the new offsets for each doc. -func (s *SegmentBase) copyStoredDocs(newDocNum uint64, newDocNumOffsets []uint64, - w *CountHashWriter) error { - if s.numDocs <= 0 { - return nil - } - - indexOffset0, storedOffset0, _, _, _ := - s.getDocStoredOffsets(0) // the segment's first doc - - indexOffsetN, storedOffsetN, readN, metaLenN, dataLenN := - s.getDocStoredOffsets(s.numDocs - 1) // the segment's last doc - - storedOffset0New := uint64(w.Count()) - - storedBytes := s.mem[storedOffset0 : storedOffsetN+readN+metaLenN+dataLenN] - _, err := w.Write(storedBytes) - if err != nil { - return err - } - - // remap the storedOffset's for the docs into new offsets relative - // to storedOffset0New, filling the given docNumOffsetsOut array - for indexOffset := indexOffset0; indexOffset <= indexOffsetN; indexOffset += 8 { - storedOffset := binary.BigEndian.Uint64(s.mem[indexOffset : indexOffset+8]) - storedOffsetNew := storedOffset - storedOffset0 + storedOffset0New - newDocNumOffsets[newDocNum] = storedOffsetNew - newDocNum += 1 - } - - return nil -} - -// mergeFields builds a unified list of fields used 
across all the -// input segments, and computes whether the fields are the same across -// segments (which depends on fields to be sorted in the same way -// across segments) -func mergeFields(segments []*SegmentBase) (bool, []string) { - fieldsSame := true - - var segment0Fields []string - if len(segments) > 0 { - segment0Fields = segments[0].Fields() - } - - fieldsExist := map[string]struct{}{} - for _, segment := range segments { - fields := segment.Fields() - for fieldi, field := range fields { - fieldsExist[field] = struct{}{} - if len(segment0Fields) != len(fields) || segment0Fields[fieldi] != field { - fieldsSame = false - } - } - } - - rv := make([]string, 0, len(fieldsExist)) - // ensure _id stays first - rv = append(rv, "_id") - for k := range fieldsExist { - if k != "_id" { - rv = append(rv, k) - } - } - - sort.Strings(rv[1:]) // leave _id as first - - return fieldsSame, rv -} - -func isClosed(closeCh chan struct{}) bool { - select { - case <-closeCh: - return true - default: - return false - } -} diff --git a/vendor/github.com/blevesearch/zapx/v12/new.go b/vendor/github.com/blevesearch/zapx/v12/new.go deleted file mode 100644 index b4e0d0341..000000000 --- a/vendor/github.com/blevesearch/zapx/v12/new.go +++ /dev/null @@ -1,830 +0,0 @@ -// Copyright (c) 2018 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package zap - -import ( - "bytes" - "encoding/binary" - "math" - "sort" - "sync" - - "github.com/RoaringBitmap/roaring" - index "github.com/blevesearch/bleve_index_api" - segment "github.com/blevesearch/scorch_segment_api/v2" - "github.com/blevesearch/vellum" - "github.com/golang/snappy" -) - -var NewSegmentBufferNumResultsBump int = 100 -var NewSegmentBufferNumResultsFactor float64 = 1.0 -var NewSegmentBufferAvgBytesPerDocFactor float64 = 1.0 - -// ValidateDocFields can be set by applications to perform additional checks -// on fields in a document being added to a new segment, by default it does -// nothing. -// This API is experimental and may be removed at any time. -var ValidateDocFields = func(field index.Field) error { - return nil -} - -// New creates an in-memory zap-encoded SegmentBase from a set of Documents -func (z *ZapPlugin) New(results []index.Document) ( - segment.Segment, uint64, error) { - return z.newWithChunkMode(results, DefaultChunkMode) -} - -func (*ZapPlugin) newWithChunkMode(results []index.Document, - chunkMode uint32) (segment.Segment, uint64, error) { - s := interimPool.Get().(*interim) - - var br bytes.Buffer - if s.lastNumDocs > 0 { - // use previous results to initialize the buf with an estimate - // size, but note that the interim instance comes from a - // global interimPool, so multiple scorch instances indexing - // different docs can lead to low quality estimates - estimateAvgBytesPerDoc := int(float64(s.lastOutSize/s.lastNumDocs) * - NewSegmentBufferNumResultsFactor) - estimateNumResults := int(float64(len(results)+NewSegmentBufferNumResultsBump) * - NewSegmentBufferAvgBytesPerDocFactor) - br.Grow(estimateAvgBytesPerDoc * estimateNumResults) - } - - s.results = results - s.chunkMode = chunkMode - s.w = NewCountHashWriter(&br) - - storedIndexOffset, fieldsIndexOffset, fdvIndexOffset, dictOffsets, - err := s.convert() - if err != nil { - return nil, uint64(0), err - } - - sb, err := InitSegmentBase(br.Bytes(), s.w.Sum32(), 
chunkMode, - s.FieldsMap, s.FieldsInv, uint64(len(results)), - storedIndexOffset, fieldsIndexOffset, fdvIndexOffset, dictOffsets) - - if err == nil && s.reset() == nil { - s.lastNumDocs = len(results) - s.lastOutSize = len(br.Bytes()) - interimPool.Put(s) - } - - return sb, uint64(len(br.Bytes())), err -} - -var interimPool = sync.Pool{New: func() interface{} { return &interim{} }} - -// interim holds temporary working data used while converting from -// analysis results to a zap-encoded segment -type interim struct { - results []index.Document - - chunkMode uint32 - - w *CountHashWriter - - // FieldsMap adds 1 to field id to avoid zero value issues - // name -> field id + 1 - FieldsMap map[string]uint16 - - // FieldsInv is the inverse of FieldsMap - // field id -> name - FieldsInv []string - - // Term dictionaries for each field - // field id -> term -> postings list id + 1 - Dicts []map[string]uint64 - - // Terms for each field, where terms are sorted ascending - // field id -> []term - DictKeys [][]string - - // Fields whose IncludeDocValues is true - // field id -> bool - IncludeDocValues []bool - - // postings id -> bitmap of docNums - Postings []*roaring.Bitmap - - // postings id -> freq/norm's, one for each docNum in postings - FreqNorms [][]interimFreqNorm - freqNormsBacking []interimFreqNorm - - // postings id -> locs, one for each freq - Locs [][]interimLoc - locsBacking []interimLoc - - numTermsPerPostingsList []int // key is postings list id - numLocsPerPostingsList []int // key is postings list id - - builder *vellum.Builder - builderBuf bytes.Buffer - - metaBuf bytes.Buffer - - tmp0 []byte - tmp1 []byte - - lastNumDocs int - lastOutSize int -} - -func (s *interim) reset() (err error) { - s.results = nil - s.chunkMode = 0 - s.w = nil - s.FieldsMap = nil - s.FieldsInv = nil - for i := range s.Dicts { - s.Dicts[i] = nil - } - s.Dicts = s.Dicts[:0] - for i := range s.DictKeys { - s.DictKeys[i] = s.DictKeys[i][:0] - } - s.DictKeys = s.DictKeys[:0] - for i 
:= range s.IncludeDocValues { - s.IncludeDocValues[i] = false - } - s.IncludeDocValues = s.IncludeDocValues[:0] - for _, idn := range s.Postings { - idn.Clear() - } - s.Postings = s.Postings[:0] - s.FreqNorms = s.FreqNorms[:0] - for i := range s.freqNormsBacking { - s.freqNormsBacking[i] = interimFreqNorm{} - } - s.freqNormsBacking = s.freqNormsBacking[:0] - s.Locs = s.Locs[:0] - for i := range s.locsBacking { - s.locsBacking[i] = interimLoc{} - } - s.locsBacking = s.locsBacking[:0] - s.numTermsPerPostingsList = s.numTermsPerPostingsList[:0] - s.numLocsPerPostingsList = s.numLocsPerPostingsList[:0] - s.builderBuf.Reset() - if s.builder != nil { - err = s.builder.Reset(&s.builderBuf) - } - s.metaBuf.Reset() - s.tmp0 = s.tmp0[:0] - s.tmp1 = s.tmp1[:0] - s.lastNumDocs = 0 - s.lastOutSize = 0 - - return err -} - -func (s *interim) grabBuf(size int) []byte { - buf := s.tmp0 - if cap(buf) < size { - buf = make([]byte, size) - s.tmp0 = buf - } - return buf[0:size] -} - -type interimStoredField struct { - vals [][]byte - typs []byte - arrayposs [][]uint64 // array positions -} - -type interimFreqNorm struct { - freq uint64 - norm float32 - numLocs int -} - -type interimLoc struct { - fieldID uint16 - pos uint64 - start uint64 - end uint64 - arrayposs []uint64 -} - -func (s *interim) convert() (uint64, uint64, uint64, []uint64, error) { - s.FieldsMap = map[string]uint16{} - - s.getOrDefineField("_id") // _id field is fieldID 0 - - for _, result := range s.results { - result.VisitComposite(func(field index.CompositeField) { - s.getOrDefineField(field.Name()) - }) - result.VisitFields(func(field index.Field) { - s.getOrDefineField(field.Name()) - }) - } - - sort.Strings(s.FieldsInv[1:]) // keep _id as first field - - for fieldID, fieldName := range s.FieldsInv { - s.FieldsMap[fieldName] = uint16(fieldID + 1) - } - - if cap(s.IncludeDocValues) >= len(s.FieldsInv) { - s.IncludeDocValues = s.IncludeDocValues[:len(s.FieldsInv)] - } else { - s.IncludeDocValues = make([]bool, 
len(s.FieldsInv)) - } - - s.prepareDicts() - - for _, dict := range s.DictKeys { - sort.Strings(dict) - } - - s.processDocuments() - - storedIndexOffset, err := s.writeStoredFields() - if err != nil { - return 0, 0, 0, nil, err - } - - var fdvIndexOffset uint64 - var dictOffsets []uint64 - - if len(s.results) > 0 { - fdvIndexOffset, dictOffsets, err = s.writeDicts() - if err != nil { - return 0, 0, 0, nil, err - } - } else { - dictOffsets = make([]uint64, len(s.FieldsInv)) - } - - fieldsIndexOffset, err := persistFields(s.FieldsInv, s.w, dictOffsets) - if err != nil { - return 0, 0, 0, nil, err - } - - return storedIndexOffset, fieldsIndexOffset, fdvIndexOffset, dictOffsets, nil -} - -func (s *interim) getOrDefineField(fieldName string) int { - fieldIDPlus1, exists := s.FieldsMap[fieldName] - if !exists { - fieldIDPlus1 = uint16(len(s.FieldsInv) + 1) - s.FieldsMap[fieldName] = fieldIDPlus1 - s.FieldsInv = append(s.FieldsInv, fieldName) - - s.Dicts = append(s.Dicts, make(map[string]uint64)) - - n := len(s.DictKeys) - if n < cap(s.DictKeys) { - s.DictKeys = s.DictKeys[:n+1] - s.DictKeys[n] = s.DictKeys[n][:0] - } else { - s.DictKeys = append(s.DictKeys, []string(nil)) - } - } - - return int(fieldIDPlus1 - 1) -} - -// fill Dicts and DictKeys from analysis results -func (s *interim) prepareDicts() { - var pidNext int - - var totTFs int - var totLocs int - - visitField := func(field index.Field) { - fieldID := uint16(s.getOrDefineField(field.Name())) - - dict := s.Dicts[fieldID] - dictKeys := s.DictKeys[fieldID] - - tfs := field.AnalyzedTokenFrequencies() - for term, tf := range tfs { - pidPlus1, exists := dict[term] - if !exists { - pidNext++ - pidPlus1 = uint64(pidNext) - - dict[term] = pidPlus1 - dictKeys = append(dictKeys, term) - - s.numTermsPerPostingsList = append(s.numTermsPerPostingsList, 0) - s.numLocsPerPostingsList = append(s.numLocsPerPostingsList, 0) - } - - pid := pidPlus1 - 1 - - s.numTermsPerPostingsList[pid] += 1 - s.numLocsPerPostingsList[pid] += 
len(tf.Locations) - - totLocs += len(tf.Locations) - } - - totTFs += len(tfs) - - s.DictKeys[fieldID] = dictKeys - } - - for _, result := range s.results { - // walk each composite field - result.VisitComposite(func(field index.CompositeField) { - visitField(field) - }) - - // walk each field - result.VisitFields(visitField) - } - - numPostingsLists := pidNext - - if cap(s.Postings) >= numPostingsLists { - s.Postings = s.Postings[:numPostingsLists] - } else { - postings := make([]*roaring.Bitmap, numPostingsLists) - copy(postings, s.Postings[:cap(s.Postings)]) - for i := 0; i < numPostingsLists; i++ { - if postings[i] == nil { - postings[i] = roaring.New() - } - } - s.Postings = postings - } - - if cap(s.FreqNorms) >= numPostingsLists { - s.FreqNorms = s.FreqNorms[:numPostingsLists] - } else { - s.FreqNorms = make([][]interimFreqNorm, numPostingsLists) - } - - if cap(s.freqNormsBacking) >= totTFs { - s.freqNormsBacking = s.freqNormsBacking[:totTFs] - } else { - s.freqNormsBacking = make([]interimFreqNorm, totTFs) - } - - freqNormsBacking := s.freqNormsBacking - for pid, numTerms := range s.numTermsPerPostingsList { - s.FreqNorms[pid] = freqNormsBacking[0:0] - freqNormsBacking = freqNormsBacking[numTerms:] - } - - if cap(s.Locs) >= numPostingsLists { - s.Locs = s.Locs[:numPostingsLists] - } else { - s.Locs = make([][]interimLoc, numPostingsLists) - } - - if cap(s.locsBacking) >= totLocs { - s.locsBacking = s.locsBacking[:totLocs] - } else { - s.locsBacking = make([]interimLoc, totLocs) - } - - locsBacking := s.locsBacking - for pid, numLocs := range s.numLocsPerPostingsList { - s.Locs[pid] = locsBacking[0:0] - locsBacking = locsBacking[numLocs:] - } -} - -func (s *interim) processDocuments() { - numFields := len(s.FieldsInv) - reuseFieldLens := make([]int, numFields) - reuseFieldTFs := make([]index.TokenFrequencies, numFields) - - for docNum, result := range s.results { - for i := 0; i < numFields; i++ { // clear these for reuse - reuseFieldLens[i] = 0 - 
reuseFieldTFs[i] = nil - } - - s.processDocument(uint64(docNum), result, - reuseFieldLens, reuseFieldTFs) - } -} - -func (s *interim) processDocument(docNum uint64, - result index.Document, - fieldLens []int, fieldTFs []index.TokenFrequencies) { - visitField := func(field index.Field) { - fieldID := uint16(s.getOrDefineField(field.Name())) - fieldLens[fieldID] += field.AnalyzedLength() - - existingFreqs := fieldTFs[fieldID] - if existingFreqs != nil { - existingFreqs.MergeAll(field.Name(), field.AnalyzedTokenFrequencies()) - } else { - fieldTFs[fieldID] = field.AnalyzedTokenFrequencies() - } - } - - // walk each composite field - result.VisitComposite(func(field index.CompositeField) { - visitField(field) - }) - - // walk each field - result.VisitFields(visitField) - - // now that it's been rolled up into fieldTFs, walk that - for fieldID, tfs := range fieldTFs { - dict := s.Dicts[fieldID] - norm := float32(1.0 / math.Sqrt(float64(fieldLens[fieldID]))) - - for term, tf := range tfs { - pid := dict[term] - 1 - bs := s.Postings[pid] - bs.Add(uint32(docNum)) - - s.FreqNorms[pid] = append(s.FreqNorms[pid], - interimFreqNorm{ - freq: uint64(tf.Frequency()), - norm: norm, - numLocs: len(tf.Locations), - }) - - if len(tf.Locations) > 0 { - locs := s.Locs[pid] - - for _, loc := range tf.Locations { - var locf = uint16(fieldID) - if loc.Field != "" { - locf = uint16(s.getOrDefineField(loc.Field)) - } - var arrayposs []uint64 - if len(loc.ArrayPositions) > 0 { - arrayposs = loc.ArrayPositions - } - locs = append(locs, interimLoc{ - fieldID: locf, - pos: uint64(loc.Position), - start: uint64(loc.Start), - end: uint64(loc.End), - arrayposs: arrayposs, - }) - } - - s.Locs[pid] = locs - } - } - } -} - -func (s *interim) writeStoredFields() ( - storedIndexOffset uint64, err error) { - varBuf := make([]byte, binary.MaxVarintLen64) - metaEncode := func(val uint64) (int, error) { - wb := binary.PutUvarint(varBuf, val) - return s.metaBuf.Write(varBuf[:wb]) - } - - data, compressed := 
s.tmp0[:0], s.tmp1[:0] - defer func() { s.tmp0, s.tmp1 = data, compressed }() - - // keyed by docNum - docStoredOffsets := make([]uint64, len(s.results)) - - // keyed by fieldID, for the current doc in the loop - docStoredFields := map[uint16]interimStoredField{} - - for docNum, result := range s.results { - for fieldID := range docStoredFields { // reset for next doc - delete(docStoredFields, fieldID) - } - - var validationErr error - result.VisitFields(func(field index.Field) { - fieldID := uint16(s.getOrDefineField(field.Name())) - - if field.Options().IsStored() { - isf := docStoredFields[fieldID] - isf.vals = append(isf.vals, field.Value()) - isf.typs = append(isf.typs, field.EncodedFieldType()) - isf.arrayposs = append(isf.arrayposs, field.ArrayPositions()) - docStoredFields[fieldID] = isf - } - - if field.Options().IncludeDocValues() { - s.IncludeDocValues[fieldID] = true - } - - err := ValidateDocFields(field) - if err != nil && validationErr == nil { - validationErr = err - } - }) - if validationErr != nil { - return 0, validationErr - } - - var curr int - - s.metaBuf.Reset() - data = data[:0] - - // _id field special case optimizes ExternalID() lookups - idFieldVal := docStoredFields[uint16(0)].vals[0] - _, err = metaEncode(uint64(len(idFieldVal))) - if err != nil { - return 0, err - } - - // handle non-"_id" fields - for fieldID := 1; fieldID < len(s.FieldsInv); fieldID++ { - isf, exists := docStoredFields[uint16(fieldID)] - if exists { - curr, data, err = persistStoredFieldValues( - fieldID, isf.vals, isf.typs, isf.arrayposs, - curr, metaEncode, data) - if err != nil { - return 0, err - } - } - } - - metaBytes := s.metaBuf.Bytes() - - compressed = snappy.Encode(compressed[:cap(compressed)], data) - - docStoredOffsets[docNum] = uint64(s.w.Count()) - - _, err := writeUvarints(s.w, - uint64(len(metaBytes)), - uint64(len(idFieldVal)+len(compressed))) - if err != nil { - return 0, err - } - - _, err = s.w.Write(metaBytes) - if err != nil { - return 0, err - 
} - - _, err = s.w.Write(idFieldVal) - if err != nil { - return 0, err - } - - _, err = s.w.Write(compressed) - if err != nil { - return 0, err - } - } - - storedIndexOffset = uint64(s.w.Count()) - - for _, docStoredOffset := range docStoredOffsets { - err = binary.Write(s.w, binary.BigEndian, docStoredOffset) - if err != nil { - return 0, err - } - } - - return storedIndexOffset, nil -} - -func (s *interim) writeDicts() (fdvIndexOffset uint64, dictOffsets []uint64, err error) { - dictOffsets = make([]uint64, len(s.FieldsInv)) - - fdvOffsetsStart := make([]uint64, len(s.FieldsInv)) - fdvOffsetsEnd := make([]uint64, len(s.FieldsInv)) - - buf := s.grabBuf(binary.MaxVarintLen64) - - // these int coders are initialized with chunk size 1024 - // however this will be reset to the correct chunk size - // while processing each individual field-term section - tfEncoder := newChunkedIntCoder(1024, uint64(len(s.results)-1)) - locEncoder := newChunkedIntCoder(1024, uint64(len(s.results)-1)) - - var docTermMap [][]byte - - if s.builder == nil { - s.builder, err = vellum.New(&s.builderBuf, nil) - if err != nil { - return 0, nil, err - } - } - - for fieldID, terms := range s.DictKeys { - if cap(docTermMap) < len(s.results) { - docTermMap = make([][]byte, len(s.results)) - } else { - docTermMap = docTermMap[0:len(s.results)] - for docNum := range docTermMap { // reset the docTermMap - docTermMap[docNum] = docTermMap[docNum][:0] - } - } - - dict := s.Dicts[fieldID] - - for _, term := range terms { // terms are already sorted - pid := dict[term] - 1 - - postingsBS := s.Postings[pid] - - freqNorms := s.FreqNorms[pid] - freqNormOffset := 0 - - locs := s.Locs[pid] - locOffset := 0 - - chunkSize, err := getChunkSize(s.chunkMode, postingsBS.GetCardinality(), uint64(len(s.results))) - if err != nil { - return 0, nil, err - } - tfEncoder.SetChunkSize(chunkSize, uint64(len(s.results)-1)) - locEncoder.SetChunkSize(chunkSize, uint64(len(s.results)-1)) - - postingsItr := postingsBS.Iterator() 
- for postingsItr.HasNext() { - docNum := uint64(postingsItr.Next()) - - freqNorm := freqNorms[freqNormOffset] - - err = tfEncoder.Add(docNum, - encodeFreqHasLocs(freqNorm.freq, freqNorm.numLocs > 0), - uint64(math.Float32bits(freqNorm.norm))) - if err != nil { - return 0, nil, err - } - - if freqNorm.numLocs > 0 { - numBytesLocs := 0 - for _, loc := range locs[locOffset : locOffset+freqNorm.numLocs] { - numBytesLocs += totalUvarintBytes( - uint64(loc.fieldID), loc.pos, loc.start, loc.end, - uint64(len(loc.arrayposs)), loc.arrayposs) - } - - err = locEncoder.Add(docNum, uint64(numBytesLocs)) - if err != nil { - return 0, nil, err - } - - for _, loc := range locs[locOffset : locOffset+freqNorm.numLocs] { - err = locEncoder.Add(docNum, - uint64(loc.fieldID), loc.pos, loc.start, loc.end, - uint64(len(loc.arrayposs))) - if err != nil { - return 0, nil, err - } - - err = locEncoder.Add(docNum, loc.arrayposs...) - if err != nil { - return 0, nil, err - } - } - - locOffset += freqNorm.numLocs - } - - freqNormOffset++ - - docTermMap[docNum] = append( - append(docTermMap[docNum], term...), - termSeparator) - } - - tfEncoder.Close() - locEncoder.Close() - - postingsOffset, err := - writePostings(postingsBS, tfEncoder, locEncoder, nil, s.w, buf) - if err != nil { - return 0, nil, err - } - - if postingsOffset > uint64(0) { - err = s.builder.Insert([]byte(term), postingsOffset) - if err != nil { - return 0, nil, err - } - } - - tfEncoder.Reset() - locEncoder.Reset() - } - - err = s.builder.Close() - if err != nil { - return 0, nil, err - } - - // record where this dictionary starts - dictOffsets[fieldID] = uint64(s.w.Count()) - - vellumData := s.builderBuf.Bytes() - - // write out the length of the vellum data - n := binary.PutUvarint(buf, uint64(len(vellumData))) - _, err = s.w.Write(buf[:n]) - if err != nil { - return 0, nil, err - } - - // write this vellum to disk - _, err = s.w.Write(vellumData) - if err != nil { - return 0, nil, err - } - - // reset vellum for reuse - 
s.builderBuf.Reset() - - err = s.builder.Reset(&s.builderBuf) - if err != nil { - return 0, nil, err - } - - // write the field doc values - // NOTE: doc values continue to use legacy chunk mode - chunkSize, err := getChunkSize(LegacyChunkMode, 0, 0) - if err != nil { - return 0, nil, err - } - fdvEncoder := newChunkedContentCoder(chunkSize, uint64(len(s.results)-1), s.w, false) - if s.IncludeDocValues[fieldID] { - for docNum, docTerms := range docTermMap { - if len(docTerms) > 0 { - err = fdvEncoder.Add(uint64(docNum), docTerms) - if err != nil { - return 0, nil, err - } - } - } - err = fdvEncoder.Close() - if err != nil { - return 0, nil, err - } - - fdvOffsetsStart[fieldID] = uint64(s.w.Count()) - - _, err = fdvEncoder.Write() - if err != nil { - return 0, nil, err - } - - fdvOffsetsEnd[fieldID] = uint64(s.w.Count()) - - fdvEncoder.Reset() - } else { - fdvOffsetsStart[fieldID] = fieldNotUninverted - fdvOffsetsEnd[fieldID] = fieldNotUninverted - } - } - - fdvIndexOffset = uint64(s.w.Count()) - - for i := 0; i < len(fdvOffsetsStart); i++ { - n := binary.PutUvarint(buf, fdvOffsetsStart[i]) - _, err := s.w.Write(buf[:n]) - if err != nil { - return 0, nil, err - } - n = binary.PutUvarint(buf, fdvOffsetsEnd[i]) - _, err = s.w.Write(buf[:n]) - if err != nil { - return 0, nil, err - } - } - - return fdvIndexOffset, dictOffsets, nil -} - -// returns the total # of bytes needed to encode the given uint64's -// into binary.PutUVarint() encoding -func totalUvarintBytes(a, b, c, d, e uint64, more []uint64) (n int) { - n = numUvarintBytes(a) - n += numUvarintBytes(b) - n += numUvarintBytes(c) - n += numUvarintBytes(d) - n += numUvarintBytes(e) - for _, v := range more { - n += numUvarintBytes(v) - } - return n -} - -// returns # of bytes needed to encode x in binary.PutUvarint() encoding -func numUvarintBytes(x uint64) (n int) { - for x >= 0x80 { - x >>= 7 - n++ - } - return n + 1 -} diff --git a/vendor/github.com/blevesearch/zapx/v12/posting.go 
b/vendor/github.com/blevesearch/zapx/v12/posting.go deleted file mode 100644 index d6c61a42c..000000000 --- a/vendor/github.com/blevesearch/zapx/v12/posting.go +++ /dev/null @@ -1,796 +0,0 @@ -// Copyright (c) 2017 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package zap - -import ( - "encoding/binary" - "fmt" - "math" - "reflect" - - "github.com/RoaringBitmap/roaring" - segment "github.com/blevesearch/scorch_segment_api/v2" -) - -var reflectStaticSizePostingsList int -var reflectStaticSizePostingsIterator int -var reflectStaticSizePosting int -var reflectStaticSizeLocation int - -func init() { - var pl PostingsList - reflectStaticSizePostingsList = int(reflect.TypeOf(pl).Size()) - var pi PostingsIterator - reflectStaticSizePostingsIterator = int(reflect.TypeOf(pi).Size()) - var p Posting - reflectStaticSizePosting = int(reflect.TypeOf(p).Size()) - var l Location - reflectStaticSizeLocation = int(reflect.TypeOf(l).Size()) -} - -// FST or vellum value (uint64) encoding is determined by the top two -// highest-order or most significant bits... -// -// encoding : MSB -// name : 63 62 61...to...bit #0 (LSB) -// ----------+---+---+--------------------------------------------------- -// general : 0 | 0 | 62-bits of postingsOffset. -// ~ : 0 | 1 | reserved for future. -// 1-hit : 1 | 0 | 31-bits of positive float31 norm | 31-bits docNum. -// ~ : 1 | 1 | reserved for future. 
-// -// Encoding "general" is able to handle all cases, where the -// postingsOffset points to more information about the postings for -// the term. -// -// Encoding "1-hit" is used to optimize a commonly seen case when a -// term has only a single hit. For example, a term in the _id field -// will have only 1 hit. The "1-hit" encoding is used for a term -// in a field when... -// -// - term vector info is disabled for that field; -// - and, the term appears in only a single doc for that field; -// - and, the term's freq is exactly 1 in that single doc for that field; -// - and, the docNum must fit into 31-bits; -// -// Otherwise, the "general" encoding is used instead. -// -// In the "1-hit" encoding, the field in that single doc may have -// other terms, which is supported in the "1-hit" encoding by the -// positive float31 norm. - -const FSTValEncodingMask = uint64(0xc000000000000000) -const FSTValEncodingGeneral = uint64(0x0000000000000000) -const FSTValEncoding1Hit = uint64(0x8000000000000000) - -func FSTValEncode1Hit(docNum uint64, normBits uint64) uint64 { - return FSTValEncoding1Hit | ((mask31Bits & normBits) << 31) | (mask31Bits & docNum) -} - -func FSTValDecode1Hit(v uint64) (docNum uint64, normBits uint64) { - return (mask31Bits & v), (mask31Bits & (v >> 31)) -} - -const mask31Bits = uint64(0x000000007fffffff) - -func under32Bits(x uint64) bool { - return x <= mask31Bits -} - -const DocNum1HitFinished = math.MaxUint64 - -var NormBits1Hit = uint64(math.Float32bits(float32(1))) - -// PostingsList is an in-memory representation of a postings list -type PostingsList struct { - sb *SegmentBase - postingsOffset uint64 - freqOffset uint64 - locOffset uint64 - postings *roaring.Bitmap - except *roaring.Bitmap - - // when normBits1Hit != 0, then this postings list came from a - // 1-hit encoding, and only the docNum1Hit & normBits1Hit apply - docNum1Hit uint64 - normBits1Hit uint64 -} - -// represents an immutable, empty postings list -var emptyPostingsList = 
&PostingsList{} - -func (p *PostingsList) Size() int { - sizeInBytes := reflectStaticSizePostingsList + SizeOfPtr - - if p.except != nil { - sizeInBytes += int(p.except.GetSizeInBytes()) - } - - return sizeInBytes -} - -func (p *PostingsList) OrInto(receiver *roaring.Bitmap) { - if p.normBits1Hit != 0 { - receiver.Add(uint32(p.docNum1Hit)) - return - } - - if p.postings != nil { - receiver.Or(p.postings) - } -} - -// Iterator returns an iterator for this postings list -func (p *PostingsList) Iterator(includeFreq, includeNorm, includeLocs bool, - prealloc segment.PostingsIterator) segment.PostingsIterator { - if p.normBits1Hit == 0 && p.postings == nil { - return emptyPostingsIterator - } - - var preallocPI *PostingsIterator - pi, ok := prealloc.(*PostingsIterator) - if ok && pi != nil { - preallocPI = pi - } - if preallocPI == emptyPostingsIterator { - preallocPI = nil - } - - return p.iterator(includeFreq, includeNorm, includeLocs, preallocPI) -} - -func (p *PostingsList) iterator(includeFreq, includeNorm, includeLocs bool, - rv *PostingsIterator) *PostingsIterator { - if rv == nil { - rv = &PostingsIterator{} - } else { - freqNormReader := rv.freqNormReader - if freqNormReader != nil { - freqNormReader.reset() - } - - locReader := rv.locReader - if locReader != nil { - locReader.reset() - } - - nextLocs := rv.nextLocs[:0] - nextSegmentLocs := rv.nextSegmentLocs[:0] - - buf := rv.buf - - *rv = PostingsIterator{} // clear the struct - - rv.freqNormReader = freqNormReader - rv.locReader = locReader - - rv.nextLocs = nextLocs - rv.nextSegmentLocs = nextSegmentLocs - - rv.buf = buf - } - - rv.postings = p - rv.includeFreqNorm = includeFreq || includeNorm || includeLocs - rv.includeLocs = includeLocs - - if p.normBits1Hit != 0 { - // "1-hit" encoding - rv.docNum1Hit = p.docNum1Hit - rv.normBits1Hit = p.normBits1Hit - - if p.except != nil && p.except.Contains(uint32(rv.docNum1Hit)) { - rv.docNum1Hit = DocNum1HitFinished - } - - return rv - } - - // "general" encoding, 
check if empty - if p.postings == nil { - return rv - } - - // initialize freq chunk reader - if rv.includeFreqNorm { - rv.freqNormReader = newChunkedIntDecoder(p.sb.mem, p.freqOffset) - } - - // initialize the loc chunk reader - if rv.includeLocs { - rv.locReader = newChunkedIntDecoder(p.sb.mem, p.locOffset) - } - - rv.all = p.postings.Iterator() - if p.except != nil { - rv.ActualBM = roaring.AndNot(p.postings, p.except) - rv.Actual = rv.ActualBM.Iterator() - } else { - rv.ActualBM = p.postings - rv.Actual = rv.all // Optimize to use same iterator for all & Actual. - } - - return rv -} - -// Count returns the number of items on this postings list -func (p *PostingsList) Count() uint64 { - var n, e uint64 - if p.normBits1Hit != 0 { - n = 1 - if p.except != nil && p.except.Contains(uint32(p.docNum1Hit)) { - e = 1 - } - } else if p.postings != nil { - n = p.postings.GetCardinality() - if p.except != nil { - e = p.postings.AndCardinality(p.except) - } - } - return n - e -} - -func (rv *PostingsList) read(postingsOffset uint64, d *Dictionary) error { - rv.postingsOffset = postingsOffset - - // handle "1-hit" encoding special case - if rv.postingsOffset&FSTValEncodingMask == FSTValEncoding1Hit { - return rv.init1Hit(postingsOffset) - } - - // read the location of the freq/norm details - var n uint64 - var read int - - rv.freqOffset, read = binary.Uvarint(d.sb.mem[postingsOffset+n : postingsOffset+binary.MaxVarintLen64]) - n += uint64(read) - - rv.locOffset, read = binary.Uvarint(d.sb.mem[postingsOffset+n : postingsOffset+n+binary.MaxVarintLen64]) - n += uint64(read) - - var postingsLen uint64 - postingsLen, read = binary.Uvarint(d.sb.mem[postingsOffset+n : postingsOffset+n+binary.MaxVarintLen64]) - n += uint64(read) - - roaringBytes := d.sb.mem[postingsOffset+n : postingsOffset+n+postingsLen] - - if rv.postings == nil { - rv.postings = roaring.NewBitmap() - } - _, err := rv.postings.FromBuffer(roaringBytes) - if err != nil { - return fmt.Errorf("error loading roaring 
bitmap: %v", err) - } - - return nil -} - -func (rv *PostingsList) init1Hit(fstVal uint64) error { - docNum, normBits := FSTValDecode1Hit(fstVal) - - rv.docNum1Hit = docNum - rv.normBits1Hit = normBits - - return nil -} - -// PostingsIterator provides a way to iterate through the postings list -type PostingsIterator struct { - postings *PostingsList - all roaring.IntPeekable - Actual roaring.IntPeekable - ActualBM *roaring.Bitmap - - currChunk uint32 - freqNormReader *chunkedIntDecoder - locReader *chunkedIntDecoder - - next Posting // reused across Next() calls - nextLocs []Location // reused across Next() calls - nextSegmentLocs []segment.Location // reused across Next() calls - - docNum1Hit uint64 - normBits1Hit uint64 - - buf []byte - - includeFreqNorm bool - includeLocs bool -} - -var emptyPostingsIterator = &PostingsIterator{} - -func (i *PostingsIterator) Size() int { - sizeInBytes := reflectStaticSizePostingsIterator + SizeOfPtr + - i.next.Size() - // account for freqNormReader, locReader if we start using this. 
- for _, entry := range i.nextLocs { - sizeInBytes += entry.Size() - } - - return sizeInBytes -} - -func (i *PostingsIterator) loadChunk(chunk int) error { - if i.includeFreqNorm { - err := i.freqNormReader.loadChunk(chunk) - if err != nil { - return err - } - } - - if i.includeLocs { - err := i.locReader.loadChunk(chunk) - if err != nil { - return err - } - } - - i.currChunk = uint32(chunk) - return nil -} - -func (i *PostingsIterator) readFreqNormHasLocs() (uint64, uint64, bool, error) { - if i.normBits1Hit != 0 { - return 1, i.normBits1Hit, false, nil - } - - freqHasLocs, err := i.freqNormReader.readUvarint() - if err != nil { - return 0, 0, false, fmt.Errorf("error reading frequency: %v", err) - } - - freq, hasLocs := decodeFreqHasLocs(freqHasLocs) - - normBits, err := i.freqNormReader.readUvarint() - if err != nil { - return 0, 0, false, fmt.Errorf("error reading norm: %v", err) - } - - return freq, normBits, hasLocs, nil -} - -func (i *PostingsIterator) skipFreqNormReadHasLocs() (bool, error) { - if i.normBits1Hit != 0 { - return false, nil - } - - freqHasLocs, err := i.freqNormReader.readUvarint() - if err != nil { - return false, fmt.Errorf("error reading freqHasLocs: %v", err) - } - - i.freqNormReader.SkipUvarint() // Skip normBits. - - return freqHasLocs&0x01 != 0, nil // See decodeFreqHasLocs() / hasLocs. -} - -func encodeFreqHasLocs(freq uint64, hasLocs bool) uint64 { - rv := freq << 1 - if hasLocs { - rv = rv | 0x01 // 0'th LSB encodes whether there are locations - } - return rv -} - -func decodeFreqHasLocs(freqHasLocs uint64) (uint64, bool) { - freq := freqHasLocs >> 1 - hasLocs := freqHasLocs&0x01 != 0 - return freq, hasLocs -} - -// readLocation processes all the integers on the stream representing a single -// location. 
-func (i *PostingsIterator) readLocation(l *Location) error { - // read off field - fieldID, err := i.locReader.readUvarint() - if err != nil { - return fmt.Errorf("error reading location field: %v", err) - } - // read off pos - pos, err := i.locReader.readUvarint() - if err != nil { - return fmt.Errorf("error reading location pos: %v", err) - } - // read off start - start, err := i.locReader.readUvarint() - if err != nil { - return fmt.Errorf("error reading location start: %v", err) - } - // read off end - end, err := i.locReader.readUvarint() - if err != nil { - return fmt.Errorf("error reading location end: %v", err) - } - // read off num array pos - numArrayPos, err := i.locReader.readUvarint() - if err != nil { - return fmt.Errorf("error reading location num array pos: %v", err) - } - - l.field = i.postings.sb.fieldsInv[fieldID] - l.pos = pos - l.start = start - l.end = end - - if cap(l.ap) < int(numArrayPos) { - l.ap = make([]uint64, int(numArrayPos)) - } else { - l.ap = l.ap[:int(numArrayPos)] - } - - // read off array positions - for k := 0; k < int(numArrayPos); k++ { - ap, err := i.locReader.readUvarint() - if err != nil { - return fmt.Errorf("error reading array position: %v", err) - } - - l.ap[k] = ap - } - - return nil -} - -// Next returns the next posting on the postings list, or nil at the end -func (i *PostingsIterator) Next() (segment.Posting, error) { - return i.nextAtOrAfter(0) -} - -// Advance returns the posting at the specified docNum or it is not present -// the next posting, or if the end is reached, nil -func (i *PostingsIterator) Advance(docNum uint64) (segment.Posting, error) { - return i.nextAtOrAfter(docNum) -} - -// Next returns the next posting on the postings list, or nil at the end -func (i *PostingsIterator) nextAtOrAfter(atOrAfter uint64) (segment.Posting, error) { - docNum, exists, err := i.nextDocNumAtOrAfter(atOrAfter) - if err != nil || !exists { - return nil, err - } - - i.next = Posting{} // clear the struct - rv := &i.next 
- rv.docNum = docNum - - if !i.includeFreqNorm { - return rv, nil - } - - var normBits uint64 - var hasLocs bool - - rv.freq, normBits, hasLocs, err = i.readFreqNormHasLocs() - if err != nil { - return nil, err - } - - rv.norm = math.Float32frombits(uint32(normBits)) - - if i.includeLocs && hasLocs { - // prepare locations into reused slices, where we assume - // rv.freq >= "number of locs", since in a composite field, - // some component fields might have their IncludeTermVector - // flags disabled while other component fields are enabled - if cap(i.nextLocs) >= int(rv.freq) { - i.nextLocs = i.nextLocs[0:rv.freq] - } else { - i.nextLocs = make([]Location, rv.freq, rv.freq*2) - } - if cap(i.nextSegmentLocs) < int(rv.freq) { - i.nextSegmentLocs = make([]segment.Location, rv.freq, rv.freq*2) - } - rv.locs = i.nextSegmentLocs[:0] - - numLocsBytes, err := i.locReader.readUvarint() - if err != nil { - return nil, fmt.Errorf("error reading location numLocsBytes: %v", err) - } - - j := 0 - startBytesRemaining := i.locReader.Len() // # bytes remaining in the locReader - for startBytesRemaining-i.locReader.Len() < int(numLocsBytes) { - err := i.readLocation(&i.nextLocs[j]) - if err != nil { - return nil, err - } - rv.locs = append(rv.locs, &i.nextLocs[j]) - j++ - } - } - - return rv, nil -} - -// nextDocNum returns the next docNum on the postings list, and also -// sets up the currChunk / loc related fields of the iterator. 
-func (i *PostingsIterator) nextDocNumAtOrAfter(atOrAfter uint64) (uint64, bool, error) { - if i.normBits1Hit != 0 { - if i.docNum1Hit == DocNum1HitFinished { - return 0, false, nil - } - if i.docNum1Hit < atOrAfter { - // advanced past our 1-hit - i.docNum1Hit = DocNum1HitFinished // consume our 1-hit docNum - return 0, false, nil - } - docNum := i.docNum1Hit - i.docNum1Hit = DocNum1HitFinished // consume our 1-hit docNum - return docNum, true, nil - } - - if i.Actual == nil || !i.Actual.HasNext() { - return 0, false, nil - } - - if i.postings == nil || i.postings.postings == i.ActualBM { - return i.nextDocNumAtOrAfterClean(atOrAfter) - } - - i.Actual.AdvanceIfNeeded(uint32(atOrAfter)) - - if !i.Actual.HasNext() { - // couldn't find anything - return 0, false, nil - } - - n := i.Actual.Next() - allN := i.all.Next() - - chunkSize, err := getChunkSize(i.postings.sb.chunkMode, i.postings.postings.GetCardinality(), i.postings.sb.numDocs) - if err != nil { - return 0, false, err - } - nChunk := n / uint32(chunkSize) - - // when allN becomes >= to here, then allN is in the same chunk as nChunk. 
- allNReachesNChunk := nChunk * uint32(chunkSize) - - // n is the next actual hit (excluding some postings), and - // allN is the next hit in the full postings, and - // if they don't match, move 'all' forwards until they do - for allN != n { - // we've reached same chunk, so move the freq/norm/loc decoders forward - if i.includeFreqNorm && allN >= allNReachesNChunk { - err := i.currChunkNext(nChunk) - if err != nil { - return 0, false, err - } - } - - allN = i.all.Next() - } - - if i.includeFreqNorm && (i.currChunk != nChunk || i.freqNormReader.isNil()) { - err := i.loadChunk(int(nChunk)) - if err != nil { - return 0, false, fmt.Errorf("error loading chunk: %v", err) - } - } - - return uint64(n), true, nil -} - -// optimization when the postings list is "clean" (e.g., no updates & -// no deletions) where the all bitmap is the same as the actual bitmap -func (i *PostingsIterator) nextDocNumAtOrAfterClean( - atOrAfter uint64) (uint64, bool, error) { - if !i.includeFreqNorm { - i.Actual.AdvanceIfNeeded(uint32(atOrAfter)) - - if !i.Actual.HasNext() { - return 0, false, nil // couldn't find anything - } - - return uint64(i.Actual.Next()), true, nil - } - - chunkSize, err := getChunkSize(i.postings.sb.chunkMode, i.postings.postings.GetCardinality(), i.postings.sb.numDocs) - if err != nil { - return 0, false, err - } - - // freq-norm's needed, so maintain freq-norm chunk reader - sameChunkNexts := 0 // # of times we called Next() in the same chunk - n := i.Actual.Next() - nChunk := n / uint32(chunkSize) - - for uint64(n) < atOrAfter && i.Actual.HasNext() { - n = i.Actual.Next() - - nChunkPrev := nChunk - nChunk = n / uint32(chunkSize) - - if nChunk != nChunkPrev { - sameChunkNexts = 0 - } else { - sameChunkNexts += 1 - } - } - - if uint64(n) < atOrAfter { - // couldn't find anything - return 0, false, nil - } - - for j := 0; j < sameChunkNexts; j++ { - err := i.currChunkNext(nChunk) - if err != nil { - return 0, false, fmt.Errorf("error optimized currChunkNext: %v", err) 
- } - } - - if i.currChunk != nChunk || i.freqNormReader.isNil() { - err := i.loadChunk(int(nChunk)) - if err != nil { - return 0, false, fmt.Errorf("error loading chunk: %v", err) - } - } - - return uint64(n), true, nil -} - -func (i *PostingsIterator) currChunkNext(nChunk uint32) error { - if i.currChunk != nChunk || i.freqNormReader.isNil() { - err := i.loadChunk(int(nChunk)) - if err != nil { - return fmt.Errorf("error loading chunk: %v", err) - } - } - - // read off freq/offsets even though we don't care about them - hasLocs, err := i.skipFreqNormReadHasLocs() - if err != nil { - return err - } - - if i.includeLocs && hasLocs { - numLocsBytes, err := i.locReader.readUvarint() - if err != nil { - return fmt.Errorf("error reading location numLocsBytes: %v", err) - } - - // skip over all the location bytes - i.locReader.SkipBytes(int(numLocsBytes)) - } - - return nil -} - -// DocNum1Hit returns the docNum and true if this is "1-hit" optimized -// and the docNum is available. -func (p *PostingsIterator) DocNum1Hit() (uint64, bool) { - if p.normBits1Hit != 0 && p.docNum1Hit != DocNum1HitFinished { - return p.docNum1Hit, true - } - return 0, false -} - -// ActualBitmap returns the underlying actual bitmap -// which can be used up the stack for optimizations -func (p *PostingsIterator) ActualBitmap() *roaring.Bitmap { - return p.ActualBM -} - -// ReplaceActual replaces the ActualBM with the provided -// bitmap -func (p *PostingsIterator) ReplaceActual(abm *roaring.Bitmap) { - p.ActualBM = abm - p.Actual = abm.Iterator() -} - -// PostingsIteratorFromBitmap constructs a PostingsIterator given an -// "actual" bitmap. -func PostingsIteratorFromBitmap(bm *roaring.Bitmap, - includeFreqNorm, includeLocs bool) (segment.PostingsIterator, error) { - return &PostingsIterator{ - ActualBM: bm, - Actual: bm.Iterator(), - includeFreqNorm: includeFreqNorm, - includeLocs: includeLocs, - }, nil -} - -// PostingsIteratorFrom1Hit constructs a PostingsIterator given a -// 1-hit docNum. 
-func PostingsIteratorFrom1Hit(docNum1Hit uint64, - includeFreqNorm, includeLocs bool) (segment.PostingsIterator, error) { - return &PostingsIterator{ - docNum1Hit: docNum1Hit, - normBits1Hit: NormBits1Hit, - includeFreqNorm: includeFreqNorm, - includeLocs: includeLocs, - }, nil -} - -// Posting is a single entry in a postings list -type Posting struct { - docNum uint64 - freq uint64 - norm float32 - locs []segment.Location -} - -func (p *Posting) Size() int { - sizeInBytes := reflectStaticSizePosting - - for _, entry := range p.locs { - sizeInBytes += entry.Size() - } - - return sizeInBytes -} - -// Number returns the document number of this posting in this segment -func (p *Posting) Number() uint64 { - return p.docNum -} - -// Frequency returns the frequencies of occurrence of this term in this doc/field -func (p *Posting) Frequency() uint64 { - return p.freq -} - -// Norm returns the normalization factor for this posting -func (p *Posting) Norm() float64 { - return float64(p.norm) -} - -// Locations returns the location information for each occurrence -func (p *Posting) Locations() []segment.Location { - return p.locs -} - -// Location represents the location of a single occurrence -type Location struct { - field string - pos uint64 - start uint64 - end uint64 - ap []uint64 -} - -func (l *Location) Size() int { - return reflectStaticSizeLocation + - len(l.field) + - len(l.ap)*SizeOfUint64 -} - -// Field returns the name of the field (useful in composite fields to know -// which original field the value came from) -func (l *Location) Field() string { - return l.field -} - -// Start returns the start byte offset of this occurrence -func (l *Location) Start() uint64 { - return l.start -} - -// End returns the end byte offset of this occurrence -func (l *Location) End() uint64 { - return l.end -} - -// Pos returns the 1-based phrase position of this occurrence -func (l *Location) Pos() uint64 { - return l.pos -} - -// ArrayPositions returns the array position vector 
associated with this occurrence -func (l *Location) ArrayPositions() []uint64 { - return l.ap -} diff --git a/vendor/github.com/blevesearch/zapx/v12/read.go b/vendor/github.com/blevesearch/zapx/v12/read.go deleted file mode 100644 index e47d4c6ab..000000000 --- a/vendor/github.com/blevesearch/zapx/v12/read.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright (c) 2017 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package zap - -import "encoding/binary" - -func (s *SegmentBase) getDocStoredMetaAndCompressed(docNum uint64) ([]byte, []byte) { - _, storedOffset, n, metaLen, dataLen := s.getDocStoredOffsets(docNum) - - meta := s.mem[storedOffset+n : storedOffset+n+metaLen] - data := s.mem[storedOffset+n+metaLen : storedOffset+n+metaLen+dataLen] - - return meta, data -} - -func (s *SegmentBase) getDocStoredOffsets(docNum uint64) ( - uint64, uint64, uint64, uint64, uint64) { - indexOffset := s.storedIndexOffset + (8 * docNum) - - storedOffset := binary.BigEndian.Uint64(s.mem[indexOffset : indexOffset+8]) - - var n uint64 - - metaLen, read := binary.Uvarint(s.mem[storedOffset : storedOffset+binary.MaxVarintLen64]) - n += uint64(read) - - dataLen, read := binary.Uvarint(s.mem[storedOffset+n : storedOffset+n+binary.MaxVarintLen64]) - n += uint64(read) - - return indexOffset, storedOffset, n, metaLen, dataLen -} diff --git a/vendor/github.com/blevesearch/zapx/v12/segment.go b/vendor/github.com/blevesearch/zapx/v12/segment.go deleted file mode 100644 
index 6317ad403..000000000 --- a/vendor/github.com/blevesearch/zapx/v12/segment.go +++ /dev/null @@ -1,570 +0,0 @@ -// Copyright (c) 2017 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package zap - -import ( - "bytes" - "encoding/binary" - "fmt" - "io" - "os" - "sync" - "unsafe" - - "github.com/RoaringBitmap/roaring" - mmap "github.com/blevesearch/mmap-go" - segment "github.com/blevesearch/scorch_segment_api/v2" - "github.com/blevesearch/vellum" - "github.com/golang/snappy" -) - -var reflectStaticSizeSegmentBase int - -func init() { - var sb SegmentBase - reflectStaticSizeSegmentBase = int(unsafe.Sizeof(sb)) -} - -// Open returns a zap impl of a segment -func (*ZapPlugin) Open(path string) (segment.Segment, error) { - f, err := os.Open(path) - if err != nil { - return nil, err - } - mm, err := mmap.Map(f, mmap.RDONLY, 0) - if err != nil { - // mmap failed, try to close the file - _ = f.Close() - return nil, err - } - - rv := &Segment{ - SegmentBase: SegmentBase{ - mem: mm[0 : len(mm)-FooterSize], - fieldsMap: make(map[string]uint16), - fieldDvReaders: make(map[uint16]*docValueReader), - fieldFSTs: make(map[uint16]*vellum.FST), - }, - f: f, - mm: mm, - path: path, - refs: 1, - } - rv.SegmentBase.updateSize() - - err = rv.loadConfig() - if err != nil { - _ = rv.Close() - return nil, err - } - - err = rv.loadFields() - if err != nil { - _ = rv.Close() - return nil, err - } - - err = rv.loadDvReaders() - if err != nil { - _ = rv.Close() - 
return nil, err - } - - return rv, nil -} - -// SegmentBase is a memory only, read-only implementation of the -// segment.Segment interface, using zap's data representation. -type SegmentBase struct { - mem []byte - memCRC uint32 - chunkMode uint32 - fieldsMap map[string]uint16 // fieldName -> fieldID+1 - fieldsInv []string // fieldID -> fieldName - numDocs uint64 - storedIndexOffset uint64 - fieldsIndexOffset uint64 - docValueOffset uint64 - dictLocs []uint64 - fieldDvReaders map[uint16]*docValueReader // naive chunk cache per field - fieldDvNames []string // field names cached in fieldDvReaders - size uint64 - - m sync.Mutex - fieldFSTs map[uint16]*vellum.FST -} - -func (sb *SegmentBase) Size() int { - return int(sb.size) -} - -func (sb *SegmentBase) updateSize() { - sizeInBytes := reflectStaticSizeSegmentBase + - cap(sb.mem) - - // fieldsMap - for k := range sb.fieldsMap { - sizeInBytes += (len(k) + SizeOfString) + SizeOfUint16 - } - - // fieldsInv, dictLocs - for _, entry := range sb.fieldsInv { - sizeInBytes += len(entry) + SizeOfString - } - sizeInBytes += len(sb.dictLocs) * SizeOfUint64 - - // fieldDvReaders - for _, v := range sb.fieldDvReaders { - sizeInBytes += SizeOfUint16 + SizeOfPtr - if v != nil { - sizeInBytes += v.size() - } - } - - sb.size = uint64(sizeInBytes) -} - -func (sb *SegmentBase) AddRef() {} -func (sb *SegmentBase) DecRef() (err error) { return nil } -func (sb *SegmentBase) Close() (err error) { return nil } - -// Segment implements a persisted segment.Segment interface, by -// embedding an mmap()'ed SegmentBase. -type Segment struct { - SegmentBase - - f *os.File - mm mmap.MMap - path string - version uint32 - crc uint32 - - m sync.Mutex // Protects the fields that follow. 
- refs int64 -} - -func (s *Segment) Size() int { - // 8 /* size of file pointer */ - // 4 /* size of version -> uint32 */ - // 4 /* size of crc -> uint32 */ - sizeOfUints := 16 - - sizeInBytes := (len(s.path) + SizeOfString) + sizeOfUints - - // mutex, refs -> int64 - sizeInBytes += 16 - - // do not include the mmap'ed part - return sizeInBytes + s.SegmentBase.Size() - cap(s.mem) -} - -func (s *Segment) AddRef() { - s.m.Lock() - s.refs++ - s.m.Unlock() -} - -func (s *Segment) DecRef() (err error) { - s.m.Lock() - s.refs-- - if s.refs == 0 { - err = s.closeActual() - } - s.m.Unlock() - return err -} - -func (s *Segment) loadConfig() error { - crcOffset := len(s.mm) - 4 - s.crc = binary.BigEndian.Uint32(s.mm[crcOffset : crcOffset+4]) - - verOffset := crcOffset - 4 - s.version = binary.BigEndian.Uint32(s.mm[verOffset : verOffset+4]) - if s.version != Version { - return fmt.Errorf("unsupported version %d", s.version) - } - - chunkOffset := verOffset - 4 - s.chunkMode = binary.BigEndian.Uint32(s.mm[chunkOffset : chunkOffset+4]) - - docValueOffset := chunkOffset - 8 - s.docValueOffset = binary.BigEndian.Uint64(s.mm[docValueOffset : docValueOffset+8]) - - fieldsIndexOffset := docValueOffset - 8 - s.fieldsIndexOffset = binary.BigEndian.Uint64(s.mm[fieldsIndexOffset : fieldsIndexOffset+8]) - - storedIndexOffset := fieldsIndexOffset - 8 - s.storedIndexOffset = binary.BigEndian.Uint64(s.mm[storedIndexOffset : storedIndexOffset+8]) - - numDocsOffset := storedIndexOffset - 8 - s.numDocs = binary.BigEndian.Uint64(s.mm[numDocsOffset : numDocsOffset+8]) - return nil -} - -func (s *SegmentBase) loadFields() error { - // NOTE for now we assume the fields index immediately precedes - // the footer, and if this changes, need to adjust accordingly (or - // store explicit length), where s.mem was sliced from s.mm in Open(). 
- fieldsIndexEnd := uint64(len(s.mem)) - - // iterate through fields index - var fieldID uint64 - for s.fieldsIndexOffset+(8*fieldID) < fieldsIndexEnd { - addr := binary.BigEndian.Uint64(s.mem[s.fieldsIndexOffset+(8*fieldID) : s.fieldsIndexOffset+(8*fieldID)+8]) - - dictLoc, read := binary.Uvarint(s.mem[addr:fieldsIndexEnd]) - n := uint64(read) - s.dictLocs = append(s.dictLocs, dictLoc) - - var nameLen uint64 - nameLen, read = binary.Uvarint(s.mem[addr+n : fieldsIndexEnd]) - n += uint64(read) - - name := string(s.mem[addr+n : addr+n+nameLen]) - s.fieldsInv = append(s.fieldsInv, name) - s.fieldsMap[name] = uint16(fieldID + 1) - - fieldID++ - } - return nil -} - -// Dictionary returns the term dictionary for the specified field -func (s *SegmentBase) Dictionary(field string) (segment.TermDictionary, error) { - dict, err := s.dictionary(field) - if err == nil && dict == nil { - return emptyDictionary, nil - } - return dict, err -} - -func (sb *SegmentBase) dictionary(field string) (rv *Dictionary, err error) { - fieldIDPlus1 := sb.fieldsMap[field] - if fieldIDPlus1 > 0 { - rv = &Dictionary{ - sb: sb, - field: field, - fieldID: fieldIDPlus1 - 1, - } - - dictStart := sb.dictLocs[rv.fieldID] - if dictStart > 0 { - var ok bool - sb.m.Lock() - if rv.fst, ok = sb.fieldFSTs[rv.fieldID]; !ok { - // read the length of the vellum data - vellumLen, read := binary.Uvarint(sb.mem[dictStart : dictStart+binary.MaxVarintLen64]) - fstBytes := sb.mem[dictStart+uint64(read) : dictStart+uint64(read)+vellumLen] - rv.fst, err = vellum.Load(fstBytes) - if err != nil { - sb.m.Unlock() - return nil, fmt.Errorf("dictionary field %s vellum err: %v", field, err) - } - - sb.fieldFSTs[rv.fieldID] = rv.fst - } - - sb.m.Unlock() - rv.fstReader, err = rv.fst.Reader() - if err != nil { - return nil, fmt.Errorf("dictionary field %s vellum reader err: %v", field, err) - } - } - } - - return rv, nil -} - -// visitDocumentCtx holds data structures that are reusable across -// multiple VisitDocument() 
calls to avoid memory allocations -type visitDocumentCtx struct { - buf []byte - reader bytes.Reader - arrayPos []uint64 -} - -var visitDocumentCtxPool = sync.Pool{ - New: func() interface{} { - reuse := &visitDocumentCtx{} - return reuse - }, -} - -// VisitStoredFields invokes the StoredFieldValueVisitor for each stored field -// for the specified doc number -func (s *SegmentBase) VisitStoredFields(num uint64, visitor segment.StoredFieldValueVisitor) error { - vdc := visitDocumentCtxPool.Get().(*visitDocumentCtx) - defer visitDocumentCtxPool.Put(vdc) - return s.visitStoredFields(vdc, num, visitor) -} - -func (s *SegmentBase) visitStoredFields(vdc *visitDocumentCtx, num uint64, - visitor segment.StoredFieldValueVisitor) error { - // first make sure this is a valid number in this segment - if num < s.numDocs { - meta, compressed := s.getDocStoredMetaAndCompressed(num) - - vdc.reader.Reset(meta) - - // handle _id field special case - idFieldValLen, err := binary.ReadUvarint(&vdc.reader) - if err != nil { - return err - } - idFieldVal := compressed[:idFieldValLen] - - keepGoing := visitor("_id", byte('t'), idFieldVal, nil) - if !keepGoing { - visitDocumentCtxPool.Put(vdc) - return nil - } - - // handle non-"_id" fields - compressed = compressed[idFieldValLen:] - - uncompressed, err := snappy.Decode(vdc.buf[:cap(vdc.buf)], compressed) - if err != nil { - return err - } - - for keepGoing { - field, err := binary.ReadUvarint(&vdc.reader) - if err == io.EOF { - break - } - if err != nil { - return err - } - typ, err := binary.ReadUvarint(&vdc.reader) - if err != nil { - return err - } - offset, err := binary.ReadUvarint(&vdc.reader) - if err != nil { - return err - } - l, err := binary.ReadUvarint(&vdc.reader) - if err != nil { - return err - } - numap, err := binary.ReadUvarint(&vdc.reader) - if err != nil { - return err - } - var arrayPos []uint64 - if numap > 0 { - if cap(vdc.arrayPos) < int(numap) { - vdc.arrayPos = make([]uint64, numap) - } - arrayPos = 
vdc.arrayPos[:numap] - for i := 0; i < int(numap); i++ { - ap, err := binary.ReadUvarint(&vdc.reader) - if err != nil { - return err - } - arrayPos[i] = ap - } - } - - value := uncompressed[offset : offset+l] - keepGoing = visitor(s.fieldsInv[field], byte(typ), value, arrayPos) - } - - vdc.buf = uncompressed - } - return nil -} - -// DocID returns the value of the _id field for the given docNum -func (s *SegmentBase) DocID(num uint64) ([]byte, error) { - if num >= s.numDocs { - return nil, nil - } - - vdc := visitDocumentCtxPool.Get().(*visitDocumentCtx) - - meta, compressed := s.getDocStoredMetaAndCompressed(num) - - vdc.reader.Reset(meta) - - // handle _id field special case - idFieldValLen, err := binary.ReadUvarint(&vdc.reader) - if err != nil { - return nil, err - } - idFieldVal := compressed[:idFieldValLen] - - visitDocumentCtxPool.Put(vdc) - - return idFieldVal, nil -} - -// Count returns the number of documents in this segment. -func (s *SegmentBase) Count() uint64 { - return s.numDocs -} - -// DocNumbers returns a bitset corresponding to the doc numbers of all the -// provided _id strings -func (s *SegmentBase) DocNumbers(ids []string) (*roaring.Bitmap, error) { - rv := roaring.New() - - if len(s.fieldsMap) > 0 { - idDict, err := s.dictionary("_id") - if err != nil { - return nil, err - } - - postingsList := emptyPostingsList - - sMax, err := idDict.fst.GetMaxKey() - if err != nil { - return nil, err - } - sMaxStr := string(sMax) - filteredIds := make([]string, 0, len(ids)) - for _, id := range ids { - if id <= sMaxStr { - filteredIds = append(filteredIds, id) - } - } - - for _, id := range filteredIds { - postingsList, err = idDict.postingsList([]byte(id), nil, postingsList) - if err != nil { - return nil, err - } - postingsList.OrInto(rv) - } - } - - return rv, nil -} - -// Fields returns the field names used in this segment -func (s *SegmentBase) Fields() []string { - return s.fieldsInv -} - -// Path returns the path of this segment on disk -func (s 
*Segment) Path() string { - return s.path -} - -// Close releases all resources associated with this segment -func (s *Segment) Close() (err error) { - return s.DecRef() -} - -func (s *Segment) closeActual() (err error) { - if s.mm != nil { - err = s.mm.Unmap() - } - // try to close file even if unmap failed - if s.f != nil { - err2 := s.f.Close() - if err == nil { - // try to return first error - err = err2 - } - } - return -} - -// some helpers i started adding for the command-line utility - -// Data returns the underlying mmaped data slice -func (s *Segment) Data() []byte { - return s.mm -} - -// CRC returns the CRC value stored in the file footer -func (s *Segment) CRC() uint32 { - return s.crc -} - -// Version returns the file version in the file footer -func (s *Segment) Version() uint32 { - return s.version -} - -// ChunkFactor returns the chunk factor in the file footer -func (s *Segment) ChunkMode() uint32 { - return s.chunkMode -} - -// FieldsIndexOffset returns the fields index offset in the file footer -func (s *Segment) FieldsIndexOffset() uint64 { - return s.fieldsIndexOffset -} - -// StoredIndexOffset returns the stored value index offset in the file footer -func (s *Segment) StoredIndexOffset() uint64 { - return s.storedIndexOffset -} - -// DocValueOffset returns the docValue offset in the file footer -func (s *Segment) DocValueOffset() uint64 { - return s.docValueOffset -} - -// NumDocs returns the number of documents in the file footer -func (s *Segment) NumDocs() uint64 { - return s.numDocs -} - -// DictAddr is a helper function to compute the file offset where the -// dictionary is stored for the specified field. 
-func (s *Segment) DictAddr(field string) (uint64, error) { - fieldIDPlus1, ok := s.fieldsMap[field] - if !ok { - return 0, fmt.Errorf("no such field '%s'", field) - } - - return s.dictLocs[fieldIDPlus1-1], nil -} - -func (s *SegmentBase) loadDvReaders() error { - if s.docValueOffset == fieldNotUninverted || s.numDocs == 0 { - return nil - } - - var read uint64 - for fieldID, field := range s.fieldsInv { - var fieldLocStart, fieldLocEnd uint64 - var n int - fieldLocStart, n = binary.Uvarint(s.mem[s.docValueOffset+read : s.docValueOffset+read+binary.MaxVarintLen64]) - if n <= 0 { - return fmt.Errorf("loadDvReaders: failed to read the docvalue offset start for field %d", fieldID) - } - read += uint64(n) - fieldLocEnd, n = binary.Uvarint(s.mem[s.docValueOffset+read : s.docValueOffset+read+binary.MaxVarintLen64]) - if n <= 0 { - return fmt.Errorf("loadDvReaders: failed to read the docvalue offset end for field %d", fieldID) - } - read += uint64(n) - - fieldDvReader, err := s.loadFieldDocValueReader(field, fieldLocStart, fieldLocEnd) - if err != nil { - return err - } - if fieldDvReader != nil { - s.fieldDvReaders[uint16(fieldID)] = fieldDvReader - s.fieldDvNames = append(s.fieldDvNames, field) - } - } - - return nil -} diff --git a/vendor/github.com/blevesearch/zapx/v12/sizes.go b/vendor/github.com/blevesearch/zapx/v12/sizes.go deleted file mode 100644 index 34166ea33..000000000 --- a/vendor/github.com/blevesearch/zapx/v12/sizes.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright (c) 2020 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package zap - -import ( - "reflect" -) - -func init() { - var b bool - SizeOfBool = int(reflect.TypeOf(b).Size()) - var f32 float32 - SizeOfFloat32 = int(reflect.TypeOf(f32).Size()) - var f64 float64 - SizeOfFloat64 = int(reflect.TypeOf(f64).Size()) - var i int - SizeOfInt = int(reflect.TypeOf(i).Size()) - var m map[int]int - SizeOfMap = int(reflect.TypeOf(m).Size()) - var ptr *int - SizeOfPtr = int(reflect.TypeOf(ptr).Size()) - var slice []int - SizeOfSlice = int(reflect.TypeOf(slice).Size()) - var str string - SizeOfString = int(reflect.TypeOf(str).Size()) - var u8 uint8 - SizeOfUint8 = int(reflect.TypeOf(u8).Size()) - var u16 uint16 - SizeOfUint16 = int(reflect.TypeOf(u16).Size()) - var u32 uint32 - SizeOfUint32 = int(reflect.TypeOf(u32).Size()) - var u64 uint64 - SizeOfUint64 = int(reflect.TypeOf(u64).Size()) -} - -var SizeOfBool int -var SizeOfFloat32 int -var SizeOfFloat64 int -var SizeOfInt int -var SizeOfMap int -var SizeOfPtr int -var SizeOfSlice int -var SizeOfString int -var SizeOfUint8 int -var SizeOfUint16 int -var SizeOfUint32 int -var SizeOfUint64 int diff --git a/vendor/github.com/blevesearch/zapx/v12/write.go b/vendor/github.com/blevesearch/zapx/v12/write.go deleted file mode 100644 index 77aefdbfc..000000000 --- a/vendor/github.com/blevesearch/zapx/v12/write.go +++ /dev/null @@ -1,145 +0,0 @@ -// Copyright (c) 2017 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package zap - -import ( - "encoding/binary" - "io" - - "github.com/RoaringBitmap/roaring" -) - -// writes out the length of the roaring bitmap in bytes as varint -// then writes out the roaring bitmap itself -func writeRoaringWithLen(r *roaring.Bitmap, w io.Writer, - reuseBufVarint []byte) (int, error) { - buf, err := r.ToBytes() - if err != nil { - return 0, err - } - - var tw int - - // write out the length - n := binary.PutUvarint(reuseBufVarint, uint64(len(buf))) - nw, err := w.Write(reuseBufVarint[:n]) - tw += nw - if err != nil { - return tw, err - } - - // write out the roaring bytes - nw, err = w.Write(buf) - tw += nw - if err != nil { - return tw, err - } - - return tw, nil -} - -func persistFields(fieldsInv []string, w *CountHashWriter, dictLocs []uint64) (uint64, error) { - var rv uint64 - var fieldsOffsets []uint64 - - for fieldID, fieldName := range fieldsInv { - // record start of this field - fieldsOffsets = append(fieldsOffsets, uint64(w.Count())) - - // write out the dict location and field name length - _, err := writeUvarints(w, dictLocs[fieldID], uint64(len(fieldName))) - if err != nil { - return 0, err - } - - // write out the field name - _, err = w.Write([]byte(fieldName)) - if err != nil { - return 0, err - } - } - - // now write out the fields index - rv = uint64(w.Count()) - for fieldID := range fieldsInv { - err := binary.Write(w, binary.BigEndian, fieldsOffsets[fieldID]) - if err != nil { - return 0, err - } - } - - return rv, nil -} - -// FooterSize is the size of the footer record in bytes -// crc + ver + chunk + field offset + stored offset + num docs + docValueOffset -const FooterSize = 4 + 4 + 4 + 8 + 8 + 8 + 8 - -func persistFooter(numDocs, storedIndexOffset, fieldsIndexOffset, docValueOffset uint64, - chunkMode uint32, crcBeforeFooter uint32, writerIn io.Writer) error { - w := NewCountHashWriter(writerIn) - w.crc = 
crcBeforeFooter - - // write out the number of docs - err := binary.Write(w, binary.BigEndian, numDocs) - if err != nil { - return err - } - // write out the stored field index location: - err = binary.Write(w, binary.BigEndian, storedIndexOffset) - if err != nil { - return err - } - // write out the field index location - err = binary.Write(w, binary.BigEndian, fieldsIndexOffset) - if err != nil { - return err - } - // write out the fieldDocValue location - err = binary.Write(w, binary.BigEndian, docValueOffset) - if err != nil { - return err - } - // write out 32-bit chunk factor - err = binary.Write(w, binary.BigEndian, chunkMode) - if err != nil { - return err - } - // write out 32-bit version - err = binary.Write(w, binary.BigEndian, Version) - if err != nil { - return err - } - // write out CRC-32 of everything upto but not including this CRC - err = binary.Write(w, binary.BigEndian, w.crc) - if err != nil { - return err - } - return nil -} - -func writeUvarints(w io.Writer, vals ...uint64) (tw int, err error) { - buf := make([]byte, binary.MaxVarintLen64) - for _, val := range vals { - n := binary.PutUvarint(buf, val) - var nw int - nw, err = w.Write(buf[:n]) - tw += nw - if err != nil { - return tw, err - } - } - return tw, err -} diff --git a/vendor/github.com/blevesearch/zapx/v12/zap.md b/vendor/github.com/blevesearch/zapx/v12/zap.md deleted file mode 100644 index d74dc548b..000000000 --- a/vendor/github.com/blevesearch/zapx/v12/zap.md +++ /dev/null @@ -1,177 +0,0 @@ -# ZAP File Format - -## Legend - -### Sections - - |========| - | | section - |========| - -### Fixed-size fields - - |--------| |----| |--| |-| - | | uint64 | | uint32 | | uint16 | | uint8 - |--------| |----| |--| |-| - -### Varints - - |~~~~~~~~| - | | varint(up to uint64) - |~~~~~~~~| - -### Arbitrary-length fields - - |--------...---| - | | arbitrary-length field (string, vellum, roaring bitmap) - |--------...---| - -### Chunked data - - [--------] - [ ] - [--------] - -## Overview - 
-Footer section describes the configuration of particular ZAP file. The format of footer is version-dependent, so it is necessary to check `V` field before the parsing. - - |==================================================| - | Stored Fields | - |==================================================| - |-----> | Stored Fields Index | - | |==================================================| - | | Dictionaries + Postings + DocValues | - | |==================================================| - | |---> | DocValues Index | - | | |==================================================| - | | | Fields | - | | |==================================================| - | | |-> | Fields Index | - | | | |========|========|========|========|====|====|====| - | | | | D# | SF | F | FDV | CF | V | CC | (Footer) - | | | |========|====|===|====|===|====|===|====|====|====| - | | | | | | - |-+-+-----------------| | | - | |--------------------------| | - |-------------------------------------| - - D#. Number of Docs. - SF. Stored Fields Index Offset. - F. Field Index Offset. - FDV. Field DocValue Offset. - CF. Chunk Factor. - V. Version. - CC. CRC32. - -## Stored Fields - -Stored Fields Index is `D#` consecutive 64-bit unsigned integers - offsets, where relevant Stored Fields Data records are located. - - 0 [SF] [SF + D# * 8] - | Stored Fields | Stored Fields Index | - |================================|==================================| - | | | - | |--------------------| ||--------|--------|. . .|--------|| - | |-> | Stored Fields Data | || 0 | 1 | | D# - 1 || - | | |--------------------| ||--------|----|---|. . .|--------|| - | | | | | - |===|============================|==============|===================| - | | - |-------------------------------------------| - -Stored Fields Data is an arbitrary size record, which consists of metadata and [Snappy](https://github.com/golang/snappy)-compressed data. 
- - Stored Fields Data - |~~~~~~~~|~~~~~~~~|~~~~~~~~...~~~~~~~~|~~~~~~~~...~~~~~~~~| - | MDS | CDS | MD | CD | - |~~~~~~~~|~~~~~~~~|~~~~~~~~...~~~~~~~~|~~~~~~~~...~~~~~~~~| - - MDS. Metadata size. - CDS. Compressed data size. - MD. Metadata. - CD. Snappy-compressed data. - -## Fields - -Fields Index section located between addresses `F` and `len(file) - len(footer)` and consist of `uint64` values (`F1`, `F2`, ...) which are offsets to records in Fields section. We have `F# = (len(file) - len(footer) - F) / sizeof(uint64)` fields. - - - (...) [F] [F + F#] - | Fields | Fields Index. | - |================================|================================| - | | | - | |~~~~~~~~|~~~~~~~~|---...---|||--------|--------|...|--------|| - ||->| Dict | Length | Name ||| 0 | 1 | | F# - 1 || - || |~~~~~~~~|~~~~~~~~|---...---|||--------|----|---|...|--------|| - || | | | - ||===============================|==============|=================| - | | - |----------------------------------------------| - - -## Dictionaries + Postings - -Each of fields has its own dictionary, encoded in [Vellum](https://github.com/couchbase/vellum) format. Dictionary consists of pairs `(term, offset)`, where `offset` indicates the position of postings (list of documents) for this particular term. - - |================================================================|- Dictionaries + - | | Postings + - | | DocValues - | Freq/Norm (chunked) | - | [~~~~~~|~~~~~~~~~~~~~~~~~~~~~~~~~~~~~] | - | |->[ Freq | Norm (float32 under varint) ] | - | | [~~~~~~|~~~~~~~~~~~~~~~~~~~~~~~~~~~~~] | - | | | - | |------------------------------------------------------------| | - | Location Details (chunked) | | - | [~~~~~~|~~~~~|~~~~~~~|~~~~~|~~~~~~|~~~~~~~~|~~~~~] | | - | |->[ Size | Pos | Start | End | Arr# | ArrPos | ... 
] | | - | | [~~~~~~|~~~~~|~~~~~~~|~~~~~|~~~~~~|~~~~~~~~|~~~~~] | | - | | | | - | |----------------------| | | - | Postings List | | | - | |~~~~~~~~|~~~~~|~~|~~~~~~~~|-----------...--| | | - | |->| F/N | LD | Length | ROARING BITMAP | | | - | | |~~~~~|~~|~~~~~~~~|~~~~~~~~|-----------...--| | | - | | |----------------------------------------------| | - | |--------------------------------------| | - | Dictionary | | - | |~~~~~~~~|--------------------------|-...-| | - | |->| Length | VELLUM DATA : (TERM -> OFFSET) | | - | | |~~~~~~~~|----------------------------...-| | - | | | - |======|=========================================================|- DocValues Index - | | | - |======|=========================================================|- Fields - | | | - | |~~~~|~~~|~~~~~~~~|---...---| | - | | Dict | Length | Name | | - | |~~~~~~~~|~~~~~~~~|---...---| | - | | - |================================================================| - -## DocValues - -DocValues Index is `F#` pairs of varints, one pair per field. Each pair of varints indicates start and end point of DocValues slice. - - |================================================================| - | |------...--| | - | |->| DocValues |<-| | - | | |------...--| | | - |==|=================|===========================================|- DocValues Index - ||~|~~~~~~~~~|~~~~~~~|~~| |~~~~~~~~~~~~~~|~~~~~~~~~~~~|| - || DV1 START | DV1 STOP | . . . . . | DV(F#) START | DV(F#) END || - ||~~~~~~~~~~~|~~~~~~~~~~| |~~~~~~~~~~~~~~|~~~~~~~~~~~~|| - |================================================================| - -DocValues is chunked Snappy-compressed values for each document and field. - - [~~~~~~~~~~~~~~~|~~~~~~|~~~~~~~~~|-...-|~~~~~~|~~~~~~~~~|--------------------...-] - [ Doc# in Chunk | Doc1 | Offset1 | ... | DocN | OffsetN | SNAPPY COMPRESSED DATA ] - [~~~~~~~~~~~~~~~|~~~~~~|~~~~~~~~~|-...-|~~~~~~|~~~~~~~~~|--------------------...-] - -Last 16 bytes are description of chunks. 
- - |~~~~~~~~~~~~...~|----------------|----------------| - | Chunk Sizes | Chunk Size Arr | Chunk# | - |~~~~~~~~~~~~...~|----------------|----------------| diff --git a/vendor/github.com/blevesearch/zapx/v13/.gitignore b/vendor/github.com/blevesearch/zapx/v13/.gitignore deleted file mode 100644 index 46d1cfad5..000000000 --- a/vendor/github.com/blevesearch/zapx/v13/.gitignore +++ /dev/null @@ -1,12 +0,0 @@ -#* -*.sublime-* -*~ -.#* -.project -.settings -**/.idea/ -**/*.iml -.DS_Store -/cmd/zap/zap -*.test -tags diff --git a/vendor/github.com/blevesearch/zapx/v13/.golangci.yml b/vendor/github.com/blevesearch/zapx/v13/.golangci.yml deleted file mode 100644 index f0f2f6067..000000000 --- a/vendor/github.com/blevesearch/zapx/v13/.golangci.yml +++ /dev/null @@ -1,29 +0,0 @@ -linters: - # please, do not use `enable-all`: it's deprecated and will be removed soon. - # inverted configuration with `enable-all` and `disable` is not scalable during updates of golangci-lint - disable-all: true - enable: - - bodyclose - - deadcode - - depguard - - dupl - - errcheck - - gofmt - - goimports - - goprintffuncname - - gosec - - gosimple - - govet - - ineffassign - - interfacer - - misspell - - nakedret - - nolintlint - - rowserrcheck - - staticcheck - - structcheck - - typecheck - - unused - - varcheck - - whitespace - diff --git a/vendor/github.com/blevesearch/zapx/v13/LICENSE b/vendor/github.com/blevesearch/zapx/v13/LICENSE deleted file mode 100644 index 7a4a3ea24..000000000 --- a/vendor/github.com/blevesearch/zapx/v13/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. 
- - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of 
the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. \ No newline at end of file diff --git a/vendor/github.com/blevesearch/zapx/v13/README.md b/vendor/github.com/blevesearch/zapx/v13/README.md deleted file mode 100644 index 4cbf1a145..000000000 --- a/vendor/github.com/blevesearch/zapx/v13/README.md +++ /dev/null @@ -1,163 +0,0 @@ -# zapx file format - -The zapx module is fork of [zap](https://github.com/blevesearch/zap) module which maintains file format compatibility, but removes dependency on bleve, and instead depends only on the indepenent interface modules: - -- [bleve_index_api](https://github.com/blevesearch/scorch_segment_api) -- [scorch_segment_api](https://github.com/blevesearch/scorch_segment_api) - -Advanced ZAP File Format Documentation is [here](zap.md). - -The file is written in the reverse order that we typically access data. 
This helps us write in one pass since later sections of the file require file offsets of things we've already written. - -Current usage: - -- mmap the entire file -- crc-32 bytes and version are in fixed position at end of the file -- reading remainder of footer could be version specific -- remainder of footer gives us: - - 3 important offsets (docValue , fields index and stored data index) - - 2 important values (number of docs and chunk factor) -- field data is processed once and memoized onto the heap so that we never have to go back to disk for it -- access to stored data by doc number means first navigating to the stored data index, then accessing a fixed position offset into that slice, which gives us the actual address of the data. the first bytes of that section tell us the size of data so that we know where it ends. -- access to all other indexed data follows the following pattern: - - first know the field name -> convert to id - - next navigate to term dictionary for that field - - some operations stop here and do dictionary ops - - next use dictionary to navigate to posting list for a specific term - - walk posting list - - if necessary, walk posting details as we go - - if location info is desired, consult location bitmap to see if it is there - -## stored fields section - -- for each document - - preparation phase: - - produce a slice of metadata bytes and data bytes - - produce these slices in field id order - - field value is appended to the data slice - - metadata slice is varint encoded with the following values for each field value - - field id (uint16) - - field type (byte) - - field value start offset in uncompressed data slice (uint64) - - field value length (uint64) - - field number of array positions (uint64) - - one additional value for each array position (uint64) - - compress the data slice using snappy - - file writing phase: - - remember the start offset for this document - - write out meta data length (varint uint64) - - write out 
compressed data length (varint uint64) - - write out the metadata bytes - - write out the compressed data bytes - -## stored fields idx - -- for each document - - write start offset (remembered from previous section) of stored data (big endian uint64) - -With this index and a known document number, we have direct access to all the stored field data. - -## posting details (freq/norm) section - -- for each posting list - - produce a slice containing multiple consecutive chunks (each chunk is varint stream) - - produce a slice remembering offsets of where each chunk starts - - preparation phase: - - for each hit in the posting list - - if this hit is in next chunk close out encoding of last chunk and record offset start of next - - encode term frequency (uint64) - - encode norm factor (float32) - - file writing phase: - - remember start position for this posting list details - - write out number of chunks that follow (varint uint64) - - write out length of each chunk (each a varint uint64) - - write out the byte slice containing all the chunk data - -If you know the doc number you're interested in, this format lets you jump to the correct chunk (docNum/chunkFactor) directly and then seek within that chunk until you find it. 
- -## posting details (location) section - -- for each posting list - - produce a slice containing multiple consecutive chunks (each chunk is varint stream) - - produce a slice remembering offsets of where each chunk starts - - preparation phase: - - for each hit in the posting list - - if this hit is in next chunk close out encoding of last chunk and record offset start of next - - encode field (uint16) - - encode field pos (uint64) - - encode field start (uint64) - - encode field end (uint64) - - encode number of array positions to follow (uint64) - - encode each array position (each uint64) - - file writing phase: - - remember start position for this posting list details - - write out number of chunks that follow (varint uint64) - - write out length of each chunk (each a varint uint64) - - write out the byte slice containing all the chunk data - -If you know the doc number you're interested in, this format lets you jump to the correct chunk (docNum/chunkFactor) directly and then seek within that chunk until you find it. 
- -## postings list section - -- for each posting list - - preparation phase: - - encode roaring bitmap posting list to bytes (so we know the length) - - file writing phase: - - remember the start position for this posting list - - write freq/norm details offset (remembered from previous, as varint uint64) - - write location details offset (remembered from previous, as varint uint64) - - write length of encoded roaring bitmap - - write the serialized roaring bitmap data - -## dictionary - -- for each field - - preparation phase: - - encode vellum FST with dictionary data pointing to file offset of posting list (remembered from previous) - - file writing phase: - - remember the start position of this persistDictionary - - write length of vellum data (varint uint64) - - write out vellum data - -## fields section - -- for each field - - file writing phase: - - remember start offset for each field - - write dictionary address (remembered from previous) (varint uint64) - - write length of field name (varint uint64) - - write field name bytes - -## fields idx - -- for each field - - file writing phase: - - write big endian uint64 of start offset for each field - -NOTE: currently we don't know or record the length of this fields index. Instead we rely on the fact that we know it immediately precedes a footer of known size. 
- -## fields DocValue - -- for each field - - preparation phase: - - produce a slice containing multiple consecutive chunks, where each chunk is composed of a meta section followed by compressed columnar field data - - produce a slice remembering the length of each chunk - - file writing phase: - - remember the start position of this first field DocValue offset in the footer - - write out number of chunks that follow (varint uint64) - - write out length of each chunk (each a varint uint64) - - write out the byte slice containing all the chunk data - -NOTE: currently the meta header inside each chunk gives clue to the location offsets and size of the data pertaining to a given docID and any -read operation leverage that meta information to extract the document specific data from the file. - -## footer - -- file writing phase - - write number of docs (big endian uint64) - - write stored field index location (big endian uint64) - - write field index location (big endian uint64) - - write field docValue location (big endian uint64) - - write out chunk factor (big endian uint32) - - write out version (big endian uint32) - - write out file CRC of everything preceding this (big endian uint32) diff --git a/vendor/github.com/blevesearch/zapx/v13/build.go b/vendor/github.com/blevesearch/zapx/v13/build.go deleted file mode 100644 index 5a25eef77..000000000 --- a/vendor/github.com/blevesearch/zapx/v13/build.go +++ /dev/null @@ -1,156 +0,0 @@ -// Copyright (c) 2017 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package zap - -import ( - "bufio" - "math" - "os" - - "github.com/blevesearch/vellum" -) - -const Version uint32 = 13 - -const Type string = "zap" - -const fieldNotUninverted = math.MaxUint64 - -func (sb *SegmentBase) Persist(path string) error { - return PersistSegmentBase(sb, path) -} - -// PersistSegmentBase persists SegmentBase in the zap file format. -func PersistSegmentBase(sb *SegmentBase, path string) error { - flag := os.O_RDWR | os.O_CREATE - - f, err := os.OpenFile(path, flag, 0600) - if err != nil { - return err - } - - cleanup := func() { - _ = f.Close() - _ = os.Remove(path) - } - - br := bufio.NewWriter(f) - - _, err = br.Write(sb.mem) - if err != nil { - cleanup() - return err - } - - err = persistFooter(sb.numDocs, sb.storedIndexOffset, sb.fieldsIndexOffset, sb.docValueOffset, - sb.chunkMode, sb.memCRC, br) - if err != nil { - cleanup() - return err - } - - err = br.Flush() - if err != nil { - cleanup() - return err - } - - err = f.Sync() - if err != nil { - cleanup() - return err - } - - err = f.Close() - if err != nil { - cleanup() - return err - } - - return nil -} - -func persistStoredFieldValues(fieldID int, - storedFieldValues [][]byte, stf []byte, spf [][]uint64, - curr int, metaEncode varintEncoder, data []byte) ( - int, []byte, error) { - for i := 0; i < len(storedFieldValues); i++ { - // encode field - _, err := metaEncode(uint64(fieldID)) - if err != nil { - return 0, nil, err - } - // encode type - _, err = metaEncode(uint64(stf[i])) - if err != nil { - return 0, nil, err - } - // encode start offset - _, err = metaEncode(uint64(curr)) - if err != nil { - return 0, nil, err - } - // end len - _, err = metaEncode(uint64(len(storedFieldValues[i]))) - if err != nil { - return 0, nil, err - } - // encode number of array pos - _, err = metaEncode(uint64(len(spf[i]))) - if err != nil { - return 0, nil, err - } - // encode all array 
positions - for _, pos := range spf[i] { - _, err = metaEncode(pos) - if err != nil { - return 0, nil, err - } - } - - data = append(data, storedFieldValues[i]...) - curr += len(storedFieldValues[i]) - } - - return curr, data, nil -} - -func InitSegmentBase(mem []byte, memCRC uint32, chunkMode uint32, - fieldsMap map[string]uint16, fieldsInv []string, numDocs uint64, - storedIndexOffset uint64, fieldsIndexOffset uint64, docValueOffset uint64, - dictLocs []uint64) (*SegmentBase, error) { - sb := &SegmentBase{ - mem: mem, - memCRC: memCRC, - chunkMode: chunkMode, - fieldsMap: fieldsMap, - fieldsInv: fieldsInv, - numDocs: numDocs, - storedIndexOffset: storedIndexOffset, - fieldsIndexOffset: fieldsIndexOffset, - docValueOffset: docValueOffset, - dictLocs: dictLocs, - fieldDvReaders: make(map[uint16]*docValueReader), - fieldFSTs: make(map[uint16]*vellum.FST), - } - sb.updateSize() - - err := sb.loadDvReaders() - if err != nil { - return nil, err - } - - return sb, nil -} diff --git a/vendor/github.com/blevesearch/zapx/v13/chunk.go b/vendor/github.com/blevesearch/zapx/v13/chunk.go deleted file mode 100644 index fe9f398da..000000000 --- a/vendor/github.com/blevesearch/zapx/v13/chunk.go +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright (c) 2019 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package zap - -import ( - "fmt" -) - -// LegacyChunkMode was the original chunk mode (always chunk size 1024) -// this mode is still used for chunking doc values. 
-var LegacyChunkMode uint32 = 1024 - -// DefaultChunkMode is the most recent improvement to chunking and should -// be used by default. -var DefaultChunkMode uint32 = 1025 - -func getChunkSize(chunkMode uint32, cardinality uint64, maxDocs uint64) (uint64, error) { - switch { - // any chunkMode <= 1024 will always chunk with chunkSize=chunkMode - case chunkMode <= 1024: - // legacy chunk size - return uint64(chunkMode), nil - - case chunkMode == 1025: - // attempt at simple improvement - // theory - the point of chunking is to put a bound on the maximum number of - // calls to Next() needed to find a random document. ie, you should be able - // to do one jump to the correct chunk, and then walk through at most - // chunk-size items - // previously 1024 was chosen as the chunk size, but this is particularly - // wasteful for low cardinality terms. the observation is that if there - // are less than 1024 items, why not put them all in one chunk, - // this way you'll still achieve the same goal of visiting at most - // chunk-size items. - // no attempt is made to tweak any other case - if cardinality <= 1024 { - return maxDocs, nil - } - return 1024, nil - } - return 0, fmt.Errorf("unknown chunk mode %d", chunkMode) -} diff --git a/vendor/github.com/blevesearch/zapx/v13/contentcoder.go b/vendor/github.com/blevesearch/zapx/v13/contentcoder.go deleted file mode 100644 index c145b5a11..000000000 --- a/vendor/github.com/blevesearch/zapx/v13/contentcoder.go +++ /dev/null @@ -1,243 +0,0 @@ -// Copyright (c) 2017 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package zap - -import ( - "bytes" - "encoding/binary" - "io" - "reflect" - - "github.com/golang/snappy" -) - -var reflectStaticSizeMetaData int - -func init() { - var md MetaData - reflectStaticSizeMetaData = int(reflect.TypeOf(md).Size()) -} - -var termSeparator byte = 0xff -var termSeparatorSplitSlice = []byte{termSeparator} - -type chunkedContentCoder struct { - final []byte - chunkSize uint64 - currChunk uint64 - chunkLens []uint64 - - w io.Writer - progressiveWrite bool - - chunkMetaBuf bytes.Buffer - chunkBuf bytes.Buffer - - chunkMeta []MetaData - - compressed []byte // temp buf for snappy compression -} - -// MetaData represents the data information inside a -// chunk. -type MetaData struct { - DocNum uint64 // docNum of the data inside the chunk - DocDvOffset uint64 // offset of data inside the chunk for the given docid -} - -// newChunkedContentCoder returns a new chunk content coder which -// packs data into chunks based on the provided chunkSize -func newChunkedContentCoder(chunkSize uint64, maxDocNum uint64, - w io.Writer, progressiveWrite bool) *chunkedContentCoder { - total := maxDocNum/chunkSize + 1 - rv := &chunkedContentCoder{ - chunkSize: chunkSize, - chunkLens: make([]uint64, total), - chunkMeta: make([]MetaData, 0, total), - w: w, - progressiveWrite: progressiveWrite, - } - - return rv -} - -// Reset lets you reuse this chunked content coder. Buffers are reset -// and re used. You cannot change the chunk size. 
-func (c *chunkedContentCoder) Reset() { - c.currChunk = 0 - c.final = c.final[:0] - c.chunkBuf.Reset() - c.chunkMetaBuf.Reset() - for i := range c.chunkLens { - c.chunkLens[i] = 0 - } - c.chunkMeta = c.chunkMeta[:0] -} - -func (c *chunkedContentCoder) SetChunkSize(chunkSize uint64, maxDocNum uint64) { - total := int(maxDocNum/chunkSize + 1) - c.chunkSize = chunkSize - if cap(c.chunkLens) < total { - c.chunkLens = make([]uint64, total) - } else { - c.chunkLens = c.chunkLens[:total] - } - if cap(c.chunkMeta) < total { - c.chunkMeta = make([]MetaData, 0, total) - } -} - -// Close indicates you are done calling Add() this allows -// the final chunk to be encoded. -func (c *chunkedContentCoder) Close() error { - return c.flushContents() -} - -func (c *chunkedContentCoder) flushContents() error { - // flush the contents, with meta information at first - buf := make([]byte, binary.MaxVarintLen64) - n := binary.PutUvarint(buf, uint64(len(c.chunkMeta))) - _, err := c.chunkMetaBuf.Write(buf[:n]) - if err != nil { - return err - } - - // write out the metaData slice - for _, meta := range c.chunkMeta { - _, err := writeUvarints(&c.chunkMetaBuf, meta.DocNum, meta.DocDvOffset) - if err != nil { - return err - } - } - - // write the metadata to final data - metaData := c.chunkMetaBuf.Bytes() - c.final = append(c.final, c.chunkMetaBuf.Bytes()...) - // write the compressed data to the final data - c.compressed = snappy.Encode(c.compressed[:cap(c.compressed)], c.chunkBuf.Bytes()) - c.final = append(c.final, c.compressed...) - - c.chunkLens[c.currChunk] = uint64(len(c.compressed) + len(metaData)) - - if c.progressiveWrite { - _, err := c.w.Write(c.final) - if err != nil { - return err - } - c.final = c.final[:0] - } - - return nil -} - -// Add encodes the provided byte slice into the correct chunk for the provided -// doc num. You MUST call Add() with increasing docNums. 
-func (c *chunkedContentCoder) Add(docNum uint64, vals []byte) error { - chunk := docNum / c.chunkSize - if chunk != c.currChunk { - // flush out the previous chunk details - err := c.flushContents() - if err != nil { - return err - } - // clearing the chunk specific meta for next chunk - c.chunkBuf.Reset() - c.chunkMetaBuf.Reset() - c.chunkMeta = c.chunkMeta[:0] - c.currChunk = chunk - } - - // get the starting offset for this doc - dvOffset := c.chunkBuf.Len() - dvSize, err := c.chunkBuf.Write(vals) - if err != nil { - return err - } - - c.chunkMeta = append(c.chunkMeta, MetaData{ - DocNum: docNum, - DocDvOffset: uint64(dvOffset + dvSize), - }) - return nil -} - -// Write commits all the encoded chunked contents to the provided writer. -// -// | ..... data ..... | chunk offsets (varints) -// | position of chunk offsets (uint64) | number of offsets (uint64) | -// -func (c *chunkedContentCoder) Write() (int, error) { - var tw int - - if c.final != nil { - // write out the data section first - nw, err := c.w.Write(c.final) - tw += nw - if err != nil { - return tw, err - } - } - - chunkOffsetsStart := uint64(tw) - - if cap(c.final) < binary.MaxVarintLen64 { - c.final = make([]byte, binary.MaxVarintLen64) - } else { - c.final = c.final[0:binary.MaxVarintLen64] - } - chunkOffsets := modifyLengthsToEndOffsets(c.chunkLens) - // write out the chunk offsets - for _, chunkOffset := range chunkOffsets { - n := binary.PutUvarint(c.final, chunkOffset) - nw, err := c.w.Write(c.final[:n]) - tw += nw - if err != nil { - return tw, err - } - } - - chunkOffsetsLen := uint64(tw) - chunkOffsetsStart - - c.final = c.final[0:8] - // write out the length of chunk offsets - binary.BigEndian.PutUint64(c.final, chunkOffsetsLen) - nw, err := c.w.Write(c.final) - tw += nw - if err != nil { - return tw, err - } - - // write out the number of chunks - binary.BigEndian.PutUint64(c.final, uint64(len(c.chunkLens))) - nw, err = c.w.Write(c.final) - tw += nw - if err != nil { - return tw, err - } - 
- c.final = c.final[:0] - - return tw, nil -} - -// ReadDocValueBoundary elicits the start, end offsets from a -// metaData header slice -func ReadDocValueBoundary(chunk int, metaHeaders []MetaData) (uint64, uint64) { - var start uint64 - if chunk > 0 { - start = metaHeaders[chunk-1].DocDvOffset - } - return start, metaHeaders[chunk].DocDvOffset -} diff --git a/vendor/github.com/blevesearch/zapx/v13/count.go b/vendor/github.com/blevesearch/zapx/v13/count.go deleted file mode 100644 index b6135359f..000000000 --- a/vendor/github.com/blevesearch/zapx/v13/count.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright (c) 2017 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package zap - -import ( - "hash/crc32" - "io" - - segment "github.com/blevesearch/scorch_segment_api/v2" -) - -// CountHashWriter is a wrapper around a Writer which counts the number of -// bytes which have been written and computes a crc32 hash -type CountHashWriter struct { - w io.Writer - crc uint32 - n int - s segment.StatsReporter -} - -// NewCountHashWriter returns a CountHashWriter which wraps the provided Writer -func NewCountHashWriter(w io.Writer) *CountHashWriter { - return &CountHashWriter{w: w} -} - -func NewCountHashWriterWithStatsReporter(w io.Writer, s segment.StatsReporter) *CountHashWriter { - return &CountHashWriter{w: w, s: s} -} - -// Write writes the provided bytes to the wrapped writer and counts the bytes -func (c *CountHashWriter) Write(b []byte) (int, error) { - n, err := c.w.Write(b) - c.crc = crc32.Update(c.crc, crc32.IEEETable, b[:n]) - c.n += n - if c.s != nil { - c.s.ReportBytesWritten(uint64(n)) - } - return n, err -} - -// Count returns the number of bytes written -func (c *CountHashWriter) Count() int { - return c.n -} - -// Sum32 returns the CRC-32 hash of the content written to this writer -func (c *CountHashWriter) Sum32() uint32 { - return c.crc -} diff --git a/vendor/github.com/blevesearch/zapx/v13/dict.go b/vendor/github.com/blevesearch/zapx/v13/dict.go deleted file mode 100644 index e30bf2420..000000000 --- a/vendor/github.com/blevesearch/zapx/v13/dict.go +++ /dev/null @@ -1,158 +0,0 @@ -// Copyright (c) 2017 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package zap - -import ( - "fmt" - - "github.com/RoaringBitmap/roaring" - index "github.com/blevesearch/bleve_index_api" - segment "github.com/blevesearch/scorch_segment_api/v2" - "github.com/blevesearch/vellum" -) - -// Dictionary is the zap representation of the term dictionary -type Dictionary struct { - sb *SegmentBase - field string - fieldID uint16 - fst *vellum.FST - fstReader *vellum.Reader -} - -// represents an immutable, empty dictionary -var emptyDictionary = &Dictionary{} - -// PostingsList returns the postings list for the specified term -func (d *Dictionary) PostingsList(term []byte, except *roaring.Bitmap, - prealloc segment.PostingsList) (segment.PostingsList, error) { - var preallocPL *PostingsList - pl, ok := prealloc.(*PostingsList) - if ok && pl != nil { - preallocPL = pl - } - return d.postingsList(term, except, preallocPL) -} - -func (d *Dictionary) postingsList(term []byte, except *roaring.Bitmap, rv *PostingsList) (*PostingsList, error) { - if d.fstReader == nil { - if rv == nil || rv == emptyPostingsList { - return emptyPostingsList, nil - } - return d.postingsListInit(rv, except), nil - } - - postingsOffset, exists, err := d.fstReader.Get(term) - if err != nil { - return nil, fmt.Errorf("vellum err: %v", err) - } - if !exists { - if rv == nil || rv == emptyPostingsList { - return emptyPostingsList, nil - } - return d.postingsListInit(rv, except), nil - } - - return d.postingsListFromOffset(postingsOffset, except, rv) -} - -func (d *Dictionary) postingsListFromOffset(postingsOffset uint64, except *roaring.Bitmap, rv *PostingsList) (*PostingsList, error) { - rv = d.postingsListInit(rv, except) - - err := rv.read(postingsOffset, d) - if err != nil { - return nil, err - } - - return rv, nil -} - -func (d *Dictionary) postingsListInit(rv *PostingsList, except *roaring.Bitmap) *PostingsList { - if rv == nil || rv == emptyPostingsList { - 
rv = &PostingsList{} - } else { - postings := rv.postings - if postings != nil { - postings.Clear() - } - - *rv = PostingsList{} // clear the struct - - rv.postings = postings - } - rv.sb = d.sb - rv.except = except - return rv -} - -func (d *Dictionary) Contains(key []byte) (bool, error) { - if d.fst != nil { - return d.fst.Contains(key) - } - return false, nil -} - -// AutomatonIterator returns an iterator which only visits terms -// having the the vellum automaton and start/end key range -func (d *Dictionary) AutomatonIterator(a segment.Automaton, - startKeyInclusive, endKeyExclusive []byte) segment.DictionaryIterator { - if d.fst != nil { - rv := &DictionaryIterator{ - d: d, - } - - itr, err := d.fst.Search(a, startKeyInclusive, endKeyExclusive) - if err == nil { - rv.itr = itr - } else if err != vellum.ErrIteratorDone { - rv.err = err - } - - return rv - } - return emptyDictionaryIterator -} - -// DictionaryIterator is an iterator for term dictionary -type DictionaryIterator struct { - d *Dictionary - itr vellum.Iterator - err error - tmp PostingsList - entry index.DictEntry - omitCount bool -} - -var emptyDictionaryIterator = &DictionaryIterator{} - -// Next returns the next entry in the dictionary -func (i *DictionaryIterator) Next() (*index.DictEntry, error) { - if i.err != nil && i.err != vellum.ErrIteratorDone { - return nil, i.err - } else if i.itr == nil || i.err == vellum.ErrIteratorDone { - return nil, nil - } - term, postingsOffset := i.itr.Current() - i.entry.Term = string(term) - if !i.omitCount { - i.err = i.tmp.read(postingsOffset, i.d) - if i.err != nil { - return nil, i.err - } - i.entry.Count = i.tmp.Count() - } - i.err = i.itr.Next() - return &i.entry, nil -} diff --git a/vendor/github.com/blevesearch/zapx/v13/docvalues.go b/vendor/github.com/blevesearch/zapx/v13/docvalues.go deleted file mode 100644 index a530aa5ad..000000000 --- a/vendor/github.com/blevesearch/zapx/v13/docvalues.go +++ /dev/null @@ -1,311 +0,0 @@ -// Copyright (c) 2017 
Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package zap - -import ( - "bytes" - "encoding/binary" - "fmt" - "math" - "reflect" - "sort" - - index "github.com/blevesearch/bleve_index_api" - segment "github.com/blevesearch/scorch_segment_api/v2" - "github.com/golang/snappy" -) - -var reflectStaticSizedocValueReader int - -func init() { - var dvi docValueReader - reflectStaticSizedocValueReader = int(reflect.TypeOf(dvi).Size()) -} - -type docNumTermsVisitor func(docNum uint64, terms []byte) error - -type docVisitState struct { - dvrs map[uint16]*docValueReader - segment *SegmentBase -} - -type docValueReader struct { - field string - curChunkNum uint64 - chunkOffsets []uint64 - dvDataLoc uint64 - curChunkHeader []MetaData - curChunkData []byte // compressed data cache - uncompressed []byte // temp buf for snappy decompression -} - -func (di *docValueReader) size() int { - return reflectStaticSizedocValueReader + SizeOfPtr + - len(di.field) + - len(di.chunkOffsets)*SizeOfUint64 + - len(di.curChunkHeader)*reflectStaticSizeMetaData + - len(di.curChunkData) -} - -func (di *docValueReader) cloneInto(rv *docValueReader) *docValueReader { - if rv == nil { - rv = &docValueReader{} - } - - rv.field = di.field - rv.curChunkNum = math.MaxUint64 - rv.chunkOffsets = di.chunkOffsets // immutable, so it's sharable - rv.dvDataLoc = di.dvDataLoc - rv.curChunkHeader = rv.curChunkHeader[:0] - rv.curChunkData = nil - rv.uncompressed = rv.uncompressed[:0] - 
- return rv -} - -func (di *docValueReader) curChunkNumber() uint64 { - return di.curChunkNum -} - -func (s *SegmentBase) loadFieldDocValueReader(field string, - fieldDvLocStart, fieldDvLocEnd uint64) (*docValueReader, error) { - // get the docValue offset for the given fields - if fieldDvLocStart == fieldNotUninverted { - // no docValues found, nothing to do - return nil, nil - } - - // read the number of chunks, and chunk offsets position - var numChunks, chunkOffsetsPosition uint64 - - if fieldDvLocEnd-fieldDvLocStart > 16 { - numChunks = binary.BigEndian.Uint64(s.mem[fieldDvLocEnd-8 : fieldDvLocEnd]) - // read the length of chunk offsets - chunkOffsetsLen := binary.BigEndian.Uint64(s.mem[fieldDvLocEnd-16 : fieldDvLocEnd-8]) - // acquire position of chunk offsets - chunkOffsetsPosition = (fieldDvLocEnd - 16) - chunkOffsetsLen - } else { - return nil, fmt.Errorf("loadFieldDocValueReader: fieldDvLoc too small: %d-%d", fieldDvLocEnd, fieldDvLocStart) - } - - fdvIter := &docValueReader{ - curChunkNum: math.MaxUint64, - field: field, - chunkOffsets: make([]uint64, int(numChunks)), - } - - // read the chunk offsets - var offset uint64 - for i := 0; i < int(numChunks); i++ { - loc, read := binary.Uvarint(s.mem[chunkOffsetsPosition+offset : chunkOffsetsPosition+offset+binary.MaxVarintLen64]) - if read <= 0 { - return nil, fmt.Errorf("corrupted chunk offset during segment load") - } - fdvIter.chunkOffsets[i] = loc - offset += uint64(read) - } - - // set the data offset - fdvIter.dvDataLoc = fieldDvLocStart - - return fdvIter, nil -} - -func (di *docValueReader) loadDvChunk(chunkNumber uint64, s *SegmentBase) error { - // advance to the chunk where the docValues - // reside for the given docNum - destChunkDataLoc, curChunkEnd := di.dvDataLoc, di.dvDataLoc - start, end := readChunkBoundary(int(chunkNumber), di.chunkOffsets) - if start >= end { - di.curChunkHeader = di.curChunkHeader[:0] - di.curChunkData = nil - di.curChunkNum = chunkNumber - di.uncompressed = 
di.uncompressed[:0] - return nil - } - - destChunkDataLoc += start - curChunkEnd += end - - // read the number of docs reside in the chunk - numDocs, read := binary.Uvarint(s.mem[destChunkDataLoc : destChunkDataLoc+binary.MaxVarintLen64]) - if read <= 0 { - return fmt.Errorf("failed to read the chunk") - } - chunkMetaLoc := destChunkDataLoc + uint64(read) - - offset := uint64(0) - if cap(di.curChunkHeader) < int(numDocs) { - di.curChunkHeader = make([]MetaData, int(numDocs)) - } else { - di.curChunkHeader = di.curChunkHeader[:int(numDocs)] - } - for i := 0; i < int(numDocs); i++ { - di.curChunkHeader[i].DocNum, read = binary.Uvarint(s.mem[chunkMetaLoc+offset : chunkMetaLoc+offset+binary.MaxVarintLen64]) - offset += uint64(read) - di.curChunkHeader[i].DocDvOffset, read = binary.Uvarint(s.mem[chunkMetaLoc+offset : chunkMetaLoc+offset+binary.MaxVarintLen64]) - offset += uint64(read) - } - - compressedDataLoc := chunkMetaLoc + offset - dataLength := curChunkEnd - compressedDataLoc - di.curChunkData = s.mem[compressedDataLoc : compressedDataLoc+dataLength] - di.curChunkNum = chunkNumber - di.uncompressed = di.uncompressed[:0] - return nil -} - -func (di *docValueReader) iterateAllDocValues(s *SegmentBase, visitor docNumTermsVisitor) error { - for i := 0; i < len(di.chunkOffsets); i++ { - err := di.loadDvChunk(uint64(i), s) - if err != nil { - return err - } - if di.curChunkData == nil || len(di.curChunkHeader) == 0 { - continue - } - - // uncompress the already loaded data - uncompressed, err := snappy.Decode(di.uncompressed[:cap(di.uncompressed)], di.curChunkData) - if err != nil { - return err - } - di.uncompressed = uncompressed - - start := uint64(0) - for _, entry := range di.curChunkHeader { - err = visitor(entry.DocNum, uncompressed[start:entry.DocDvOffset]) - if err != nil { - return err - } - - start = entry.DocDvOffset - } - } - - return nil -} - -func (di *docValueReader) visitDocValues(docNum uint64, - visitor index.DocValueVisitor) error { - // binary 
search the term locations for the docNum - start, end := di.getDocValueLocs(docNum) - if start == math.MaxUint64 || end == math.MaxUint64 || start == end { - return nil - } - - var uncompressed []byte - var err error - // use the uncompressed copy if available - if len(di.uncompressed) > 0 { - uncompressed = di.uncompressed - } else { - // uncompress the already loaded data - uncompressed, err = snappy.Decode(di.uncompressed[:cap(di.uncompressed)], di.curChunkData) - if err != nil { - return err - } - di.uncompressed = uncompressed - } - - // pick the terms for the given docNum - uncompressed = uncompressed[start:end] - for { - i := bytes.Index(uncompressed, termSeparatorSplitSlice) - if i < 0 { - break - } - - visitor(di.field, uncompressed[0:i]) - uncompressed = uncompressed[i+1:] - } - - return nil -} - -func (di *docValueReader) getDocValueLocs(docNum uint64) (uint64, uint64) { - i := sort.Search(len(di.curChunkHeader), func(i int) bool { - return di.curChunkHeader[i].DocNum >= docNum - }) - if i < len(di.curChunkHeader) && di.curChunkHeader[i].DocNum == docNum { - return ReadDocValueBoundary(i, di.curChunkHeader) - } - return math.MaxUint64, math.MaxUint64 -} - -// VisitDocValues is an implementation of the -// DocValueVisitable interface -func (s *SegmentBase) VisitDocValues(localDocNum uint64, fields []string, - visitor index.DocValueVisitor, dvsIn segment.DocVisitState) ( - segment.DocVisitState, error) { - dvs, ok := dvsIn.(*docVisitState) - if !ok || dvs == nil { - dvs = &docVisitState{} - } else { - if dvs.segment != s { - dvs.segment = s - dvs.dvrs = nil - } - } - - var fieldIDPlus1 uint16 - if dvs.dvrs == nil { - dvs.dvrs = make(map[uint16]*docValueReader, len(fields)) - for _, field := range fields { - if fieldIDPlus1, ok = s.fieldsMap[field]; !ok { - continue - } - fieldID := fieldIDPlus1 - 1 - if dvIter, exists := s.fieldDvReaders[fieldID]; exists && - dvIter != nil { - dvs.dvrs[fieldID] = dvIter.cloneInto(dvs.dvrs[fieldID]) - } - } - } - - // find 
the chunkNumber where the docValues are stored - // NOTE: doc values continue to use legacy chunk mode - chunkFactor, err := getChunkSize(LegacyChunkMode, 0, 0) - if err != nil { - return nil, err - } - docInChunk := localDocNum / chunkFactor - var dvr *docValueReader - for _, field := range fields { - if fieldIDPlus1, ok = s.fieldsMap[field]; !ok { - continue - } - fieldID := fieldIDPlus1 - 1 - if dvr, ok = dvs.dvrs[fieldID]; ok && dvr != nil { - // check if the chunk is already loaded - if docInChunk != dvr.curChunkNumber() { - err := dvr.loadDvChunk(docInChunk, s) - if err != nil { - return dvs, err - } - } - - _ = dvr.visitDocValues(localDocNum, visitor) - } - } - return dvs, nil -} - -// VisitableDocValueFields returns the list of fields with -// persisted doc value terms ready to be visitable using the -// VisitDocumentFieldTerms method. -func (s *SegmentBase) VisitableDocValueFields() ([]string, error) { - return s.fieldDvNames, nil -} diff --git a/vendor/github.com/blevesearch/zapx/v13/enumerator.go b/vendor/github.com/blevesearch/zapx/v13/enumerator.go deleted file mode 100644 index 972a22416..000000000 --- a/vendor/github.com/blevesearch/zapx/v13/enumerator.go +++ /dev/null @@ -1,138 +0,0 @@ -// Copyright (c) 2018 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package zap - -import ( - "bytes" - - "github.com/blevesearch/vellum" -) - -// enumerator provides an ordered traversal of multiple vellum -// iterators. 
Like JOIN of iterators, the enumerator produces a -// sequence of (key, iteratorIndex, value) tuples, sorted by key ASC, -// then iteratorIndex ASC, where the same key might be seen or -// repeated across multiple child iterators. -type enumerator struct { - itrs []vellum.Iterator - currKs [][]byte - currVs []uint64 - - lowK []byte - lowIdxs []int - lowCurr int -} - -// newEnumerator returns a new enumerator over the vellum Iterators -func newEnumerator(itrs []vellum.Iterator) (*enumerator, error) { - rv := &enumerator{ - itrs: itrs, - currKs: make([][]byte, len(itrs)), - currVs: make([]uint64, len(itrs)), - lowIdxs: make([]int, 0, len(itrs)), - } - for i, itr := range rv.itrs { - rv.currKs[i], rv.currVs[i] = itr.Current() - } - rv.updateMatches(false) - if rv.lowK == nil && len(rv.lowIdxs) == 0 { - return rv, vellum.ErrIteratorDone - } - return rv, nil -} - -// updateMatches maintains the low key matches based on the currKs -func (m *enumerator) updateMatches(skipEmptyKey bool) { - m.lowK = nil - m.lowIdxs = m.lowIdxs[:0] - m.lowCurr = 0 - - for i, key := range m.currKs { - if (key == nil && m.currVs[i] == 0) || // in case of empty iterator - (len(key) == 0 && skipEmptyKey) { // skip empty keys - continue - } - - cmp := bytes.Compare(key, m.lowK) - if cmp < 0 || len(m.lowIdxs) == 0 { - // reached a new low - m.lowK = key - m.lowIdxs = m.lowIdxs[:0] - m.lowIdxs = append(m.lowIdxs, i) - } else if cmp == 0 { - m.lowIdxs = append(m.lowIdxs, i) - } - } -} - -// Current returns the enumerator's current key, iterator-index, and -// value. If the enumerator is not pointing at a valid value (because -// Next returned an error previously), Current will return nil,0,0. 
-func (m *enumerator) Current() ([]byte, int, uint64) { - var i int - var v uint64 - if m.lowCurr < len(m.lowIdxs) { - i = m.lowIdxs[m.lowCurr] - v = m.currVs[i] - } - return m.lowK, i, v -} - -// GetLowIdxsAndValues will return all of the iterator indices -// which point to the current key, and their corresponding -// values. This can be used by advanced caller which may need -// to peek into these other sets of data before processing. -func (m *enumerator) GetLowIdxsAndValues() ([]int, []uint64) { - values := make([]uint64, 0, len(m.lowIdxs)) - for _, idx := range m.lowIdxs { - values = append(values, m.currVs[idx]) - } - return m.lowIdxs, values -} - -// Next advances the enumerator to the next key/iterator/value result, -// else vellum.ErrIteratorDone is returned. -func (m *enumerator) Next() error { - m.lowCurr += 1 - if m.lowCurr >= len(m.lowIdxs) { - // move all the current low iterators forwards - for _, vi := range m.lowIdxs { - err := m.itrs[vi].Next() - if err != nil && err != vellum.ErrIteratorDone { - return err - } - m.currKs[vi], m.currVs[vi] = m.itrs[vi].Current() - } - // can skip any empty keys encountered at this point - m.updateMatches(true) - } - if m.lowK == nil && len(m.lowIdxs) == 0 { - return vellum.ErrIteratorDone - } - return nil -} - -// Close all the underlying Iterators. The first error, if any, will -// be returned. 
-func (m *enumerator) Close() error { - var rv error - for _, itr := range m.itrs { - err := itr.Close() - if rv == nil { - rv = err - } - } - return rv -} diff --git a/vendor/github.com/blevesearch/zapx/v13/go.mod b/vendor/github.com/blevesearch/zapx/v13/go.mod deleted file mode 100644 index 7036e8689..000000000 --- a/vendor/github.com/blevesearch/zapx/v13/go.mod +++ /dev/null @@ -1,13 +0,0 @@ -module github.com/blevesearch/zapx/v13 - -go 1.12 - -require ( - github.com/RoaringBitmap/roaring v0.4.23 - github.com/blevesearch/bleve_index_api v1.0.0 - github.com/blevesearch/mmap-go v1.0.2 - github.com/blevesearch/scorch_segment_api/v2 v2.0.1 - github.com/blevesearch/vellum v1.0.3 - github.com/golang/snappy v0.0.1 - github.com/spf13/cobra v0.0.5 -) diff --git a/vendor/github.com/blevesearch/zapx/v13/go.sum b/vendor/github.com/blevesearch/zapx/v13/go.sum deleted file mode 100644 index 68e45348c..000000000 --- a/vendor/github.com/blevesearch/zapx/v13/go.sum +++ /dev/null @@ -1,73 +0,0 @@ -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/RoaringBitmap/roaring v0.4.23 h1:gpyfd12QohbqhFO4NVDUdoPOCXsyahYRQhINmlHxKeo= -github.com/RoaringBitmap/roaring v0.4.23/go.mod h1:D0gp8kJQgE1A4LQ5wFLggQEyvDi06Mq5mKs52e1TwOo= -github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= -github.com/blevesearch/bleve_index_api v1.0.0 h1:Ds3XeuTxjXCkG6pgIwWDRyooJKNIuOKemnN0N0IkhTU= -github.com/blevesearch/bleve_index_api v1.0.0/go.mod h1:fiwKS0xLEm+gBRgv5mumf0dhgFr2mDgZah1pqv1c1M4= -github.com/blevesearch/mmap-go v1.0.2 h1:JtMHb+FgQCTTYIhtMvimw15dJwu1Y5lrZDMOFXVWPk0= -github.com/blevesearch/mmap-go v1.0.2/go.mod h1:ol2qBqYaOUsGdm7aRMRrYGgPvnwLe6Y+7LMvAB5IbSA= -github.com/blevesearch/scorch_segment_api/v2 v2.0.1 h1:fd+hPtZ8GsbqPK1HslGp7Vhoik4arZteA/IsCEgOisw= -github.com/blevesearch/scorch_segment_api/v2 v2.0.1/go.mod h1:lq7yK2jQy1yQjtjTfU931aVqz7pYxEudHaDwOt1tXfU= 
-github.com/blevesearch/vellum v1.0.3 h1:U86G41A7CtXNzzpIJHM8lSTUqz1Mp8U870TkcdCzZc8= -github.com/blevesearch/vellum v1.0.3/go.mod h1:2u5ax02KeDuNWu4/C+hVQMD6uLN4txH1JbtpaDNLJRo= -github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= -github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/glycerine/go-unsnap-stream v0.0.0-20181221182339-f9677308dec2 h1:Ujru1hufTHVb++eG6OuNDKMxZnGIvF6o/u8q/8h2+I4= -github.com/glycerine/go-unsnap-stream v0.0.0-20181221182339-f9677308dec2/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE= -github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31 h1:gclg6gY70GLy3PbkQ1AERPfmLMMagS60DKF78eWwLn8= -github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24= -github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= -github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/gopherjs/gopherjs v0.0.0-20190910122728-9d188e94fb99 h1:twflg0XRTjwKpxb/jFExr4HGq6on2dEOmnL6FV+fgPw= -github.com/gopherjs/gopherjs v0.0.0-20190910122728-9d188e94fb99/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= 
-github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= -github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg= -github.com/mschoch/smat v0.2.0 h1:8imxQsjDm8yFEAVBe7azKmKSgzSkZXDuKkSq9374khM= -github.com/mschoch/smat v0.2.0/go.mod h1:kc9mz7DoBKqDyiRL7VZN8KvXQMWeTaVnttLRXOlotKw= -github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/philhofer/fwd v1.0.0 h1:UbZqGr5Y38ApvM/V/jEljVxwocdweyH+vmYvRPBnbqQ= -github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= -github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s= -github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= -github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= -github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= -github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= 
-github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/tinylib/msgp v1.1.0 h1:9fQd+ICuRIu/ue4vxJZu6/LzxN0HwMds2nq/0cFvxHU= -github.com/tinylib/msgp v1.1.0/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= -github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= -github.com/willf/bitset v1.1.10 h1:NotGKqX0KwQ72NUzqrjZq5ipPNDQex9lo3WpaS8L2sc= -github.com/willf/bitset v1.1.10/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= -github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= -golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181221143128-b4a75ba826a6/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a h1:aYOabOQFp6Vj6W1F80affTUvO9UxmJRx8K0gsfABByQ= -golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= -gopkg.in/yaml.v2 v2.2.2/go.mod 
h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/blevesearch/zapx/v13/intDecoder.go b/vendor/github.com/blevesearch/zapx/v13/intDecoder.go deleted file mode 100644 index e96809314..000000000 --- a/vendor/github.com/blevesearch/zapx/v13/intDecoder.go +++ /dev/null @@ -1,109 +0,0 @@ -// Copyright (c) 2019 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package zap - -import ( - "encoding/binary" - "fmt" -) - -type chunkedIntDecoder struct { - startOffset uint64 - dataStartOffset uint64 - chunkOffsets []uint64 - curChunkBytes []byte - data []byte - r *memUvarintReader -} - -func newChunkedIntDecoder(buf []byte, offset uint64) *chunkedIntDecoder { - rv := &chunkedIntDecoder{startOffset: offset, data: buf} - var n, numChunks uint64 - var read int - if offset == termNotEncoded { - numChunks = 0 - } else { - numChunks, read = binary.Uvarint(buf[offset+n : offset+n+binary.MaxVarintLen64]) - } - - n += uint64(read) - if cap(rv.chunkOffsets) >= int(numChunks) { - rv.chunkOffsets = rv.chunkOffsets[:int(numChunks)] - } else { - rv.chunkOffsets = make([]uint64, int(numChunks)) - } - for i := 0; i < int(numChunks); i++ { - rv.chunkOffsets[i], read = binary.Uvarint(buf[offset+n : offset+n+binary.MaxVarintLen64]) - n += uint64(read) - } - rv.dataStartOffset = offset + n - return rv -} - -func (d *chunkedIntDecoder) loadChunk(chunk int) error { - if d.startOffset == termNotEncoded { - d.r = newMemUvarintReader([]byte(nil)) - 
return nil - } - - if chunk >= len(d.chunkOffsets) { - return fmt.Errorf("tried to load freq chunk that doesn't exist %d/(%d)", - chunk, len(d.chunkOffsets)) - } - - end, start := d.dataStartOffset, d.dataStartOffset - s, e := readChunkBoundary(chunk, d.chunkOffsets) - start += s - end += e - d.curChunkBytes = d.data[start:end] - if d.r == nil { - d.r = newMemUvarintReader(d.curChunkBytes) - } else { - d.r.Reset(d.curChunkBytes) - } - - return nil -} - -func (d *chunkedIntDecoder) reset() { - d.startOffset = 0 - d.dataStartOffset = 0 - d.chunkOffsets = d.chunkOffsets[:0] - d.curChunkBytes = d.curChunkBytes[:0] - d.data = d.data[:0] - if d.r != nil { - d.r.Reset([]byte(nil)) - } -} - -func (d *chunkedIntDecoder) isNil() bool { - return d.curChunkBytes == nil -} - -func (d *chunkedIntDecoder) readUvarint() (uint64, error) { - return d.r.ReadUvarint() -} - -func (d *chunkedIntDecoder) SkipUvarint() { - d.r.SkipUvarint() -} - -func (d *chunkedIntDecoder) SkipBytes(count int) { - d.r.SkipBytes(count) -} - -func (d *chunkedIntDecoder) Len() int { - return d.r.Len() -} diff --git a/vendor/github.com/blevesearch/zapx/v13/intcoder.go b/vendor/github.com/blevesearch/zapx/v13/intcoder.go deleted file mode 100644 index c3c488fb7..000000000 --- a/vendor/github.com/blevesearch/zapx/v13/intcoder.go +++ /dev/null @@ -1,206 +0,0 @@ -// Copyright (c) 2017 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package zap - -import ( - "bytes" - "encoding/binary" - "io" -) - -// We can safely use 0 to represent termNotEncoded since 0 -// could never be a valid address for term location information. -// (stored field index is always non-empty and earlier in the -// file) -const termNotEncoded = 0 - -type chunkedIntCoder struct { - final []byte - chunkSize uint64 - chunkBuf bytes.Buffer - chunkLens []uint64 - currChunk uint64 - - buf []byte -} - -// newChunkedIntCoder returns a new chunk int coder which packs data into -// chunks based on the provided chunkSize and supports up to the specified -// maxDocNum -func newChunkedIntCoder(chunkSize uint64, maxDocNum uint64) *chunkedIntCoder { - total := maxDocNum/chunkSize + 1 - rv := &chunkedIntCoder{ - chunkSize: chunkSize, - chunkLens: make([]uint64, total), - final: make([]byte, 0, 64), - } - - return rv -} - -// Reset lets you reuse this chunked int coder. buffers are reset and reused -// from previous use. you cannot change the chunk size or max doc num. -func (c *chunkedIntCoder) Reset() { - c.final = c.final[:0] - c.chunkBuf.Reset() - c.currChunk = 0 - for i := range c.chunkLens { - c.chunkLens[i] = 0 - } -} - -// SetChunkSize changes the chunk size. It is only valid to do so -// with a new chunkedIntCoder, or immediately after calling Reset() -func (c *chunkedIntCoder) SetChunkSize(chunkSize uint64, maxDocNum uint64) { - total := int(maxDocNum/chunkSize + 1) - c.chunkSize = chunkSize - if cap(c.chunkLens) < total { - c.chunkLens = make([]uint64, total) - } else { - c.chunkLens = c.chunkLens[:total] - } -} - -// Add encodes the provided integers into the correct chunk for the provided -// doc num. You MUST call Add() with increasing docNums. 
-func (c *chunkedIntCoder) Add(docNum uint64, vals ...uint64) error { - chunk := docNum / c.chunkSize - if chunk != c.currChunk { - // starting a new chunk - c.Close() - c.chunkBuf.Reset() - c.currChunk = chunk - } - - if len(c.buf) < binary.MaxVarintLen64 { - c.buf = make([]byte, binary.MaxVarintLen64) - } - - for _, val := range vals { - wb := binary.PutUvarint(c.buf, val) - _, err := c.chunkBuf.Write(c.buf[:wb]) - if err != nil { - return err - } - } - - return nil -} - -func (c *chunkedIntCoder) AddBytes(docNum uint64, buf []byte) error { - chunk := docNum / c.chunkSize - if chunk != c.currChunk { - // starting a new chunk - c.Close() - c.chunkBuf.Reset() - c.currChunk = chunk - } - - _, err := c.chunkBuf.Write(buf) - return err -} - -// Close indicates you are done calling Add() this allows the final chunk -// to be encoded. -func (c *chunkedIntCoder) Close() { - encodingBytes := c.chunkBuf.Bytes() - c.chunkLens[c.currChunk] = uint64(len(encodingBytes)) - c.final = append(c.final, encodingBytes...) - c.currChunk = uint64(cap(c.chunkLens)) // sentinel to detect double close -} - -// Write commits all the encoded chunked integers to the provided writer. 
-func (c *chunkedIntCoder) Write(w io.Writer) (int, error) { - bufNeeded := binary.MaxVarintLen64 * (1 + len(c.chunkLens)) - if len(c.buf) < bufNeeded { - c.buf = make([]byte, bufNeeded) - } - buf := c.buf - - // convert the chunk lengths into chunk offsets - chunkOffsets := modifyLengthsToEndOffsets(c.chunkLens) - - // write out the number of chunks & each chunk offsets - n := binary.PutUvarint(buf, uint64(len(chunkOffsets))) - for _, chunkOffset := range chunkOffsets { - n += binary.PutUvarint(buf[n:], chunkOffset) - } - - tw, err := w.Write(buf[:n]) - if err != nil { - return tw, err - } - - // write out the data - nw, err := w.Write(c.final) - tw += nw - if err != nil { - return tw, err - } - return tw, nil -} - -// writeAt commits all the encoded chunked integers to the provided writer -// and returns the starting offset, total bytes written and an error -func (c *chunkedIntCoder) writeAt(w io.Writer) (uint64, int, error) { - startOffset := uint64(termNotEncoded) - if len(c.final) <= 0 { - return startOffset, 0, nil - } - - if chw := w.(*CountHashWriter); chw != nil { - startOffset = uint64(chw.Count()) - } - - tw, err := c.Write(w) - return startOffset, tw, err -} - -func (c *chunkedIntCoder) FinalSize() int { - return len(c.final) -} - -// modifyLengthsToEndOffsets converts the chunk length array -// to a chunk offset array. The readChunkBoundary -// will figure out the start and end of every chunk from -// these offsets. Starting offset of i'th index is stored -// in i-1'th position except for 0'th index and ending offset -// is stored at i'th index position. -// For 0'th element, starting position is always zero. 
-// eg: -// Lens -> 5 5 5 5 => 5 10 15 20 -// Lens -> 0 5 0 5 => 0 5 5 10 -// Lens -> 0 0 0 5 => 0 0 0 5 -// Lens -> 5 0 0 0 => 5 5 5 5 -// Lens -> 0 5 0 0 => 0 5 5 5 -// Lens -> 0 0 5 0 => 0 0 5 5 -func modifyLengthsToEndOffsets(lengths []uint64) []uint64 { - var runningOffset uint64 - var index, i int - for i = 1; i <= len(lengths); i++ { - runningOffset += lengths[i-1] - lengths[index] = runningOffset - index++ - } - return lengths -} - -func readChunkBoundary(chunk int, offsets []uint64) (uint64, uint64) { - var start uint64 - if chunk > 0 { - start = offsets[chunk-1] - } - return start, offsets[chunk] -} diff --git a/vendor/github.com/blevesearch/zapx/v13/memuvarint.go b/vendor/github.com/blevesearch/zapx/v13/memuvarint.go deleted file mode 100644 index 0c10c83a4..000000000 --- a/vendor/github.com/blevesearch/zapx/v13/memuvarint.go +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright (c) 2020 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package zap - -import ( - "fmt" -) - -type memUvarintReader struct { - C int // index of next byte to read from S - S []byte -} - -func newMemUvarintReader(s []byte) *memUvarintReader { - return &memUvarintReader{S: s} -} - -// Len returns the number of unread bytes. -func (r *memUvarintReader) Len() int { - n := len(r.S) - r.C - if n < 0 { - return 0 - } - return n -} - -// ReadUvarint reads an encoded uint64. The original code this was -// based on is at encoding/binary/ReadUvarint(). 
-func (r *memUvarintReader) ReadUvarint() (uint64, error) { - var x uint64 - var s uint - var C = r.C - var S = r.S - - for { - b := S[C] - C++ - - if b < 0x80 { - r.C = C - - // why 63? The original code had an 'i += 1' loop var and - // checked for i > 9 || i == 9 ...; but, we no longer - // check for the i var, but instead check here for s, - // which is incremented by 7. So, 7*9 == 63. - // - // why the "extra" >= check? The normal case is that s < - // 63, so we check this single >= guard first so that we - // hit the normal, nil-error return pathway sooner. - if s >= 63 && (s > 63 || s == 63 && b > 1) { - return 0, fmt.Errorf("memUvarintReader overflow") - } - - return x | uint64(b)< 0 { - storedIndexOffset, newDocNums, err = mergeStoredAndRemap(segments, drops, - fieldsMap, fieldsInv, fieldsSame, numDocs, cr, closeCh) - if err != nil { - return nil, 0, 0, 0, 0, nil, nil, nil, err - } - - dictLocs, docValueOffset, err = persistMergedRest(segments, drops, - fieldsInv, fieldsMap, fieldsSame, - newDocNums, numDocs, chunkMode, cr, closeCh) - if err != nil { - return nil, 0, 0, 0, 0, nil, nil, nil, err - } - } else { - dictLocs = make([]uint64, len(fieldsInv)) - } - - fieldsIndexOffset, err = persistFields(fieldsInv, cr, dictLocs) - if err != nil { - return nil, 0, 0, 0, 0, nil, nil, nil, err - } - - return newDocNums, numDocs, storedIndexOffset, fieldsIndexOffset, docValueOffset, dictLocs, fieldsInv, fieldsMap, nil -} - -// mapFields takes the fieldsInv list and returns a map of fieldName -// to fieldID+1 -func mapFields(fields []string) map[string]uint16 { - rv := make(map[string]uint16, len(fields)) - for i, fieldName := range fields { - rv[fieldName] = uint16(i) + 1 - } - return rv -} - -// computeNewDocCount determines how many documents will be in the newly -// merged segment when obsoleted docs are dropped -func computeNewDocCount(segments []*SegmentBase, drops []*roaring.Bitmap) uint64 { - var newDocCount uint64 - for segI, segment := range segments { - 
newDocCount += segment.numDocs - if drops[segI] != nil { - newDocCount -= drops[segI].GetCardinality() - } - } - return newDocCount -} - -func persistMergedRest(segments []*SegmentBase, dropsIn []*roaring.Bitmap, - fieldsInv []string, fieldsMap map[string]uint16, fieldsSame bool, - newDocNumsIn [][]uint64, newSegDocCount uint64, chunkMode uint32, - w *CountHashWriter, closeCh chan struct{}) ([]uint64, uint64, error) { - var bufMaxVarintLen64 []byte = make([]byte, binary.MaxVarintLen64) - var bufLoc []uint64 - - var postings *PostingsList - var postItr *PostingsIterator - - rv := make([]uint64, len(fieldsInv)) - fieldDvLocsStart := make([]uint64, len(fieldsInv)) - fieldDvLocsEnd := make([]uint64, len(fieldsInv)) - - // these int coders are initialized with chunk size 1024 - // however this will be reset to the correct chunk size - // while processing each individual field-term section - tfEncoder := newChunkedIntCoder(1024, newSegDocCount-1) - locEncoder := newChunkedIntCoder(1024, newSegDocCount-1) - - var vellumBuf bytes.Buffer - newVellum, err := vellum.New(&vellumBuf, nil) - if err != nil { - return nil, 0, err - } - - newRoaring := roaring.NewBitmap() - - // for each field - for fieldID, fieldName := range fieldsInv { - // collect FST iterators from all active segments for this field - var newDocNums [][]uint64 - var drops []*roaring.Bitmap - var dicts []*Dictionary - var itrs []vellum.Iterator - - var segmentsInFocus []*SegmentBase - - for segmentI, segment := range segments { - // check for the closure in meantime - if isClosed(closeCh) { - return nil, 0, seg.ErrClosed - } - - dict, err2 := segment.dictionary(fieldName) - if err2 != nil { - return nil, 0, err2 - } - if dict != nil && dict.fst != nil { - itr, err2 := dict.fst.Iterator(nil, nil) - if err2 != nil && err2 != vellum.ErrIteratorDone { - return nil, 0, err2 - } - if itr != nil { - newDocNums = append(newDocNums, newDocNumsIn[segmentI]) - if dropsIn[segmentI] != nil && !dropsIn[segmentI].IsEmpty() { 
- drops = append(drops, dropsIn[segmentI]) - } else { - drops = append(drops, nil) - } - dicts = append(dicts, dict) - itrs = append(itrs, itr) - segmentsInFocus = append(segmentsInFocus, segment) - } - } - } - - var prevTerm []byte - - newRoaring.Clear() - - var lastDocNum, lastFreq, lastNorm uint64 - - // determines whether to use "1-hit" encoding optimization - // when a term appears in only 1 doc, with no loc info, - // has freq of 1, and the docNum fits into 31-bits - use1HitEncoding := func(termCardinality uint64) (bool, uint64, uint64) { - if termCardinality == uint64(1) && locEncoder.FinalSize() <= 0 { - docNum := uint64(newRoaring.Minimum()) - if under32Bits(docNum) && docNum == lastDocNum && lastFreq == 1 { - return true, docNum, lastNorm - } - } - return false, 0, 0 - } - - finishTerm := func(term []byte) error { - tfEncoder.Close() - locEncoder.Close() - - postingsOffset, err := writePostings(newRoaring, - tfEncoder, locEncoder, use1HitEncoding, w, bufMaxVarintLen64) - if err != nil { - return err - } - - if postingsOffset > 0 { - err = newVellum.Insert(term, postingsOffset) - if err != nil { - return err - } - } - - newRoaring.Clear() - - tfEncoder.Reset() - locEncoder.Reset() - - lastDocNum = 0 - lastFreq = 0 - lastNorm = 0 - - return nil - } - - enumerator, err := newEnumerator(itrs) - - for err == nil { - term, itrI, postingsOffset := enumerator.Current() - - if !bytes.Equal(prevTerm, term) { - // check for the closure in meantime - if isClosed(closeCh) { - return nil, 0, seg.ErrClosed - } - - // if the term changed, write out the info collected - // for the previous term - err = finishTerm(prevTerm) - if err != nil { - return nil, 0, err - } - } - if !bytes.Equal(prevTerm, term) || prevTerm == nil { - // compute cardinality of field-term in new seg - var newCard uint64 - lowItrIdxs, lowItrVals := enumerator.GetLowIdxsAndValues() - for i, idx := range lowItrIdxs { - pl, err := dicts[idx].postingsListFromOffset(lowItrVals[i], drops[idx], nil) - if 
err != nil { - return nil, 0, err - } - newCard += pl.Count() - } - // compute correct chunk size with this - chunkSize, err := getChunkSize(chunkMode, newCard, newSegDocCount) - if err != nil { - return nil, 0, err - } - // update encoders chunk - tfEncoder.SetChunkSize(chunkSize, newSegDocCount-1) - locEncoder.SetChunkSize(chunkSize, newSegDocCount-1) - } - - postings, err = dicts[itrI].postingsListFromOffset( - postingsOffset, drops[itrI], postings) - if err != nil { - return nil, 0, err - } - - postItr = postings.iterator(true, true, true, postItr) - - // can no longer optimize by copying, since chunk factor could have changed - lastDocNum, lastFreq, lastNorm, bufLoc, err = mergeTermFreqNormLocs( - fieldsMap, term, postItr, newDocNums[itrI], newRoaring, - tfEncoder, locEncoder, bufLoc) - - if err != nil { - return nil, 0, err - } - - prevTerm = prevTerm[:0] // copy to prevTerm in case Next() reuses term mem - prevTerm = append(prevTerm, term...) - - err = enumerator.Next() - } - if err != vellum.ErrIteratorDone { - return nil, 0, err - } - - err = finishTerm(prevTerm) - if err != nil { - return nil, 0, err - } - - dictOffset := uint64(w.Count()) - - err = newVellum.Close() - if err != nil { - return nil, 0, err - } - vellumData := vellumBuf.Bytes() - - // write out the length of the vellum data - n := binary.PutUvarint(bufMaxVarintLen64, uint64(len(vellumData))) - _, err = w.Write(bufMaxVarintLen64[:n]) - if err != nil { - return nil, 0, err - } - - // write this vellum to disk - _, err = w.Write(vellumData) - if err != nil { - return nil, 0, err - } - - rv[fieldID] = dictOffset - - // get the field doc value offset (start) - fieldDvLocsStart[fieldID] = uint64(w.Count()) - - // update the field doc values - // NOTE: doc values continue to use legacy chunk mode - chunkSize, err := getChunkSize(LegacyChunkMode, 0, 0) - if err != nil { - return nil, 0, err - } - fdvEncoder := newChunkedContentCoder(chunkSize, newSegDocCount-1, w, true) - - fdvReadersAvailable := 
false - var dvIterClone *docValueReader - for segmentI, segment := range segmentsInFocus { - // check for the closure in meantime - if isClosed(closeCh) { - return nil, 0, seg.ErrClosed - } - - fieldIDPlus1 := uint16(segment.fieldsMap[fieldName]) - if dvIter, exists := segment.fieldDvReaders[fieldIDPlus1-1]; exists && - dvIter != nil { - fdvReadersAvailable = true - dvIterClone = dvIter.cloneInto(dvIterClone) - err = dvIterClone.iterateAllDocValues(segment, func(docNum uint64, terms []byte) error { - if newDocNums[segmentI][docNum] == docDropped { - return nil - } - err := fdvEncoder.Add(newDocNums[segmentI][docNum], terms) - if err != nil { - return err - } - return nil - }) - if err != nil { - return nil, 0, err - } - } - } - - if fdvReadersAvailable { - err = fdvEncoder.Close() - if err != nil { - return nil, 0, err - } - - // persist the doc value details for this field - _, err = fdvEncoder.Write() - if err != nil { - return nil, 0, err - } - - // get the field doc value offset (end) - fieldDvLocsEnd[fieldID] = uint64(w.Count()) - } else { - fieldDvLocsStart[fieldID] = fieldNotUninverted - fieldDvLocsEnd[fieldID] = fieldNotUninverted - } - - // reset vellum buffer and vellum builder - vellumBuf.Reset() - err = newVellum.Reset(&vellumBuf) - if err != nil { - return nil, 0, err - } - } - - fieldDvLocsOffset := uint64(w.Count()) - - buf := bufMaxVarintLen64 - for i := 0; i < len(fieldDvLocsStart); i++ { - n := binary.PutUvarint(buf, fieldDvLocsStart[i]) - _, err := w.Write(buf[:n]) - if err != nil { - return nil, 0, err - } - n = binary.PutUvarint(buf, fieldDvLocsEnd[i]) - _, err = w.Write(buf[:n]) - if err != nil { - return nil, 0, err - } - } - - return rv, fieldDvLocsOffset, nil -} - -func mergeTermFreqNormLocs(fieldsMap map[string]uint16, term []byte, postItr *PostingsIterator, - newDocNums []uint64, newRoaring *roaring.Bitmap, - tfEncoder *chunkedIntCoder, locEncoder *chunkedIntCoder, bufLoc []uint64) ( - lastDocNum uint64, lastFreq uint64, lastNorm uint64, 
bufLocOut []uint64, err error) { - next, err := postItr.Next() - for next != nil && err == nil { - hitNewDocNum := newDocNums[next.Number()] - if hitNewDocNum == docDropped { - return 0, 0, 0, nil, fmt.Errorf("see hit with dropped docNum") - } - - newRoaring.Add(uint32(hitNewDocNum)) - - nextFreq := next.Frequency() - nextNorm := uint64(math.Float32bits(float32(next.Norm()))) - - locs := next.Locations() - - err = tfEncoder.Add(hitNewDocNum, - encodeFreqHasLocs(nextFreq, len(locs) > 0), nextNorm) - if err != nil { - return 0, 0, 0, nil, err - } - - if len(locs) > 0 { - numBytesLocs := 0 - for _, loc := range locs { - ap := loc.ArrayPositions() - numBytesLocs += totalUvarintBytes(uint64(fieldsMap[loc.Field()]-1), - loc.Pos(), loc.Start(), loc.End(), uint64(len(ap)), ap) - } - - err = locEncoder.Add(hitNewDocNum, uint64(numBytesLocs)) - if err != nil { - return 0, 0, 0, nil, err - } - - for _, loc := range locs { - ap := loc.ArrayPositions() - if cap(bufLoc) < 5+len(ap) { - bufLoc = make([]uint64, 0, 5+len(ap)) - } - args := bufLoc[0:5] - args[0] = uint64(fieldsMap[loc.Field()] - 1) - args[1] = loc.Pos() - args[2] = loc.Start() - args[3] = loc.End() - args[4] = uint64(len(ap)) - args = append(args, ap...) - err = locEncoder.Add(hitNewDocNum, args...) 
- if err != nil { - return 0, 0, 0, nil, err - } - } - } - - lastDocNum = hitNewDocNum - lastFreq = nextFreq - lastNorm = nextNorm - - next, err = postItr.Next() - } - - return lastDocNum, lastFreq, lastNorm, bufLoc, err -} - -func writePostings(postings *roaring.Bitmap, tfEncoder, locEncoder *chunkedIntCoder, - use1HitEncoding func(uint64) (bool, uint64, uint64), - w *CountHashWriter, bufMaxVarintLen64 []byte) ( - offset uint64, err error) { - termCardinality := postings.GetCardinality() - if termCardinality <= 0 { - return 0, nil - } - - if use1HitEncoding != nil { - encodeAs1Hit, docNum1Hit, normBits1Hit := use1HitEncoding(termCardinality) - if encodeAs1Hit { - return FSTValEncode1Hit(docNum1Hit, normBits1Hit), nil - } - } - - var tfOffset uint64 - tfOffset, _, err = tfEncoder.writeAt(w) - if err != nil { - return 0, err - } - - var locOffset uint64 - locOffset, _, err = locEncoder.writeAt(w) - if err != nil { - return 0, err - } - - postingsOffset := uint64(w.Count()) - - n := binary.PutUvarint(bufMaxVarintLen64, tfOffset) - _, err = w.Write(bufMaxVarintLen64[:n]) - if err != nil { - return 0, err - } - - n = binary.PutUvarint(bufMaxVarintLen64, locOffset) - _, err = w.Write(bufMaxVarintLen64[:n]) - if err != nil { - return 0, err - } - - _, err = writeRoaringWithLen(postings, w, bufMaxVarintLen64) - if err != nil { - return 0, err - } - - return postingsOffset, nil -} - -type varintEncoder func(uint64) (int, error) - -func mergeStoredAndRemap(segments []*SegmentBase, drops []*roaring.Bitmap, - fieldsMap map[string]uint16, fieldsInv []string, fieldsSame bool, newSegDocCount uint64, - w *CountHashWriter, closeCh chan struct{}) (uint64, [][]uint64, error) { - var rv [][]uint64 // The remapped or newDocNums for each segment. 
- - var newDocNum uint64 - - var curr int - var data, compressed []byte - var metaBuf bytes.Buffer - varBuf := make([]byte, binary.MaxVarintLen64) - metaEncode := func(val uint64) (int, error) { - wb := binary.PutUvarint(varBuf, val) - return metaBuf.Write(varBuf[:wb]) - } - - vals := make([][][]byte, len(fieldsInv)) - typs := make([][]byte, len(fieldsInv)) - poss := make([][][]uint64, len(fieldsInv)) - - var posBuf []uint64 - - docNumOffsets := make([]uint64, newSegDocCount) - - vdc := visitDocumentCtxPool.Get().(*visitDocumentCtx) - defer visitDocumentCtxPool.Put(vdc) - - // for each segment - for segI, segment := range segments { - // check for the closure in meantime - if isClosed(closeCh) { - return 0, nil, seg.ErrClosed - } - - segNewDocNums := make([]uint64, segment.numDocs) - - dropsI := drops[segI] - - // optimize when the field mapping is the same across all - // segments and there are no deletions, via byte-copying - // of stored docs bytes directly to the writer - if fieldsSame && (dropsI == nil || dropsI.GetCardinality() == 0) { - err := segment.copyStoredDocs(newDocNum, docNumOffsets, w) - if err != nil { - return 0, nil, err - } - - for i := uint64(0); i < segment.numDocs; i++ { - segNewDocNums[i] = newDocNum - newDocNum++ - } - rv = append(rv, segNewDocNums) - - continue - } - - // for each doc num - for docNum := uint64(0); docNum < segment.numDocs; docNum++ { - // TODO: roaring's API limits docNums to 32-bits? 
- if dropsI != nil && dropsI.Contains(uint32(docNum)) { - segNewDocNums[docNum] = docDropped - continue - } - - segNewDocNums[docNum] = newDocNum - - curr = 0 - metaBuf.Reset() - data = data[:0] - - posTemp := posBuf - - // collect all the data - for i := 0; i < len(fieldsInv); i++ { - vals[i] = vals[i][:0] - typs[i] = typs[i][:0] - poss[i] = poss[i][:0] - } - err := segment.visitStoredFields(vdc, docNum, func(field string, typ byte, value []byte, pos []uint64) bool { - fieldID := int(fieldsMap[field]) - 1 - vals[fieldID] = append(vals[fieldID], value) - typs[fieldID] = append(typs[fieldID], typ) - - // copy array positions to preserve them beyond the scope of this callback - var curPos []uint64 - if len(pos) > 0 { - if cap(posTemp) < len(pos) { - posBuf = make([]uint64, len(pos)*len(fieldsInv)) - posTemp = posBuf - } - curPos = posTemp[0:len(pos)] - copy(curPos, pos) - posTemp = posTemp[len(pos):] - } - poss[fieldID] = append(poss[fieldID], curPos) - - return true - }) - if err != nil { - return 0, nil, err - } - - // _id field special case optimizes ExternalID() lookups - idFieldVal := vals[uint16(0)][0] - _, err = metaEncode(uint64(len(idFieldVal))) - if err != nil { - return 0, nil, err - } - - // now walk the non-"_id" fields in order - for fieldID := 1; fieldID < len(fieldsInv); fieldID++ { - storedFieldValues := vals[fieldID] - - stf := typs[fieldID] - spf := poss[fieldID] - - var err2 error - curr, data, err2 = persistStoredFieldValues(fieldID, - storedFieldValues, stf, spf, curr, metaEncode, data) - if err2 != nil { - return 0, nil, err2 - } - } - - metaBytes := metaBuf.Bytes() - - compressed = snappy.Encode(compressed[:cap(compressed)], data) - - // record where we're about to start writing - docNumOffsets[newDocNum] = uint64(w.Count()) - - // write out the meta len and compressed data len - _, err = writeUvarints(w, - uint64(len(metaBytes)), - uint64(len(idFieldVal)+len(compressed))) - if err != nil { - return 0, nil, err - } - // now write the meta - _, 
err = w.Write(metaBytes) - if err != nil { - return 0, nil, err - } - // now write the _id field val (counted as part of the 'compressed' data) - _, err = w.Write(idFieldVal) - if err != nil { - return 0, nil, err - } - // now write the compressed data - _, err = w.Write(compressed) - if err != nil { - return 0, nil, err - } - - newDocNum++ - } - - rv = append(rv, segNewDocNums) - } - - // return value is the start of the stored index - storedIndexOffset := uint64(w.Count()) - - // now write out the stored doc index - for _, docNumOffset := range docNumOffsets { - err := binary.Write(w, binary.BigEndian, docNumOffset) - if err != nil { - return 0, nil, err - } - } - - return storedIndexOffset, rv, nil -} - -// copyStoredDocs writes out a segment's stored doc info, optimized by -// using a single Write() call for the entire set of bytes. The -// newDocNumOffsets is filled with the new offsets for each doc. -func (s *SegmentBase) copyStoredDocs(newDocNum uint64, newDocNumOffsets []uint64, - w *CountHashWriter) error { - if s.numDocs <= 0 { - return nil - } - - indexOffset0, storedOffset0, _, _, _ := - s.getDocStoredOffsets(0) // the segment's first doc - - indexOffsetN, storedOffsetN, readN, metaLenN, dataLenN := - s.getDocStoredOffsets(s.numDocs - 1) // the segment's last doc - - storedOffset0New := uint64(w.Count()) - - storedBytes := s.mem[storedOffset0 : storedOffsetN+readN+metaLenN+dataLenN] - _, err := w.Write(storedBytes) - if err != nil { - return err - } - - // remap the storedOffset's for the docs into new offsets relative - // to storedOffset0New, filling the given docNumOffsetsOut array - for indexOffset := indexOffset0; indexOffset <= indexOffsetN; indexOffset += 8 { - storedOffset := binary.BigEndian.Uint64(s.mem[indexOffset : indexOffset+8]) - storedOffsetNew := storedOffset - storedOffset0 + storedOffset0New - newDocNumOffsets[newDocNum] = storedOffsetNew - newDocNum += 1 - } - - return nil -} - -// mergeFields builds a unified list of fields used 
across all the -// input segments, and computes whether the fields are the same across -// segments (which depends on fields to be sorted in the same way -// across segments) -func mergeFields(segments []*SegmentBase) (bool, []string) { - fieldsSame := true - - var segment0Fields []string - if len(segments) > 0 { - segment0Fields = segments[0].Fields() - } - - fieldsExist := map[string]struct{}{} - for _, segment := range segments { - fields := segment.Fields() - for fieldi, field := range fields { - fieldsExist[field] = struct{}{} - if len(segment0Fields) != len(fields) || segment0Fields[fieldi] != field { - fieldsSame = false - } - } - } - - rv := make([]string, 0, len(fieldsExist)) - // ensure _id stays first - rv = append(rv, "_id") - for k := range fieldsExist { - if k != "_id" { - rv = append(rv, k) - } - } - - sort.Strings(rv[1:]) // leave _id as first - - return fieldsSame, rv -} - -func isClosed(closeCh chan struct{}) bool { - select { - case <-closeCh: - return true - default: - return false - } -} diff --git a/vendor/github.com/blevesearch/zapx/v13/posting.go b/vendor/github.com/blevesearch/zapx/v13/posting.go deleted file mode 100644 index d6c61a42c..000000000 --- a/vendor/github.com/blevesearch/zapx/v13/posting.go +++ /dev/null @@ -1,796 +0,0 @@ -// Copyright (c) 2017 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package zap - -import ( - "encoding/binary" - "fmt" - "math" - "reflect" - - "github.com/RoaringBitmap/roaring" - segment "github.com/blevesearch/scorch_segment_api/v2" -) - -var reflectStaticSizePostingsList int -var reflectStaticSizePostingsIterator int -var reflectStaticSizePosting int -var reflectStaticSizeLocation int - -func init() { - var pl PostingsList - reflectStaticSizePostingsList = int(reflect.TypeOf(pl).Size()) - var pi PostingsIterator - reflectStaticSizePostingsIterator = int(reflect.TypeOf(pi).Size()) - var p Posting - reflectStaticSizePosting = int(reflect.TypeOf(p).Size()) - var l Location - reflectStaticSizeLocation = int(reflect.TypeOf(l).Size()) -} - -// FST or vellum value (uint64) encoding is determined by the top two -// highest-order or most significant bits... -// -// encoding : MSB -// name : 63 62 61...to...bit #0 (LSB) -// ----------+---+---+--------------------------------------------------- -// general : 0 | 0 | 62-bits of postingsOffset. -// ~ : 0 | 1 | reserved for future. -// 1-hit : 1 | 0 | 31-bits of positive float31 norm | 31-bits docNum. -// ~ : 1 | 1 | reserved for future. -// -// Encoding "general" is able to handle all cases, where the -// postingsOffset points to more information about the postings for -// the term. -// -// Encoding "1-hit" is used to optimize a commonly seen case when a -// term has only a single hit. For example, a term in the _id field -// will have only 1 hit. The "1-hit" encoding is used for a term -// in a field when... -// -// - term vector info is disabled for that field; -// - and, the term appears in only a single doc for that field; -// - and, the term's freq is exactly 1 in that single doc for that field; -// - and, the docNum must fit into 31-bits; -// -// Otherwise, the "general" encoding is used instead. -// -// In the "1-hit" encoding, the field in that single doc may have -// other terms, which is supported in the "1-hit" encoding by the -// positive float31 norm. 
- -const FSTValEncodingMask = uint64(0xc000000000000000) -const FSTValEncodingGeneral = uint64(0x0000000000000000) -const FSTValEncoding1Hit = uint64(0x8000000000000000) - -func FSTValEncode1Hit(docNum uint64, normBits uint64) uint64 { - return FSTValEncoding1Hit | ((mask31Bits & normBits) << 31) | (mask31Bits & docNum) -} - -func FSTValDecode1Hit(v uint64) (docNum uint64, normBits uint64) { - return (mask31Bits & v), (mask31Bits & (v >> 31)) -} - -const mask31Bits = uint64(0x000000007fffffff) - -func under32Bits(x uint64) bool { - return x <= mask31Bits -} - -const DocNum1HitFinished = math.MaxUint64 - -var NormBits1Hit = uint64(math.Float32bits(float32(1))) - -// PostingsList is an in-memory representation of a postings list -type PostingsList struct { - sb *SegmentBase - postingsOffset uint64 - freqOffset uint64 - locOffset uint64 - postings *roaring.Bitmap - except *roaring.Bitmap - - // when normBits1Hit != 0, then this postings list came from a - // 1-hit encoding, and only the docNum1Hit & normBits1Hit apply - docNum1Hit uint64 - normBits1Hit uint64 -} - -// represents an immutable, empty postings list -var emptyPostingsList = &PostingsList{} - -func (p *PostingsList) Size() int { - sizeInBytes := reflectStaticSizePostingsList + SizeOfPtr - - if p.except != nil { - sizeInBytes += int(p.except.GetSizeInBytes()) - } - - return sizeInBytes -} - -func (p *PostingsList) OrInto(receiver *roaring.Bitmap) { - if p.normBits1Hit != 0 { - receiver.Add(uint32(p.docNum1Hit)) - return - } - - if p.postings != nil { - receiver.Or(p.postings) - } -} - -// Iterator returns an iterator for this postings list -func (p *PostingsList) Iterator(includeFreq, includeNorm, includeLocs bool, - prealloc segment.PostingsIterator) segment.PostingsIterator { - if p.normBits1Hit == 0 && p.postings == nil { - return emptyPostingsIterator - } - - var preallocPI *PostingsIterator - pi, ok := prealloc.(*PostingsIterator) - if ok && pi != nil { - preallocPI = pi - } - if preallocPI == 
emptyPostingsIterator { - preallocPI = nil - } - - return p.iterator(includeFreq, includeNorm, includeLocs, preallocPI) -} - -func (p *PostingsList) iterator(includeFreq, includeNorm, includeLocs bool, - rv *PostingsIterator) *PostingsIterator { - if rv == nil { - rv = &PostingsIterator{} - } else { - freqNormReader := rv.freqNormReader - if freqNormReader != nil { - freqNormReader.reset() - } - - locReader := rv.locReader - if locReader != nil { - locReader.reset() - } - - nextLocs := rv.nextLocs[:0] - nextSegmentLocs := rv.nextSegmentLocs[:0] - - buf := rv.buf - - *rv = PostingsIterator{} // clear the struct - - rv.freqNormReader = freqNormReader - rv.locReader = locReader - - rv.nextLocs = nextLocs - rv.nextSegmentLocs = nextSegmentLocs - - rv.buf = buf - } - - rv.postings = p - rv.includeFreqNorm = includeFreq || includeNorm || includeLocs - rv.includeLocs = includeLocs - - if p.normBits1Hit != 0 { - // "1-hit" encoding - rv.docNum1Hit = p.docNum1Hit - rv.normBits1Hit = p.normBits1Hit - - if p.except != nil && p.except.Contains(uint32(rv.docNum1Hit)) { - rv.docNum1Hit = DocNum1HitFinished - } - - return rv - } - - // "general" encoding, check if empty - if p.postings == nil { - return rv - } - - // initialize freq chunk reader - if rv.includeFreqNorm { - rv.freqNormReader = newChunkedIntDecoder(p.sb.mem, p.freqOffset) - } - - // initialize the loc chunk reader - if rv.includeLocs { - rv.locReader = newChunkedIntDecoder(p.sb.mem, p.locOffset) - } - - rv.all = p.postings.Iterator() - if p.except != nil { - rv.ActualBM = roaring.AndNot(p.postings, p.except) - rv.Actual = rv.ActualBM.Iterator() - } else { - rv.ActualBM = p.postings - rv.Actual = rv.all // Optimize to use same iterator for all & Actual. 
- } - - return rv -} - -// Count returns the number of items on this postings list -func (p *PostingsList) Count() uint64 { - var n, e uint64 - if p.normBits1Hit != 0 { - n = 1 - if p.except != nil && p.except.Contains(uint32(p.docNum1Hit)) { - e = 1 - } - } else if p.postings != nil { - n = p.postings.GetCardinality() - if p.except != nil { - e = p.postings.AndCardinality(p.except) - } - } - return n - e -} - -func (rv *PostingsList) read(postingsOffset uint64, d *Dictionary) error { - rv.postingsOffset = postingsOffset - - // handle "1-hit" encoding special case - if rv.postingsOffset&FSTValEncodingMask == FSTValEncoding1Hit { - return rv.init1Hit(postingsOffset) - } - - // read the location of the freq/norm details - var n uint64 - var read int - - rv.freqOffset, read = binary.Uvarint(d.sb.mem[postingsOffset+n : postingsOffset+binary.MaxVarintLen64]) - n += uint64(read) - - rv.locOffset, read = binary.Uvarint(d.sb.mem[postingsOffset+n : postingsOffset+n+binary.MaxVarintLen64]) - n += uint64(read) - - var postingsLen uint64 - postingsLen, read = binary.Uvarint(d.sb.mem[postingsOffset+n : postingsOffset+n+binary.MaxVarintLen64]) - n += uint64(read) - - roaringBytes := d.sb.mem[postingsOffset+n : postingsOffset+n+postingsLen] - - if rv.postings == nil { - rv.postings = roaring.NewBitmap() - } - _, err := rv.postings.FromBuffer(roaringBytes) - if err != nil { - return fmt.Errorf("error loading roaring bitmap: %v", err) - } - - return nil -} - -func (rv *PostingsList) init1Hit(fstVal uint64) error { - docNum, normBits := FSTValDecode1Hit(fstVal) - - rv.docNum1Hit = docNum - rv.normBits1Hit = normBits - - return nil -} - -// PostingsIterator provides a way to iterate through the postings list -type PostingsIterator struct { - postings *PostingsList - all roaring.IntPeekable - Actual roaring.IntPeekable - ActualBM *roaring.Bitmap - - currChunk uint32 - freqNormReader *chunkedIntDecoder - locReader *chunkedIntDecoder - - next Posting // reused across Next() calls - 
nextLocs []Location // reused across Next() calls - nextSegmentLocs []segment.Location // reused across Next() calls - - docNum1Hit uint64 - normBits1Hit uint64 - - buf []byte - - includeFreqNorm bool - includeLocs bool -} - -var emptyPostingsIterator = &PostingsIterator{} - -func (i *PostingsIterator) Size() int { - sizeInBytes := reflectStaticSizePostingsIterator + SizeOfPtr + - i.next.Size() - // account for freqNormReader, locReader if we start using this. - for _, entry := range i.nextLocs { - sizeInBytes += entry.Size() - } - - return sizeInBytes -} - -func (i *PostingsIterator) loadChunk(chunk int) error { - if i.includeFreqNorm { - err := i.freqNormReader.loadChunk(chunk) - if err != nil { - return err - } - } - - if i.includeLocs { - err := i.locReader.loadChunk(chunk) - if err != nil { - return err - } - } - - i.currChunk = uint32(chunk) - return nil -} - -func (i *PostingsIterator) readFreqNormHasLocs() (uint64, uint64, bool, error) { - if i.normBits1Hit != 0 { - return 1, i.normBits1Hit, false, nil - } - - freqHasLocs, err := i.freqNormReader.readUvarint() - if err != nil { - return 0, 0, false, fmt.Errorf("error reading frequency: %v", err) - } - - freq, hasLocs := decodeFreqHasLocs(freqHasLocs) - - normBits, err := i.freqNormReader.readUvarint() - if err != nil { - return 0, 0, false, fmt.Errorf("error reading norm: %v", err) - } - - return freq, normBits, hasLocs, nil -} - -func (i *PostingsIterator) skipFreqNormReadHasLocs() (bool, error) { - if i.normBits1Hit != 0 { - return false, nil - } - - freqHasLocs, err := i.freqNormReader.readUvarint() - if err != nil { - return false, fmt.Errorf("error reading freqHasLocs: %v", err) - } - - i.freqNormReader.SkipUvarint() // Skip normBits. - - return freqHasLocs&0x01 != 0, nil // See decodeFreqHasLocs() / hasLocs. 
-} - -func encodeFreqHasLocs(freq uint64, hasLocs bool) uint64 { - rv := freq << 1 - if hasLocs { - rv = rv | 0x01 // 0'th LSB encodes whether there are locations - } - return rv -} - -func decodeFreqHasLocs(freqHasLocs uint64) (uint64, bool) { - freq := freqHasLocs >> 1 - hasLocs := freqHasLocs&0x01 != 0 - return freq, hasLocs -} - -// readLocation processes all the integers on the stream representing a single -// location. -func (i *PostingsIterator) readLocation(l *Location) error { - // read off field - fieldID, err := i.locReader.readUvarint() - if err != nil { - return fmt.Errorf("error reading location field: %v", err) - } - // read off pos - pos, err := i.locReader.readUvarint() - if err != nil { - return fmt.Errorf("error reading location pos: %v", err) - } - // read off start - start, err := i.locReader.readUvarint() - if err != nil { - return fmt.Errorf("error reading location start: %v", err) - } - // read off end - end, err := i.locReader.readUvarint() - if err != nil { - return fmt.Errorf("error reading location end: %v", err) - } - // read off num array pos - numArrayPos, err := i.locReader.readUvarint() - if err != nil { - return fmt.Errorf("error reading location num array pos: %v", err) - } - - l.field = i.postings.sb.fieldsInv[fieldID] - l.pos = pos - l.start = start - l.end = end - - if cap(l.ap) < int(numArrayPos) { - l.ap = make([]uint64, int(numArrayPos)) - } else { - l.ap = l.ap[:int(numArrayPos)] - } - - // read off array positions - for k := 0; k < int(numArrayPos); k++ { - ap, err := i.locReader.readUvarint() - if err != nil { - return fmt.Errorf("error reading array position: %v", err) - } - - l.ap[k] = ap - } - - return nil -} - -// Next returns the next posting on the postings list, or nil at the end -func (i *PostingsIterator) Next() (segment.Posting, error) { - return i.nextAtOrAfter(0) -} - -// Advance returns the posting at the specified docNum or it is not present -// the next posting, or if the end is reached, nil -func (i 
*PostingsIterator) Advance(docNum uint64) (segment.Posting, error) { - return i.nextAtOrAfter(docNum) -} - -// Next returns the next posting on the postings list, or nil at the end -func (i *PostingsIterator) nextAtOrAfter(atOrAfter uint64) (segment.Posting, error) { - docNum, exists, err := i.nextDocNumAtOrAfter(atOrAfter) - if err != nil || !exists { - return nil, err - } - - i.next = Posting{} // clear the struct - rv := &i.next - rv.docNum = docNum - - if !i.includeFreqNorm { - return rv, nil - } - - var normBits uint64 - var hasLocs bool - - rv.freq, normBits, hasLocs, err = i.readFreqNormHasLocs() - if err != nil { - return nil, err - } - - rv.norm = math.Float32frombits(uint32(normBits)) - - if i.includeLocs && hasLocs { - // prepare locations into reused slices, where we assume - // rv.freq >= "number of locs", since in a composite field, - // some component fields might have their IncludeTermVector - // flags disabled while other component fields are enabled - if cap(i.nextLocs) >= int(rv.freq) { - i.nextLocs = i.nextLocs[0:rv.freq] - } else { - i.nextLocs = make([]Location, rv.freq, rv.freq*2) - } - if cap(i.nextSegmentLocs) < int(rv.freq) { - i.nextSegmentLocs = make([]segment.Location, rv.freq, rv.freq*2) - } - rv.locs = i.nextSegmentLocs[:0] - - numLocsBytes, err := i.locReader.readUvarint() - if err != nil { - return nil, fmt.Errorf("error reading location numLocsBytes: %v", err) - } - - j := 0 - startBytesRemaining := i.locReader.Len() // # bytes remaining in the locReader - for startBytesRemaining-i.locReader.Len() < int(numLocsBytes) { - err := i.readLocation(&i.nextLocs[j]) - if err != nil { - return nil, err - } - rv.locs = append(rv.locs, &i.nextLocs[j]) - j++ - } - } - - return rv, nil -} - -// nextDocNum returns the next docNum on the postings list, and also -// sets up the currChunk / loc related fields of the iterator. 
-func (i *PostingsIterator) nextDocNumAtOrAfter(atOrAfter uint64) (uint64, bool, error) { - if i.normBits1Hit != 0 { - if i.docNum1Hit == DocNum1HitFinished { - return 0, false, nil - } - if i.docNum1Hit < atOrAfter { - // advanced past our 1-hit - i.docNum1Hit = DocNum1HitFinished // consume our 1-hit docNum - return 0, false, nil - } - docNum := i.docNum1Hit - i.docNum1Hit = DocNum1HitFinished // consume our 1-hit docNum - return docNum, true, nil - } - - if i.Actual == nil || !i.Actual.HasNext() { - return 0, false, nil - } - - if i.postings == nil || i.postings.postings == i.ActualBM { - return i.nextDocNumAtOrAfterClean(atOrAfter) - } - - i.Actual.AdvanceIfNeeded(uint32(atOrAfter)) - - if !i.Actual.HasNext() { - // couldn't find anything - return 0, false, nil - } - - n := i.Actual.Next() - allN := i.all.Next() - - chunkSize, err := getChunkSize(i.postings.sb.chunkMode, i.postings.postings.GetCardinality(), i.postings.sb.numDocs) - if err != nil { - return 0, false, err - } - nChunk := n / uint32(chunkSize) - - // when allN becomes >= to here, then allN is in the same chunk as nChunk. 
- allNReachesNChunk := nChunk * uint32(chunkSize) - - // n is the next actual hit (excluding some postings), and - // allN is the next hit in the full postings, and - // if they don't match, move 'all' forwards until they do - for allN != n { - // we've reached same chunk, so move the freq/norm/loc decoders forward - if i.includeFreqNorm && allN >= allNReachesNChunk { - err := i.currChunkNext(nChunk) - if err != nil { - return 0, false, err - } - } - - allN = i.all.Next() - } - - if i.includeFreqNorm && (i.currChunk != nChunk || i.freqNormReader.isNil()) { - err := i.loadChunk(int(nChunk)) - if err != nil { - return 0, false, fmt.Errorf("error loading chunk: %v", err) - } - } - - return uint64(n), true, nil -} - -// optimization when the postings list is "clean" (e.g., no updates & -// no deletions) where the all bitmap is the same as the actual bitmap -func (i *PostingsIterator) nextDocNumAtOrAfterClean( - atOrAfter uint64) (uint64, bool, error) { - if !i.includeFreqNorm { - i.Actual.AdvanceIfNeeded(uint32(atOrAfter)) - - if !i.Actual.HasNext() { - return 0, false, nil // couldn't find anything - } - - return uint64(i.Actual.Next()), true, nil - } - - chunkSize, err := getChunkSize(i.postings.sb.chunkMode, i.postings.postings.GetCardinality(), i.postings.sb.numDocs) - if err != nil { - return 0, false, err - } - - // freq-norm's needed, so maintain freq-norm chunk reader - sameChunkNexts := 0 // # of times we called Next() in the same chunk - n := i.Actual.Next() - nChunk := n / uint32(chunkSize) - - for uint64(n) < atOrAfter && i.Actual.HasNext() { - n = i.Actual.Next() - - nChunkPrev := nChunk - nChunk = n / uint32(chunkSize) - - if nChunk != nChunkPrev { - sameChunkNexts = 0 - } else { - sameChunkNexts += 1 - } - } - - if uint64(n) < atOrAfter { - // couldn't find anything - return 0, false, nil - } - - for j := 0; j < sameChunkNexts; j++ { - err := i.currChunkNext(nChunk) - if err != nil { - return 0, false, fmt.Errorf("error optimized currChunkNext: %v", err) 
- } - } - - if i.currChunk != nChunk || i.freqNormReader.isNil() { - err := i.loadChunk(int(nChunk)) - if err != nil { - return 0, false, fmt.Errorf("error loading chunk: %v", err) - } - } - - return uint64(n), true, nil -} - -func (i *PostingsIterator) currChunkNext(nChunk uint32) error { - if i.currChunk != nChunk || i.freqNormReader.isNil() { - err := i.loadChunk(int(nChunk)) - if err != nil { - return fmt.Errorf("error loading chunk: %v", err) - } - } - - // read off freq/offsets even though we don't care about them - hasLocs, err := i.skipFreqNormReadHasLocs() - if err != nil { - return err - } - - if i.includeLocs && hasLocs { - numLocsBytes, err := i.locReader.readUvarint() - if err != nil { - return fmt.Errorf("error reading location numLocsBytes: %v", err) - } - - // skip over all the location bytes - i.locReader.SkipBytes(int(numLocsBytes)) - } - - return nil -} - -// DocNum1Hit returns the docNum and true if this is "1-hit" optimized -// and the docNum is available. -func (p *PostingsIterator) DocNum1Hit() (uint64, bool) { - if p.normBits1Hit != 0 && p.docNum1Hit != DocNum1HitFinished { - return p.docNum1Hit, true - } - return 0, false -} - -// ActualBitmap returns the underlying actual bitmap -// which can be used up the stack for optimizations -func (p *PostingsIterator) ActualBitmap() *roaring.Bitmap { - return p.ActualBM -} - -// ReplaceActual replaces the ActualBM with the provided -// bitmap -func (p *PostingsIterator) ReplaceActual(abm *roaring.Bitmap) { - p.ActualBM = abm - p.Actual = abm.Iterator() -} - -// PostingsIteratorFromBitmap constructs a PostingsIterator given an -// "actual" bitmap. -func PostingsIteratorFromBitmap(bm *roaring.Bitmap, - includeFreqNorm, includeLocs bool) (segment.PostingsIterator, error) { - return &PostingsIterator{ - ActualBM: bm, - Actual: bm.Iterator(), - includeFreqNorm: includeFreqNorm, - includeLocs: includeLocs, - }, nil -} - -// PostingsIteratorFrom1Hit constructs a PostingsIterator given a -// 1-hit docNum. 
-func PostingsIteratorFrom1Hit(docNum1Hit uint64, - includeFreqNorm, includeLocs bool) (segment.PostingsIterator, error) { - return &PostingsIterator{ - docNum1Hit: docNum1Hit, - normBits1Hit: NormBits1Hit, - includeFreqNorm: includeFreqNorm, - includeLocs: includeLocs, - }, nil -} - -// Posting is a single entry in a postings list -type Posting struct { - docNum uint64 - freq uint64 - norm float32 - locs []segment.Location -} - -func (p *Posting) Size() int { - sizeInBytes := reflectStaticSizePosting - - for _, entry := range p.locs { - sizeInBytes += entry.Size() - } - - return sizeInBytes -} - -// Number returns the document number of this posting in this segment -func (p *Posting) Number() uint64 { - return p.docNum -} - -// Frequency returns the frequencies of occurrence of this term in this doc/field -func (p *Posting) Frequency() uint64 { - return p.freq -} - -// Norm returns the normalization factor for this posting -func (p *Posting) Norm() float64 { - return float64(p.norm) -} - -// Locations returns the location information for each occurrence -func (p *Posting) Locations() []segment.Location { - return p.locs -} - -// Location represents the location of a single occurrence -type Location struct { - field string - pos uint64 - start uint64 - end uint64 - ap []uint64 -} - -func (l *Location) Size() int { - return reflectStaticSizeLocation + - len(l.field) + - len(l.ap)*SizeOfUint64 -} - -// Field returns the name of the field (useful in composite fields to know -// which original field the value came from) -func (l *Location) Field() string { - return l.field -} - -// Start returns the start byte offset of this occurrence -func (l *Location) Start() uint64 { - return l.start -} - -// End returns the end byte offset of this occurrence -func (l *Location) End() uint64 { - return l.end -} - -// Pos returns the 1-based phrase position of this occurrence -func (l *Location) Pos() uint64 { - return l.pos -} - -// ArrayPositions returns the array position vector 
associated with this occurrence -func (l *Location) ArrayPositions() []uint64 { - return l.ap -} diff --git a/vendor/github.com/blevesearch/zapx/v13/read.go b/vendor/github.com/blevesearch/zapx/v13/read.go deleted file mode 100644 index e47d4c6ab..000000000 --- a/vendor/github.com/blevesearch/zapx/v13/read.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright (c) 2017 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package zap - -import "encoding/binary" - -func (s *SegmentBase) getDocStoredMetaAndCompressed(docNum uint64) ([]byte, []byte) { - _, storedOffset, n, metaLen, dataLen := s.getDocStoredOffsets(docNum) - - meta := s.mem[storedOffset+n : storedOffset+n+metaLen] - data := s.mem[storedOffset+n+metaLen : storedOffset+n+metaLen+dataLen] - - return meta, data -} - -func (s *SegmentBase) getDocStoredOffsets(docNum uint64) ( - uint64, uint64, uint64, uint64, uint64) { - indexOffset := s.storedIndexOffset + (8 * docNum) - - storedOffset := binary.BigEndian.Uint64(s.mem[indexOffset : indexOffset+8]) - - var n uint64 - - metaLen, read := binary.Uvarint(s.mem[storedOffset : storedOffset+binary.MaxVarintLen64]) - n += uint64(read) - - dataLen, read := binary.Uvarint(s.mem[storedOffset+n : storedOffset+n+binary.MaxVarintLen64]) - n += uint64(read) - - return indexOffset, storedOffset, n, metaLen, dataLen -} diff --git a/vendor/github.com/blevesearch/zapx/v13/segment.go b/vendor/github.com/blevesearch/zapx/v13/segment.go deleted file mode 100644 
index 6317ad403..000000000 --- a/vendor/github.com/blevesearch/zapx/v13/segment.go +++ /dev/null @@ -1,570 +0,0 @@ -// Copyright (c) 2017 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package zap - -import ( - "bytes" - "encoding/binary" - "fmt" - "io" - "os" - "sync" - "unsafe" - - "github.com/RoaringBitmap/roaring" - mmap "github.com/blevesearch/mmap-go" - segment "github.com/blevesearch/scorch_segment_api/v2" - "github.com/blevesearch/vellum" - "github.com/golang/snappy" -) - -var reflectStaticSizeSegmentBase int - -func init() { - var sb SegmentBase - reflectStaticSizeSegmentBase = int(unsafe.Sizeof(sb)) -} - -// Open returns a zap impl of a segment -func (*ZapPlugin) Open(path string) (segment.Segment, error) { - f, err := os.Open(path) - if err != nil { - return nil, err - } - mm, err := mmap.Map(f, mmap.RDONLY, 0) - if err != nil { - // mmap failed, try to close the file - _ = f.Close() - return nil, err - } - - rv := &Segment{ - SegmentBase: SegmentBase{ - mem: mm[0 : len(mm)-FooterSize], - fieldsMap: make(map[string]uint16), - fieldDvReaders: make(map[uint16]*docValueReader), - fieldFSTs: make(map[uint16]*vellum.FST), - }, - f: f, - mm: mm, - path: path, - refs: 1, - } - rv.SegmentBase.updateSize() - - err = rv.loadConfig() - if err != nil { - _ = rv.Close() - return nil, err - } - - err = rv.loadFields() - if err != nil { - _ = rv.Close() - return nil, err - } - - err = rv.loadDvReaders() - if err != nil { - _ = rv.Close() - 
return nil, err - } - - return rv, nil -} - -// SegmentBase is a memory only, read-only implementation of the -// segment.Segment interface, using zap's data representation. -type SegmentBase struct { - mem []byte - memCRC uint32 - chunkMode uint32 - fieldsMap map[string]uint16 // fieldName -> fieldID+1 - fieldsInv []string // fieldID -> fieldName - numDocs uint64 - storedIndexOffset uint64 - fieldsIndexOffset uint64 - docValueOffset uint64 - dictLocs []uint64 - fieldDvReaders map[uint16]*docValueReader // naive chunk cache per field - fieldDvNames []string // field names cached in fieldDvReaders - size uint64 - - m sync.Mutex - fieldFSTs map[uint16]*vellum.FST -} - -func (sb *SegmentBase) Size() int { - return int(sb.size) -} - -func (sb *SegmentBase) updateSize() { - sizeInBytes := reflectStaticSizeSegmentBase + - cap(sb.mem) - - // fieldsMap - for k := range sb.fieldsMap { - sizeInBytes += (len(k) + SizeOfString) + SizeOfUint16 - } - - // fieldsInv, dictLocs - for _, entry := range sb.fieldsInv { - sizeInBytes += len(entry) + SizeOfString - } - sizeInBytes += len(sb.dictLocs) * SizeOfUint64 - - // fieldDvReaders - for _, v := range sb.fieldDvReaders { - sizeInBytes += SizeOfUint16 + SizeOfPtr - if v != nil { - sizeInBytes += v.size() - } - } - - sb.size = uint64(sizeInBytes) -} - -func (sb *SegmentBase) AddRef() {} -func (sb *SegmentBase) DecRef() (err error) { return nil } -func (sb *SegmentBase) Close() (err error) { return nil } - -// Segment implements a persisted segment.Segment interface, by -// embedding an mmap()'ed SegmentBase. -type Segment struct { - SegmentBase - - f *os.File - mm mmap.MMap - path string - version uint32 - crc uint32 - - m sync.Mutex // Protects the fields that follow. 
- refs int64 -} - -func (s *Segment) Size() int { - // 8 /* size of file pointer */ - // 4 /* size of version -> uint32 */ - // 4 /* size of crc -> uint32 */ - sizeOfUints := 16 - - sizeInBytes := (len(s.path) + SizeOfString) + sizeOfUints - - // mutex, refs -> int64 - sizeInBytes += 16 - - // do not include the mmap'ed part - return sizeInBytes + s.SegmentBase.Size() - cap(s.mem) -} - -func (s *Segment) AddRef() { - s.m.Lock() - s.refs++ - s.m.Unlock() -} - -func (s *Segment) DecRef() (err error) { - s.m.Lock() - s.refs-- - if s.refs == 0 { - err = s.closeActual() - } - s.m.Unlock() - return err -} - -func (s *Segment) loadConfig() error { - crcOffset := len(s.mm) - 4 - s.crc = binary.BigEndian.Uint32(s.mm[crcOffset : crcOffset+4]) - - verOffset := crcOffset - 4 - s.version = binary.BigEndian.Uint32(s.mm[verOffset : verOffset+4]) - if s.version != Version { - return fmt.Errorf("unsupported version %d", s.version) - } - - chunkOffset := verOffset - 4 - s.chunkMode = binary.BigEndian.Uint32(s.mm[chunkOffset : chunkOffset+4]) - - docValueOffset := chunkOffset - 8 - s.docValueOffset = binary.BigEndian.Uint64(s.mm[docValueOffset : docValueOffset+8]) - - fieldsIndexOffset := docValueOffset - 8 - s.fieldsIndexOffset = binary.BigEndian.Uint64(s.mm[fieldsIndexOffset : fieldsIndexOffset+8]) - - storedIndexOffset := fieldsIndexOffset - 8 - s.storedIndexOffset = binary.BigEndian.Uint64(s.mm[storedIndexOffset : storedIndexOffset+8]) - - numDocsOffset := storedIndexOffset - 8 - s.numDocs = binary.BigEndian.Uint64(s.mm[numDocsOffset : numDocsOffset+8]) - return nil -} - -func (s *SegmentBase) loadFields() error { - // NOTE for now we assume the fields index immediately precedes - // the footer, and if this changes, need to adjust accordingly (or - // store explicit length), where s.mem was sliced from s.mm in Open(). 
- fieldsIndexEnd := uint64(len(s.mem)) - - // iterate through fields index - var fieldID uint64 - for s.fieldsIndexOffset+(8*fieldID) < fieldsIndexEnd { - addr := binary.BigEndian.Uint64(s.mem[s.fieldsIndexOffset+(8*fieldID) : s.fieldsIndexOffset+(8*fieldID)+8]) - - dictLoc, read := binary.Uvarint(s.mem[addr:fieldsIndexEnd]) - n := uint64(read) - s.dictLocs = append(s.dictLocs, dictLoc) - - var nameLen uint64 - nameLen, read = binary.Uvarint(s.mem[addr+n : fieldsIndexEnd]) - n += uint64(read) - - name := string(s.mem[addr+n : addr+n+nameLen]) - s.fieldsInv = append(s.fieldsInv, name) - s.fieldsMap[name] = uint16(fieldID + 1) - - fieldID++ - } - return nil -} - -// Dictionary returns the term dictionary for the specified field -func (s *SegmentBase) Dictionary(field string) (segment.TermDictionary, error) { - dict, err := s.dictionary(field) - if err == nil && dict == nil { - return emptyDictionary, nil - } - return dict, err -} - -func (sb *SegmentBase) dictionary(field string) (rv *Dictionary, err error) { - fieldIDPlus1 := sb.fieldsMap[field] - if fieldIDPlus1 > 0 { - rv = &Dictionary{ - sb: sb, - field: field, - fieldID: fieldIDPlus1 - 1, - } - - dictStart := sb.dictLocs[rv.fieldID] - if dictStart > 0 { - var ok bool - sb.m.Lock() - if rv.fst, ok = sb.fieldFSTs[rv.fieldID]; !ok { - // read the length of the vellum data - vellumLen, read := binary.Uvarint(sb.mem[dictStart : dictStart+binary.MaxVarintLen64]) - fstBytes := sb.mem[dictStart+uint64(read) : dictStart+uint64(read)+vellumLen] - rv.fst, err = vellum.Load(fstBytes) - if err != nil { - sb.m.Unlock() - return nil, fmt.Errorf("dictionary field %s vellum err: %v", field, err) - } - - sb.fieldFSTs[rv.fieldID] = rv.fst - } - - sb.m.Unlock() - rv.fstReader, err = rv.fst.Reader() - if err != nil { - return nil, fmt.Errorf("dictionary field %s vellum reader err: %v", field, err) - } - } - } - - return rv, nil -} - -// visitDocumentCtx holds data structures that are reusable across -// multiple VisitDocument() 
calls to avoid memory allocations -type visitDocumentCtx struct { - buf []byte - reader bytes.Reader - arrayPos []uint64 -} - -var visitDocumentCtxPool = sync.Pool{ - New: func() interface{} { - reuse := &visitDocumentCtx{} - return reuse - }, -} - -// VisitStoredFields invokes the StoredFieldValueVisitor for each stored field -// for the specified doc number -func (s *SegmentBase) VisitStoredFields(num uint64, visitor segment.StoredFieldValueVisitor) error { - vdc := visitDocumentCtxPool.Get().(*visitDocumentCtx) - defer visitDocumentCtxPool.Put(vdc) - return s.visitStoredFields(vdc, num, visitor) -} - -func (s *SegmentBase) visitStoredFields(vdc *visitDocumentCtx, num uint64, - visitor segment.StoredFieldValueVisitor) error { - // first make sure this is a valid number in this segment - if num < s.numDocs { - meta, compressed := s.getDocStoredMetaAndCompressed(num) - - vdc.reader.Reset(meta) - - // handle _id field special case - idFieldValLen, err := binary.ReadUvarint(&vdc.reader) - if err != nil { - return err - } - idFieldVal := compressed[:idFieldValLen] - - keepGoing := visitor("_id", byte('t'), idFieldVal, nil) - if !keepGoing { - visitDocumentCtxPool.Put(vdc) - return nil - } - - // handle non-"_id" fields - compressed = compressed[idFieldValLen:] - - uncompressed, err := snappy.Decode(vdc.buf[:cap(vdc.buf)], compressed) - if err != nil { - return err - } - - for keepGoing { - field, err := binary.ReadUvarint(&vdc.reader) - if err == io.EOF { - break - } - if err != nil { - return err - } - typ, err := binary.ReadUvarint(&vdc.reader) - if err != nil { - return err - } - offset, err := binary.ReadUvarint(&vdc.reader) - if err != nil { - return err - } - l, err := binary.ReadUvarint(&vdc.reader) - if err != nil { - return err - } - numap, err := binary.ReadUvarint(&vdc.reader) - if err != nil { - return err - } - var arrayPos []uint64 - if numap > 0 { - if cap(vdc.arrayPos) < int(numap) { - vdc.arrayPos = make([]uint64, numap) - } - arrayPos = 
vdc.arrayPos[:numap] - for i := 0; i < int(numap); i++ { - ap, err := binary.ReadUvarint(&vdc.reader) - if err != nil { - return err - } - arrayPos[i] = ap - } - } - - value := uncompressed[offset : offset+l] - keepGoing = visitor(s.fieldsInv[field], byte(typ), value, arrayPos) - } - - vdc.buf = uncompressed - } - return nil -} - -// DocID returns the value of the _id field for the given docNum -func (s *SegmentBase) DocID(num uint64) ([]byte, error) { - if num >= s.numDocs { - return nil, nil - } - - vdc := visitDocumentCtxPool.Get().(*visitDocumentCtx) - - meta, compressed := s.getDocStoredMetaAndCompressed(num) - - vdc.reader.Reset(meta) - - // handle _id field special case - idFieldValLen, err := binary.ReadUvarint(&vdc.reader) - if err != nil { - return nil, err - } - idFieldVal := compressed[:idFieldValLen] - - visitDocumentCtxPool.Put(vdc) - - return idFieldVal, nil -} - -// Count returns the number of documents in this segment. -func (s *SegmentBase) Count() uint64 { - return s.numDocs -} - -// DocNumbers returns a bitset corresponding to the doc numbers of all the -// provided _id strings -func (s *SegmentBase) DocNumbers(ids []string) (*roaring.Bitmap, error) { - rv := roaring.New() - - if len(s.fieldsMap) > 0 { - idDict, err := s.dictionary("_id") - if err != nil { - return nil, err - } - - postingsList := emptyPostingsList - - sMax, err := idDict.fst.GetMaxKey() - if err != nil { - return nil, err - } - sMaxStr := string(sMax) - filteredIds := make([]string, 0, len(ids)) - for _, id := range ids { - if id <= sMaxStr { - filteredIds = append(filteredIds, id) - } - } - - for _, id := range filteredIds { - postingsList, err = idDict.postingsList([]byte(id), nil, postingsList) - if err != nil { - return nil, err - } - postingsList.OrInto(rv) - } - } - - return rv, nil -} - -// Fields returns the field names used in this segment -func (s *SegmentBase) Fields() []string { - return s.fieldsInv -} - -// Path returns the path of this segment on disk -func (s 
*Segment) Path() string { - return s.path -} - -// Close releases all resources associated with this segment -func (s *Segment) Close() (err error) { - return s.DecRef() -} - -func (s *Segment) closeActual() (err error) { - if s.mm != nil { - err = s.mm.Unmap() - } - // try to close file even if unmap failed - if s.f != nil { - err2 := s.f.Close() - if err == nil { - // try to return first error - err = err2 - } - } - return -} - -// some helpers i started adding for the command-line utility - -// Data returns the underlying mmaped data slice -func (s *Segment) Data() []byte { - return s.mm -} - -// CRC returns the CRC value stored in the file footer -func (s *Segment) CRC() uint32 { - return s.crc -} - -// Version returns the file version in the file footer -func (s *Segment) Version() uint32 { - return s.version -} - -// ChunkFactor returns the chunk factor in the file footer -func (s *Segment) ChunkMode() uint32 { - return s.chunkMode -} - -// FieldsIndexOffset returns the fields index offset in the file footer -func (s *Segment) FieldsIndexOffset() uint64 { - return s.fieldsIndexOffset -} - -// StoredIndexOffset returns the stored value index offset in the file footer -func (s *Segment) StoredIndexOffset() uint64 { - return s.storedIndexOffset -} - -// DocValueOffset returns the docValue offset in the file footer -func (s *Segment) DocValueOffset() uint64 { - return s.docValueOffset -} - -// NumDocs returns the number of documents in the file footer -func (s *Segment) NumDocs() uint64 { - return s.numDocs -} - -// DictAddr is a helper function to compute the file offset where the -// dictionary is stored for the specified field. 
-func (s *Segment) DictAddr(field string) (uint64, error) { - fieldIDPlus1, ok := s.fieldsMap[field] - if !ok { - return 0, fmt.Errorf("no such field '%s'", field) - } - - return s.dictLocs[fieldIDPlus1-1], nil -} - -func (s *SegmentBase) loadDvReaders() error { - if s.docValueOffset == fieldNotUninverted || s.numDocs == 0 { - return nil - } - - var read uint64 - for fieldID, field := range s.fieldsInv { - var fieldLocStart, fieldLocEnd uint64 - var n int - fieldLocStart, n = binary.Uvarint(s.mem[s.docValueOffset+read : s.docValueOffset+read+binary.MaxVarintLen64]) - if n <= 0 { - return fmt.Errorf("loadDvReaders: failed to read the docvalue offset start for field %d", fieldID) - } - read += uint64(n) - fieldLocEnd, n = binary.Uvarint(s.mem[s.docValueOffset+read : s.docValueOffset+read+binary.MaxVarintLen64]) - if n <= 0 { - return fmt.Errorf("loadDvReaders: failed to read the docvalue offset end for field %d", fieldID) - } - read += uint64(n) - - fieldDvReader, err := s.loadFieldDocValueReader(field, fieldLocStart, fieldLocEnd) - if err != nil { - return err - } - if fieldDvReader != nil { - s.fieldDvReaders[uint16(fieldID)] = fieldDvReader - s.fieldDvNames = append(s.fieldDvNames, field) - } - } - - return nil -} diff --git a/vendor/github.com/blevesearch/zapx/v13/sizes.go b/vendor/github.com/blevesearch/zapx/v13/sizes.go deleted file mode 100644 index 34166ea33..000000000 --- a/vendor/github.com/blevesearch/zapx/v13/sizes.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright (c) 2020 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package zap - -import ( - "reflect" -) - -func init() { - var b bool - SizeOfBool = int(reflect.TypeOf(b).Size()) - var f32 float32 - SizeOfFloat32 = int(reflect.TypeOf(f32).Size()) - var f64 float64 - SizeOfFloat64 = int(reflect.TypeOf(f64).Size()) - var i int - SizeOfInt = int(reflect.TypeOf(i).Size()) - var m map[int]int - SizeOfMap = int(reflect.TypeOf(m).Size()) - var ptr *int - SizeOfPtr = int(reflect.TypeOf(ptr).Size()) - var slice []int - SizeOfSlice = int(reflect.TypeOf(slice).Size()) - var str string - SizeOfString = int(reflect.TypeOf(str).Size()) - var u8 uint8 - SizeOfUint8 = int(reflect.TypeOf(u8).Size()) - var u16 uint16 - SizeOfUint16 = int(reflect.TypeOf(u16).Size()) - var u32 uint32 - SizeOfUint32 = int(reflect.TypeOf(u32).Size()) - var u64 uint64 - SizeOfUint64 = int(reflect.TypeOf(u64).Size()) -} - -var SizeOfBool int -var SizeOfFloat32 int -var SizeOfFloat64 int -var SizeOfInt int -var SizeOfMap int -var SizeOfPtr int -var SizeOfSlice int -var SizeOfString int -var SizeOfUint8 int -var SizeOfUint16 int -var SizeOfUint32 int -var SizeOfUint64 int diff --git a/vendor/github.com/blevesearch/zapx/v13/write.go b/vendor/github.com/blevesearch/zapx/v13/write.go deleted file mode 100644 index 77aefdbfc..000000000 --- a/vendor/github.com/blevesearch/zapx/v13/write.go +++ /dev/null @@ -1,145 +0,0 @@ -// Copyright (c) 2017 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package zap - -import ( - "encoding/binary" - "io" - - "github.com/RoaringBitmap/roaring" -) - -// writes out the length of the roaring bitmap in bytes as varint -// then writes out the roaring bitmap itself -func writeRoaringWithLen(r *roaring.Bitmap, w io.Writer, - reuseBufVarint []byte) (int, error) { - buf, err := r.ToBytes() - if err != nil { - return 0, err - } - - var tw int - - // write out the length - n := binary.PutUvarint(reuseBufVarint, uint64(len(buf))) - nw, err := w.Write(reuseBufVarint[:n]) - tw += nw - if err != nil { - return tw, err - } - - // write out the roaring bytes - nw, err = w.Write(buf) - tw += nw - if err != nil { - return tw, err - } - - return tw, nil -} - -func persistFields(fieldsInv []string, w *CountHashWriter, dictLocs []uint64) (uint64, error) { - var rv uint64 - var fieldsOffsets []uint64 - - for fieldID, fieldName := range fieldsInv { - // record start of this field - fieldsOffsets = append(fieldsOffsets, uint64(w.Count())) - - // write out the dict location and field name length - _, err := writeUvarints(w, dictLocs[fieldID], uint64(len(fieldName))) - if err != nil { - return 0, err - } - - // write out the field name - _, err = w.Write([]byte(fieldName)) - if err != nil { - return 0, err - } - } - - // now write out the fields index - rv = uint64(w.Count()) - for fieldID := range fieldsInv { - err := binary.Write(w, binary.BigEndian, fieldsOffsets[fieldID]) - if err != nil { - return 0, err - } - } - - return rv, nil -} - -// FooterSize is the size of the footer record in bytes -// crc + ver + chunk + field offset + stored offset + num docs + docValueOffset -const FooterSize = 4 + 4 + 4 + 8 + 8 + 8 + 8 - -func persistFooter(numDocs, storedIndexOffset, fieldsIndexOffset, docValueOffset uint64, - chunkMode uint32, crcBeforeFooter uint32, writerIn io.Writer) error { - w := NewCountHashWriter(writerIn) - w.crc = 
crcBeforeFooter - - // write out the number of docs - err := binary.Write(w, binary.BigEndian, numDocs) - if err != nil { - return err - } - // write out the stored field index location: - err = binary.Write(w, binary.BigEndian, storedIndexOffset) - if err != nil { - return err - } - // write out the field index location - err = binary.Write(w, binary.BigEndian, fieldsIndexOffset) - if err != nil { - return err - } - // write out the fieldDocValue location - err = binary.Write(w, binary.BigEndian, docValueOffset) - if err != nil { - return err - } - // write out 32-bit chunk factor - err = binary.Write(w, binary.BigEndian, chunkMode) - if err != nil { - return err - } - // write out 32-bit version - err = binary.Write(w, binary.BigEndian, Version) - if err != nil { - return err - } - // write out CRC-32 of everything upto but not including this CRC - err = binary.Write(w, binary.BigEndian, w.crc) - if err != nil { - return err - } - return nil -} - -func writeUvarints(w io.Writer, vals ...uint64) (tw int, err error) { - buf := make([]byte, binary.MaxVarintLen64) - for _, val := range vals { - n := binary.PutUvarint(buf, val) - var nw int - nw, err = w.Write(buf[:n]) - tw += nw - if err != nil { - return tw, err - } - } - return tw, err -} diff --git a/vendor/github.com/blevesearch/zapx/v13/zap.md b/vendor/github.com/blevesearch/zapx/v13/zap.md deleted file mode 100644 index d74dc548b..000000000 --- a/vendor/github.com/blevesearch/zapx/v13/zap.md +++ /dev/null @@ -1,177 +0,0 @@ -# ZAP File Format - -## Legend - -### Sections - - |========| - | | section - |========| - -### Fixed-size fields - - |--------| |----| |--| |-| - | | uint64 | | uint32 | | uint16 | | uint8 - |--------| |----| |--| |-| - -### Varints - - |~~~~~~~~| - | | varint(up to uint64) - |~~~~~~~~| - -### Arbitrary-length fields - - |--------...---| - | | arbitrary-length field (string, vellum, roaring bitmap) - |--------...---| - -### Chunked data - - [--------] - [ ] - [--------] - -## Overview - 
-Footer section describes the configuration of particular ZAP file. The format of footer is version-dependent, so it is necessary to check `V` field before the parsing. - - |==================================================| - | Stored Fields | - |==================================================| - |-----> | Stored Fields Index | - | |==================================================| - | | Dictionaries + Postings + DocValues | - | |==================================================| - | |---> | DocValues Index | - | | |==================================================| - | | | Fields | - | | |==================================================| - | | |-> | Fields Index | - | | | |========|========|========|========|====|====|====| - | | | | D# | SF | F | FDV | CF | V | CC | (Footer) - | | | |========|====|===|====|===|====|===|====|====|====| - | | | | | | - |-+-+-----------------| | | - | |--------------------------| | - |-------------------------------------| - - D#. Number of Docs. - SF. Stored Fields Index Offset. - F. Field Index Offset. - FDV. Field DocValue Offset. - CF. Chunk Factor. - V. Version. - CC. CRC32. - -## Stored Fields - -Stored Fields Index is `D#` consecutive 64-bit unsigned integers - offsets, where relevant Stored Fields Data records are located. - - 0 [SF] [SF + D# * 8] - | Stored Fields | Stored Fields Index | - |================================|==================================| - | | | - | |--------------------| ||--------|--------|. . .|--------|| - | |-> | Stored Fields Data | || 0 | 1 | | D# - 1 || - | | |--------------------| ||--------|----|---|. . .|--------|| - | | | | | - |===|============================|==============|===================| - | | - |-------------------------------------------| - -Stored Fields Data is an arbitrary size record, which consists of metadata and [Snappy](https://github.com/golang/snappy)-compressed data. 
- - Stored Fields Data - |~~~~~~~~|~~~~~~~~|~~~~~~~~...~~~~~~~~|~~~~~~~~...~~~~~~~~| - | MDS | CDS | MD | CD | - |~~~~~~~~|~~~~~~~~|~~~~~~~~...~~~~~~~~|~~~~~~~~...~~~~~~~~| - - MDS. Metadata size. - CDS. Compressed data size. - MD. Metadata. - CD. Snappy-compressed data. - -## Fields - -Fields Index section located between addresses `F` and `len(file) - len(footer)` and consist of `uint64` values (`F1`, `F2`, ...) which are offsets to records in Fields section. We have `F# = (len(file) - len(footer) - F) / sizeof(uint64)` fields. - - - (...) [F] [F + F#] - | Fields | Fields Index. | - |================================|================================| - | | | - | |~~~~~~~~|~~~~~~~~|---...---|||--------|--------|...|--------|| - ||->| Dict | Length | Name ||| 0 | 1 | | F# - 1 || - || |~~~~~~~~|~~~~~~~~|---...---|||--------|----|---|...|--------|| - || | | | - ||===============================|==============|=================| - | | - |----------------------------------------------| - - -## Dictionaries + Postings - -Each of fields has its own dictionary, encoded in [Vellum](https://github.com/couchbase/vellum) format. Dictionary consists of pairs `(term, offset)`, where `offset` indicates the position of postings (list of documents) for this particular term. - - |================================================================|- Dictionaries + - | | Postings + - | | DocValues - | Freq/Norm (chunked) | - | [~~~~~~|~~~~~~~~~~~~~~~~~~~~~~~~~~~~~] | - | |->[ Freq | Norm (float32 under varint) ] | - | | [~~~~~~|~~~~~~~~~~~~~~~~~~~~~~~~~~~~~] | - | | | - | |------------------------------------------------------------| | - | Location Details (chunked) | | - | [~~~~~~|~~~~~|~~~~~~~|~~~~~|~~~~~~|~~~~~~~~|~~~~~] | | - | |->[ Size | Pos | Start | End | Arr# | ArrPos | ... 
] | | - | | [~~~~~~|~~~~~|~~~~~~~|~~~~~|~~~~~~|~~~~~~~~|~~~~~] | | - | | | | - | |----------------------| | | - | Postings List | | | - | |~~~~~~~~|~~~~~|~~|~~~~~~~~|-----------...--| | | - | |->| F/N | LD | Length | ROARING BITMAP | | | - | | |~~~~~|~~|~~~~~~~~|~~~~~~~~|-----------...--| | | - | | |----------------------------------------------| | - | |--------------------------------------| | - | Dictionary | | - | |~~~~~~~~|--------------------------|-...-| | - | |->| Length | VELLUM DATA : (TERM -> OFFSET) | | - | | |~~~~~~~~|----------------------------...-| | - | | | - |======|=========================================================|- DocValues Index - | | | - |======|=========================================================|- Fields - | | | - | |~~~~|~~~|~~~~~~~~|---...---| | - | | Dict | Length | Name | | - | |~~~~~~~~|~~~~~~~~|---...---| | - | | - |================================================================| - -## DocValues - -DocValues Index is `F#` pairs of varints, one pair per field. Each pair of varints indicates start and end point of DocValues slice. - - |================================================================| - | |------...--| | - | |->| DocValues |<-| | - | | |------...--| | | - |==|=================|===========================================|- DocValues Index - ||~|~~~~~~~~~|~~~~~~~|~~| |~~~~~~~~~~~~~~|~~~~~~~~~~~~|| - || DV1 START | DV1 STOP | . . . . . | DV(F#) START | DV(F#) END || - ||~~~~~~~~~~~|~~~~~~~~~~| |~~~~~~~~~~~~~~|~~~~~~~~~~~~|| - |================================================================| - -DocValues is chunked Snappy-compressed values for each document and field. - - [~~~~~~~~~~~~~~~|~~~~~~|~~~~~~~~~|-...-|~~~~~~|~~~~~~~~~|--------------------...-] - [ Doc# in Chunk | Doc1 | Offset1 | ... | DocN | OffsetN | SNAPPY COMPRESSED DATA ] - [~~~~~~~~~~~~~~~|~~~~~~|~~~~~~~~~|-...-|~~~~~~|~~~~~~~~~|--------------------...-] - -Last 16 bytes are description of chunks. 
- - |~~~~~~~~~~~~...~|----------------|----------------| - | Chunk Sizes | Chunk Size Arr | Chunk# | - |~~~~~~~~~~~~...~|----------------|----------------| diff --git a/vendor/github.com/blevesearch/zapx/v14/.golangci.yml b/vendor/github.com/blevesearch/zapx/v14/.golangci.yml deleted file mode 100644 index f0f2f6067..000000000 --- a/vendor/github.com/blevesearch/zapx/v14/.golangci.yml +++ /dev/null @@ -1,29 +0,0 @@ -linters: - # please, do not use `enable-all`: it's deprecated and will be removed soon. - # inverted configuration with `enable-all` and `disable` is not scalable during updates of golangci-lint - disable-all: true - enable: - - bodyclose - - deadcode - - depguard - - dupl - - errcheck - - gofmt - - goimports - - goprintffuncname - - gosec - - gosimple - - govet - - ineffassign - - interfacer - - misspell - - nakedret - - nolintlint - - rowserrcheck - - staticcheck - - structcheck - - typecheck - - unused - - varcheck - - whitespace - diff --git a/vendor/github.com/blevesearch/zapx/v14/LICENSE b/vendor/github.com/blevesearch/zapx/v14/LICENSE deleted file mode 100644 index 7a4a3ea24..000000000 --- a/vendor/github.com/blevesearch/zapx/v14/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. \ No newline at end of file diff --git a/vendor/github.com/blevesearch/zapx/v14/README.md b/vendor/github.com/blevesearch/zapx/v14/README.md deleted file mode 100644 index 4cbf1a145..000000000 --- a/vendor/github.com/blevesearch/zapx/v14/README.md +++ /dev/null @@ -1,163 +0,0 @@ -# zapx file format - -The zapx module is fork of [zap](https://github.com/blevesearch/zap) module which maintains file format compatibility, but removes dependency on bleve, and instead depends only on the indepenent interface modules: - -- [bleve_index_api](https://github.com/blevesearch/scorch_segment_api) -- [scorch_segment_api](https://github.com/blevesearch/scorch_segment_api) - -Advanced ZAP File Format Documentation is [here](zap.md). - -The file is written in the reverse order that we typically access data. This helps us write in one pass since later sections of the file require file offsets of things we've already written. 
- -Current usage: - -- mmap the entire file -- crc-32 bytes and version are in fixed position at end of the file -- reading remainder of footer could be version specific -- remainder of footer gives us: - - 3 important offsets (docValue , fields index and stored data index) - - 2 important values (number of docs and chunk factor) -- field data is processed once and memoized onto the heap so that we never have to go back to disk for it -- access to stored data by doc number means first navigating to the stored data index, then accessing a fixed position offset into that slice, which gives us the actual address of the data. the first bytes of that section tell us the size of data so that we know where it ends. -- access to all other indexed data follows the following pattern: - - first know the field name -> convert to id - - next navigate to term dictionary for that field - - some operations stop here and do dictionary ops - - next use dictionary to navigate to posting list for a specific term - - walk posting list - - if necessary, walk posting details as we go - - if location info is desired, consult location bitmap to see if it is there - -## stored fields section - -- for each document - - preparation phase: - - produce a slice of metadata bytes and data bytes - - produce these slices in field id order - - field value is appended to the data slice - - metadata slice is varint encoded with the following values for each field value - - field id (uint16) - - field type (byte) - - field value start offset in uncompressed data slice (uint64) - - field value length (uint64) - - field number of array positions (uint64) - - one additional value for each array position (uint64) - - compress the data slice using snappy - - file writing phase: - - remember the start offset for this document - - write out meta data length (varint uint64) - - write out compressed data length (varint uint64) - - write out the metadata bytes - - write out the compressed data bytes - -## stored 
fields idx - -- for each document - - write start offset (remembered from previous section) of stored data (big endian uint64) - -With this index and a known document number, we have direct access to all the stored field data. - -## posting details (freq/norm) section - -- for each posting list - - produce a slice containing multiple consecutive chunks (each chunk is varint stream) - - produce a slice remembering offsets of where each chunk starts - - preparation phase: - - for each hit in the posting list - - if this hit is in next chunk close out encoding of last chunk and record offset start of next - - encode term frequency (uint64) - - encode norm factor (float32) - - file writing phase: - - remember start position for this posting list details - - write out number of chunks that follow (varint uint64) - - write out length of each chunk (each a varint uint64) - - write out the byte slice containing all the chunk data - -If you know the doc number you're interested in, this format lets you jump to the correct chunk (docNum/chunkFactor) directly and then seek within that chunk until you find it. 
- -## posting details (location) section - -- for each posting list - - produce a slice containing multiple consecutive chunks (each chunk is varint stream) - - produce a slice remembering offsets of where each chunk starts - - preparation phase: - - for each hit in the posting list - - if this hit is in next chunk close out encoding of last chunk and record offset start of next - - encode field (uint16) - - encode field pos (uint64) - - encode field start (uint64) - - encode field end (uint64) - - encode number of array positions to follow (uint64) - - encode each array position (each uint64) - - file writing phase: - - remember start position for this posting list details - - write out number of chunks that follow (varint uint64) - - write out length of each chunk (each a varint uint64) - - write out the byte slice containing all the chunk data - -If you know the doc number you're interested in, this format lets you jump to the correct chunk (docNum/chunkFactor) directly and then seek within that chunk until you find it. 
- -## postings list section - -- for each posting list - - preparation phase: - - encode roaring bitmap posting list to bytes (so we know the length) - - file writing phase: - - remember the start position for this posting list - - write freq/norm details offset (remembered from previous, as varint uint64) - - write location details offset (remembered from previous, as varint uint64) - - write length of encoded roaring bitmap - - write the serialized roaring bitmap data - -## dictionary - -- for each field - - preparation phase: - - encode vellum FST with dictionary data pointing to file offset of posting list (remembered from previous) - - file writing phase: - - remember the start position of this persistDictionary - - write length of vellum data (varint uint64) - - write out vellum data - -## fields section - -- for each field - - file writing phase: - - remember start offset for each field - - write dictionary address (remembered from previous) (varint uint64) - - write length of field name (varint uint64) - - write field name bytes - -## fields idx - -- for each field - - file writing phase: - - write big endian uint64 of start offset for each field - -NOTE: currently we don't know or record the length of this fields index. Instead we rely on the fact that we know it immediately precedes a footer of known size. 
- -## fields DocValue - -- for each field - - preparation phase: - - produce a slice containing multiple consecutive chunks, where each chunk is composed of a meta section followed by compressed columnar field data - - produce a slice remembering the length of each chunk - - file writing phase: - - remember the start position of this first field DocValue offset in the footer - - write out number of chunks that follow (varint uint64) - - write out length of each chunk (each a varint uint64) - - write out the byte slice containing all the chunk data - -NOTE: currently the meta header inside each chunk gives clue to the location offsets and size of the data pertaining to a given docID and any -read operation leverage that meta information to extract the document specific data from the file. - -## footer - -- file writing phase - - write number of docs (big endian uint64) - - write stored field index location (big endian uint64) - - write field index location (big endian uint64) - - write field docValue location (big endian uint64) - - write out chunk factor (big endian uint32) - - write out version (big endian uint32) - - write out file CRC of everything preceding this (big endian uint32) diff --git a/vendor/github.com/blevesearch/zapx/v14/build.go b/vendor/github.com/blevesearch/zapx/v14/build.go deleted file mode 100644 index 59c426848..000000000 --- a/vendor/github.com/blevesearch/zapx/v14/build.go +++ /dev/null @@ -1,156 +0,0 @@ -// Copyright (c) 2017 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package zap - -import ( - "bufio" - "math" - "os" - - "github.com/blevesearch/vellum" -) - -const Version uint32 = 14 - -const Type string = "zap" - -const fieldNotUninverted = math.MaxUint64 - -func (sb *SegmentBase) Persist(path string) error { - return PersistSegmentBase(sb, path) -} - -// PersistSegmentBase persists SegmentBase in the zap file format. -func PersistSegmentBase(sb *SegmentBase, path string) error { - flag := os.O_RDWR | os.O_CREATE - - f, err := os.OpenFile(path, flag, 0600) - if err != nil { - return err - } - - cleanup := func() { - _ = f.Close() - _ = os.Remove(path) - } - - br := bufio.NewWriter(f) - - _, err = br.Write(sb.mem) - if err != nil { - cleanup() - return err - } - - err = persistFooter(sb.numDocs, sb.storedIndexOffset, sb.fieldsIndexOffset, sb.docValueOffset, - sb.chunkMode, sb.memCRC, br) - if err != nil { - cleanup() - return err - } - - err = br.Flush() - if err != nil { - cleanup() - return err - } - - err = f.Sync() - if err != nil { - cleanup() - return err - } - - err = f.Close() - if err != nil { - cleanup() - return err - } - - return nil -} - -func persistStoredFieldValues(fieldID int, - storedFieldValues [][]byte, stf []byte, spf [][]uint64, - curr int, metaEncode varintEncoder, data []byte) ( - int, []byte, error) { - for i := 0; i < len(storedFieldValues); i++ { - // encode field - _, err := metaEncode(uint64(fieldID)) - if err != nil { - return 0, nil, err - } - // encode type - _, err = metaEncode(uint64(stf[i])) - if err != nil { - return 0, nil, err - } - // encode start offset - _, err = metaEncode(uint64(curr)) - if err != nil { - return 0, nil, err - } - // end len - _, err = metaEncode(uint64(len(storedFieldValues[i]))) - if err != nil { - return 0, nil, err - } - // encode number of array pos - _, err = metaEncode(uint64(len(spf[i]))) - if err != nil { - return 0, nil, err - } - // encode all array 
positions - for _, pos := range spf[i] { - _, err = metaEncode(pos) - if err != nil { - return 0, nil, err - } - } - - data = append(data, storedFieldValues[i]...) - curr += len(storedFieldValues[i]) - } - - return curr, data, nil -} - -func InitSegmentBase(mem []byte, memCRC uint32, chunkMode uint32, - fieldsMap map[string]uint16, fieldsInv []string, numDocs uint64, - storedIndexOffset uint64, fieldsIndexOffset uint64, docValueOffset uint64, - dictLocs []uint64) (*SegmentBase, error) { - sb := &SegmentBase{ - mem: mem, - memCRC: memCRC, - chunkMode: chunkMode, - fieldsMap: fieldsMap, - fieldsInv: fieldsInv, - numDocs: numDocs, - storedIndexOffset: storedIndexOffset, - fieldsIndexOffset: fieldsIndexOffset, - docValueOffset: docValueOffset, - dictLocs: dictLocs, - fieldDvReaders: make(map[uint16]*docValueReader), - fieldFSTs: make(map[uint16]*vellum.FST), - } - sb.updateSize() - - err := sb.loadDvReaders() - if err != nil { - return nil, err - } - - return sb, nil -} diff --git a/vendor/github.com/blevesearch/zapx/v14/contentcoder.go b/vendor/github.com/blevesearch/zapx/v14/contentcoder.go deleted file mode 100644 index c145b5a11..000000000 --- a/vendor/github.com/blevesearch/zapx/v14/contentcoder.go +++ /dev/null @@ -1,243 +0,0 @@ -// Copyright (c) 2017 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package zap - -import ( - "bytes" - "encoding/binary" - "io" - "reflect" - - "github.com/golang/snappy" -) - -var reflectStaticSizeMetaData int - -func init() { - var md MetaData - reflectStaticSizeMetaData = int(reflect.TypeOf(md).Size()) -} - -var termSeparator byte = 0xff -var termSeparatorSplitSlice = []byte{termSeparator} - -type chunkedContentCoder struct { - final []byte - chunkSize uint64 - currChunk uint64 - chunkLens []uint64 - - w io.Writer - progressiveWrite bool - - chunkMetaBuf bytes.Buffer - chunkBuf bytes.Buffer - - chunkMeta []MetaData - - compressed []byte // temp buf for snappy compression -} - -// MetaData represents the data information inside a -// chunk. -type MetaData struct { - DocNum uint64 // docNum of the data inside the chunk - DocDvOffset uint64 // offset of data inside the chunk for the given docid -} - -// newChunkedContentCoder returns a new chunk content coder which -// packs data into chunks based on the provided chunkSize -func newChunkedContentCoder(chunkSize uint64, maxDocNum uint64, - w io.Writer, progressiveWrite bool) *chunkedContentCoder { - total := maxDocNum/chunkSize + 1 - rv := &chunkedContentCoder{ - chunkSize: chunkSize, - chunkLens: make([]uint64, total), - chunkMeta: make([]MetaData, 0, total), - w: w, - progressiveWrite: progressiveWrite, - } - - return rv -} - -// Reset lets you reuse this chunked content coder. Buffers are reset -// and re used. You cannot change the chunk size. 
-func (c *chunkedContentCoder) Reset() { - c.currChunk = 0 - c.final = c.final[:0] - c.chunkBuf.Reset() - c.chunkMetaBuf.Reset() - for i := range c.chunkLens { - c.chunkLens[i] = 0 - } - c.chunkMeta = c.chunkMeta[:0] -} - -func (c *chunkedContentCoder) SetChunkSize(chunkSize uint64, maxDocNum uint64) { - total := int(maxDocNum/chunkSize + 1) - c.chunkSize = chunkSize - if cap(c.chunkLens) < total { - c.chunkLens = make([]uint64, total) - } else { - c.chunkLens = c.chunkLens[:total] - } - if cap(c.chunkMeta) < total { - c.chunkMeta = make([]MetaData, 0, total) - } -} - -// Close indicates you are done calling Add() this allows -// the final chunk to be encoded. -func (c *chunkedContentCoder) Close() error { - return c.flushContents() -} - -func (c *chunkedContentCoder) flushContents() error { - // flush the contents, with meta information at first - buf := make([]byte, binary.MaxVarintLen64) - n := binary.PutUvarint(buf, uint64(len(c.chunkMeta))) - _, err := c.chunkMetaBuf.Write(buf[:n]) - if err != nil { - return err - } - - // write out the metaData slice - for _, meta := range c.chunkMeta { - _, err := writeUvarints(&c.chunkMetaBuf, meta.DocNum, meta.DocDvOffset) - if err != nil { - return err - } - } - - // write the metadata to final data - metaData := c.chunkMetaBuf.Bytes() - c.final = append(c.final, c.chunkMetaBuf.Bytes()...) - // write the compressed data to the final data - c.compressed = snappy.Encode(c.compressed[:cap(c.compressed)], c.chunkBuf.Bytes()) - c.final = append(c.final, c.compressed...) - - c.chunkLens[c.currChunk] = uint64(len(c.compressed) + len(metaData)) - - if c.progressiveWrite { - _, err := c.w.Write(c.final) - if err != nil { - return err - } - c.final = c.final[:0] - } - - return nil -} - -// Add encodes the provided byte slice into the correct chunk for the provided -// doc num. You MUST call Add() with increasing docNums. 
-func (c *chunkedContentCoder) Add(docNum uint64, vals []byte) error { - chunk := docNum / c.chunkSize - if chunk != c.currChunk { - // flush out the previous chunk details - err := c.flushContents() - if err != nil { - return err - } - // clearing the chunk specific meta for next chunk - c.chunkBuf.Reset() - c.chunkMetaBuf.Reset() - c.chunkMeta = c.chunkMeta[:0] - c.currChunk = chunk - } - - // get the starting offset for this doc - dvOffset := c.chunkBuf.Len() - dvSize, err := c.chunkBuf.Write(vals) - if err != nil { - return err - } - - c.chunkMeta = append(c.chunkMeta, MetaData{ - DocNum: docNum, - DocDvOffset: uint64(dvOffset + dvSize), - }) - return nil -} - -// Write commits all the encoded chunked contents to the provided writer. -// -// | ..... data ..... | chunk offsets (varints) -// | position of chunk offsets (uint64) | number of offsets (uint64) | -// -func (c *chunkedContentCoder) Write() (int, error) { - var tw int - - if c.final != nil { - // write out the data section first - nw, err := c.w.Write(c.final) - tw += nw - if err != nil { - return tw, err - } - } - - chunkOffsetsStart := uint64(tw) - - if cap(c.final) < binary.MaxVarintLen64 { - c.final = make([]byte, binary.MaxVarintLen64) - } else { - c.final = c.final[0:binary.MaxVarintLen64] - } - chunkOffsets := modifyLengthsToEndOffsets(c.chunkLens) - // write out the chunk offsets - for _, chunkOffset := range chunkOffsets { - n := binary.PutUvarint(c.final, chunkOffset) - nw, err := c.w.Write(c.final[:n]) - tw += nw - if err != nil { - return tw, err - } - } - - chunkOffsetsLen := uint64(tw) - chunkOffsetsStart - - c.final = c.final[0:8] - // write out the length of chunk offsets - binary.BigEndian.PutUint64(c.final, chunkOffsetsLen) - nw, err := c.w.Write(c.final) - tw += nw - if err != nil { - return tw, err - } - - // write out the number of chunks - binary.BigEndian.PutUint64(c.final, uint64(len(c.chunkLens))) - nw, err = c.w.Write(c.final) - tw += nw - if err != nil { - return tw, err - } - 
- c.final = c.final[:0] - - return tw, nil -} - -// ReadDocValueBoundary elicits the start, end offsets from a -// metaData header slice -func ReadDocValueBoundary(chunk int, metaHeaders []MetaData) (uint64, uint64) { - var start uint64 - if chunk > 0 { - start = metaHeaders[chunk-1].DocDvOffset - } - return start, metaHeaders[chunk].DocDvOffset -} diff --git a/vendor/github.com/blevesearch/zapx/v14/count.go b/vendor/github.com/blevesearch/zapx/v14/count.go deleted file mode 100644 index b6135359f..000000000 --- a/vendor/github.com/blevesearch/zapx/v14/count.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright (c) 2017 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package zap - -import ( - "hash/crc32" - "io" - - segment "github.com/blevesearch/scorch_segment_api/v2" -) - -// CountHashWriter is a wrapper around a Writer which counts the number of -// bytes which have been written and computes a crc32 hash -type CountHashWriter struct { - w io.Writer - crc uint32 - n int - s segment.StatsReporter -} - -// NewCountHashWriter returns a CountHashWriter which wraps the provided Writer -func NewCountHashWriter(w io.Writer) *CountHashWriter { - return &CountHashWriter{w: w} -} - -func NewCountHashWriterWithStatsReporter(w io.Writer, s segment.StatsReporter) *CountHashWriter { - return &CountHashWriter{w: w, s: s} -} - -// Write writes the provided bytes to the wrapped writer and counts the bytes -func (c *CountHashWriter) Write(b []byte) (int, error) { - n, err := c.w.Write(b) - c.crc = crc32.Update(c.crc, crc32.IEEETable, b[:n]) - c.n += n - if c.s != nil { - c.s.ReportBytesWritten(uint64(n)) - } - return n, err -} - -// Count returns the number of bytes written -func (c *CountHashWriter) Count() int { - return c.n -} - -// Sum32 returns the CRC-32 hash of the content written to this writer -func (c *CountHashWriter) Sum32() uint32 { - return c.crc -} diff --git a/vendor/github.com/blevesearch/zapx/v14/dict.go b/vendor/github.com/blevesearch/zapx/v14/dict.go deleted file mode 100644 index e30bf2420..000000000 --- a/vendor/github.com/blevesearch/zapx/v14/dict.go +++ /dev/null @@ -1,158 +0,0 @@ -// Copyright (c) 2017 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package zap - -import ( - "fmt" - - "github.com/RoaringBitmap/roaring" - index "github.com/blevesearch/bleve_index_api" - segment "github.com/blevesearch/scorch_segment_api/v2" - "github.com/blevesearch/vellum" -) - -// Dictionary is the zap representation of the term dictionary -type Dictionary struct { - sb *SegmentBase - field string - fieldID uint16 - fst *vellum.FST - fstReader *vellum.Reader -} - -// represents an immutable, empty dictionary -var emptyDictionary = &Dictionary{} - -// PostingsList returns the postings list for the specified term -func (d *Dictionary) PostingsList(term []byte, except *roaring.Bitmap, - prealloc segment.PostingsList) (segment.PostingsList, error) { - var preallocPL *PostingsList - pl, ok := prealloc.(*PostingsList) - if ok && pl != nil { - preallocPL = pl - } - return d.postingsList(term, except, preallocPL) -} - -func (d *Dictionary) postingsList(term []byte, except *roaring.Bitmap, rv *PostingsList) (*PostingsList, error) { - if d.fstReader == nil { - if rv == nil || rv == emptyPostingsList { - return emptyPostingsList, nil - } - return d.postingsListInit(rv, except), nil - } - - postingsOffset, exists, err := d.fstReader.Get(term) - if err != nil { - return nil, fmt.Errorf("vellum err: %v", err) - } - if !exists { - if rv == nil || rv == emptyPostingsList { - return emptyPostingsList, nil - } - return d.postingsListInit(rv, except), nil - } - - return d.postingsListFromOffset(postingsOffset, except, rv) -} - -func (d *Dictionary) postingsListFromOffset(postingsOffset uint64, except *roaring.Bitmap, rv *PostingsList) (*PostingsList, error) { - rv = d.postingsListInit(rv, except) - - err := rv.read(postingsOffset, d) - if err != nil { - return nil, err - } - - return rv, nil -} - -func (d *Dictionary) postingsListInit(rv *PostingsList, except *roaring.Bitmap) *PostingsList { - if rv == nil || rv == emptyPostingsList { - 
rv = &PostingsList{} - } else { - postings := rv.postings - if postings != nil { - postings.Clear() - } - - *rv = PostingsList{} // clear the struct - - rv.postings = postings - } - rv.sb = d.sb - rv.except = except - return rv -} - -func (d *Dictionary) Contains(key []byte) (bool, error) { - if d.fst != nil { - return d.fst.Contains(key) - } - return false, nil -} - -// AutomatonIterator returns an iterator which only visits terms -// having the the vellum automaton and start/end key range -func (d *Dictionary) AutomatonIterator(a segment.Automaton, - startKeyInclusive, endKeyExclusive []byte) segment.DictionaryIterator { - if d.fst != nil { - rv := &DictionaryIterator{ - d: d, - } - - itr, err := d.fst.Search(a, startKeyInclusive, endKeyExclusive) - if err == nil { - rv.itr = itr - } else if err != vellum.ErrIteratorDone { - rv.err = err - } - - return rv - } - return emptyDictionaryIterator -} - -// DictionaryIterator is an iterator for term dictionary -type DictionaryIterator struct { - d *Dictionary - itr vellum.Iterator - err error - tmp PostingsList - entry index.DictEntry - omitCount bool -} - -var emptyDictionaryIterator = &DictionaryIterator{} - -// Next returns the next entry in the dictionary -func (i *DictionaryIterator) Next() (*index.DictEntry, error) { - if i.err != nil && i.err != vellum.ErrIteratorDone { - return nil, i.err - } else if i.itr == nil || i.err == vellum.ErrIteratorDone { - return nil, nil - } - term, postingsOffset := i.itr.Current() - i.entry.Term = string(term) - if !i.omitCount { - i.err = i.tmp.read(postingsOffset, i.d) - if i.err != nil { - return nil, i.err - } - i.entry.Count = i.tmp.Count() - } - i.err = i.itr.Next() - return &i.entry, nil -} diff --git a/vendor/github.com/blevesearch/zapx/v14/docvalues.go b/vendor/github.com/blevesearch/zapx/v14/docvalues.go deleted file mode 100644 index a530aa5ad..000000000 --- a/vendor/github.com/blevesearch/zapx/v14/docvalues.go +++ /dev/null @@ -1,311 +0,0 @@ -// Copyright (c) 2017 
Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package zap - -import ( - "bytes" - "encoding/binary" - "fmt" - "math" - "reflect" - "sort" - - index "github.com/blevesearch/bleve_index_api" - segment "github.com/blevesearch/scorch_segment_api/v2" - "github.com/golang/snappy" -) - -var reflectStaticSizedocValueReader int - -func init() { - var dvi docValueReader - reflectStaticSizedocValueReader = int(reflect.TypeOf(dvi).Size()) -} - -type docNumTermsVisitor func(docNum uint64, terms []byte) error - -type docVisitState struct { - dvrs map[uint16]*docValueReader - segment *SegmentBase -} - -type docValueReader struct { - field string - curChunkNum uint64 - chunkOffsets []uint64 - dvDataLoc uint64 - curChunkHeader []MetaData - curChunkData []byte // compressed data cache - uncompressed []byte // temp buf for snappy decompression -} - -func (di *docValueReader) size() int { - return reflectStaticSizedocValueReader + SizeOfPtr + - len(di.field) + - len(di.chunkOffsets)*SizeOfUint64 + - len(di.curChunkHeader)*reflectStaticSizeMetaData + - len(di.curChunkData) -} - -func (di *docValueReader) cloneInto(rv *docValueReader) *docValueReader { - if rv == nil { - rv = &docValueReader{} - } - - rv.field = di.field - rv.curChunkNum = math.MaxUint64 - rv.chunkOffsets = di.chunkOffsets // immutable, so it's sharable - rv.dvDataLoc = di.dvDataLoc - rv.curChunkHeader = rv.curChunkHeader[:0] - rv.curChunkData = nil - rv.uncompressed = rv.uncompressed[:0] - 
- return rv -} - -func (di *docValueReader) curChunkNumber() uint64 { - return di.curChunkNum -} - -func (s *SegmentBase) loadFieldDocValueReader(field string, - fieldDvLocStart, fieldDvLocEnd uint64) (*docValueReader, error) { - // get the docValue offset for the given fields - if fieldDvLocStart == fieldNotUninverted { - // no docValues found, nothing to do - return nil, nil - } - - // read the number of chunks, and chunk offsets position - var numChunks, chunkOffsetsPosition uint64 - - if fieldDvLocEnd-fieldDvLocStart > 16 { - numChunks = binary.BigEndian.Uint64(s.mem[fieldDvLocEnd-8 : fieldDvLocEnd]) - // read the length of chunk offsets - chunkOffsetsLen := binary.BigEndian.Uint64(s.mem[fieldDvLocEnd-16 : fieldDvLocEnd-8]) - // acquire position of chunk offsets - chunkOffsetsPosition = (fieldDvLocEnd - 16) - chunkOffsetsLen - } else { - return nil, fmt.Errorf("loadFieldDocValueReader: fieldDvLoc too small: %d-%d", fieldDvLocEnd, fieldDvLocStart) - } - - fdvIter := &docValueReader{ - curChunkNum: math.MaxUint64, - field: field, - chunkOffsets: make([]uint64, int(numChunks)), - } - - // read the chunk offsets - var offset uint64 - for i := 0; i < int(numChunks); i++ { - loc, read := binary.Uvarint(s.mem[chunkOffsetsPosition+offset : chunkOffsetsPosition+offset+binary.MaxVarintLen64]) - if read <= 0 { - return nil, fmt.Errorf("corrupted chunk offset during segment load") - } - fdvIter.chunkOffsets[i] = loc - offset += uint64(read) - } - - // set the data offset - fdvIter.dvDataLoc = fieldDvLocStart - - return fdvIter, nil -} - -func (di *docValueReader) loadDvChunk(chunkNumber uint64, s *SegmentBase) error { - // advance to the chunk where the docValues - // reside for the given docNum - destChunkDataLoc, curChunkEnd := di.dvDataLoc, di.dvDataLoc - start, end := readChunkBoundary(int(chunkNumber), di.chunkOffsets) - if start >= end { - di.curChunkHeader = di.curChunkHeader[:0] - di.curChunkData = nil - di.curChunkNum = chunkNumber - di.uncompressed = 
di.uncompressed[:0] - return nil - } - - destChunkDataLoc += start - curChunkEnd += end - - // read the number of docs reside in the chunk - numDocs, read := binary.Uvarint(s.mem[destChunkDataLoc : destChunkDataLoc+binary.MaxVarintLen64]) - if read <= 0 { - return fmt.Errorf("failed to read the chunk") - } - chunkMetaLoc := destChunkDataLoc + uint64(read) - - offset := uint64(0) - if cap(di.curChunkHeader) < int(numDocs) { - di.curChunkHeader = make([]MetaData, int(numDocs)) - } else { - di.curChunkHeader = di.curChunkHeader[:int(numDocs)] - } - for i := 0; i < int(numDocs); i++ { - di.curChunkHeader[i].DocNum, read = binary.Uvarint(s.mem[chunkMetaLoc+offset : chunkMetaLoc+offset+binary.MaxVarintLen64]) - offset += uint64(read) - di.curChunkHeader[i].DocDvOffset, read = binary.Uvarint(s.mem[chunkMetaLoc+offset : chunkMetaLoc+offset+binary.MaxVarintLen64]) - offset += uint64(read) - } - - compressedDataLoc := chunkMetaLoc + offset - dataLength := curChunkEnd - compressedDataLoc - di.curChunkData = s.mem[compressedDataLoc : compressedDataLoc+dataLength] - di.curChunkNum = chunkNumber - di.uncompressed = di.uncompressed[:0] - return nil -} - -func (di *docValueReader) iterateAllDocValues(s *SegmentBase, visitor docNumTermsVisitor) error { - for i := 0; i < len(di.chunkOffsets); i++ { - err := di.loadDvChunk(uint64(i), s) - if err != nil { - return err - } - if di.curChunkData == nil || len(di.curChunkHeader) == 0 { - continue - } - - // uncompress the already loaded data - uncompressed, err := snappy.Decode(di.uncompressed[:cap(di.uncompressed)], di.curChunkData) - if err != nil { - return err - } - di.uncompressed = uncompressed - - start := uint64(0) - for _, entry := range di.curChunkHeader { - err = visitor(entry.DocNum, uncompressed[start:entry.DocDvOffset]) - if err != nil { - return err - } - - start = entry.DocDvOffset - } - } - - return nil -} - -func (di *docValueReader) visitDocValues(docNum uint64, - visitor index.DocValueVisitor) error { - // binary 
search the term locations for the docNum - start, end := di.getDocValueLocs(docNum) - if start == math.MaxUint64 || end == math.MaxUint64 || start == end { - return nil - } - - var uncompressed []byte - var err error - // use the uncompressed copy if available - if len(di.uncompressed) > 0 { - uncompressed = di.uncompressed - } else { - // uncompress the already loaded data - uncompressed, err = snappy.Decode(di.uncompressed[:cap(di.uncompressed)], di.curChunkData) - if err != nil { - return err - } - di.uncompressed = uncompressed - } - - // pick the terms for the given docNum - uncompressed = uncompressed[start:end] - for { - i := bytes.Index(uncompressed, termSeparatorSplitSlice) - if i < 0 { - break - } - - visitor(di.field, uncompressed[0:i]) - uncompressed = uncompressed[i+1:] - } - - return nil -} - -func (di *docValueReader) getDocValueLocs(docNum uint64) (uint64, uint64) { - i := sort.Search(len(di.curChunkHeader), func(i int) bool { - return di.curChunkHeader[i].DocNum >= docNum - }) - if i < len(di.curChunkHeader) && di.curChunkHeader[i].DocNum == docNum { - return ReadDocValueBoundary(i, di.curChunkHeader) - } - return math.MaxUint64, math.MaxUint64 -} - -// VisitDocValues is an implementation of the -// DocValueVisitable interface -func (s *SegmentBase) VisitDocValues(localDocNum uint64, fields []string, - visitor index.DocValueVisitor, dvsIn segment.DocVisitState) ( - segment.DocVisitState, error) { - dvs, ok := dvsIn.(*docVisitState) - if !ok || dvs == nil { - dvs = &docVisitState{} - } else { - if dvs.segment != s { - dvs.segment = s - dvs.dvrs = nil - } - } - - var fieldIDPlus1 uint16 - if dvs.dvrs == nil { - dvs.dvrs = make(map[uint16]*docValueReader, len(fields)) - for _, field := range fields { - if fieldIDPlus1, ok = s.fieldsMap[field]; !ok { - continue - } - fieldID := fieldIDPlus1 - 1 - if dvIter, exists := s.fieldDvReaders[fieldID]; exists && - dvIter != nil { - dvs.dvrs[fieldID] = dvIter.cloneInto(dvs.dvrs[fieldID]) - } - } - } - - // find 
the chunkNumber where the docValues are stored - // NOTE: doc values continue to use legacy chunk mode - chunkFactor, err := getChunkSize(LegacyChunkMode, 0, 0) - if err != nil { - return nil, err - } - docInChunk := localDocNum / chunkFactor - var dvr *docValueReader - for _, field := range fields { - if fieldIDPlus1, ok = s.fieldsMap[field]; !ok { - continue - } - fieldID := fieldIDPlus1 - 1 - if dvr, ok = dvs.dvrs[fieldID]; ok && dvr != nil { - // check if the chunk is already loaded - if docInChunk != dvr.curChunkNumber() { - err := dvr.loadDvChunk(docInChunk, s) - if err != nil { - return dvs, err - } - } - - _ = dvr.visitDocValues(localDocNum, visitor) - } - } - return dvs, nil -} - -// VisitableDocValueFields returns the list of fields with -// persisted doc value terms ready to be visitable using the -// VisitDocumentFieldTerms method. -func (s *SegmentBase) VisitableDocValueFields() ([]string, error) { - return s.fieldDvNames, nil -} diff --git a/vendor/github.com/blevesearch/zapx/v14/enumerator.go b/vendor/github.com/blevesearch/zapx/v14/enumerator.go deleted file mode 100644 index 972a22416..000000000 --- a/vendor/github.com/blevesearch/zapx/v14/enumerator.go +++ /dev/null @@ -1,138 +0,0 @@ -// Copyright (c) 2018 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package zap - -import ( - "bytes" - - "github.com/blevesearch/vellum" -) - -// enumerator provides an ordered traversal of multiple vellum -// iterators. 
Like JOIN of iterators, the enumerator produces a -// sequence of (key, iteratorIndex, value) tuples, sorted by key ASC, -// then iteratorIndex ASC, where the same key might be seen or -// repeated across multiple child iterators. -type enumerator struct { - itrs []vellum.Iterator - currKs [][]byte - currVs []uint64 - - lowK []byte - lowIdxs []int - lowCurr int -} - -// newEnumerator returns a new enumerator over the vellum Iterators -func newEnumerator(itrs []vellum.Iterator) (*enumerator, error) { - rv := &enumerator{ - itrs: itrs, - currKs: make([][]byte, len(itrs)), - currVs: make([]uint64, len(itrs)), - lowIdxs: make([]int, 0, len(itrs)), - } - for i, itr := range rv.itrs { - rv.currKs[i], rv.currVs[i] = itr.Current() - } - rv.updateMatches(false) - if rv.lowK == nil && len(rv.lowIdxs) == 0 { - return rv, vellum.ErrIteratorDone - } - return rv, nil -} - -// updateMatches maintains the low key matches based on the currKs -func (m *enumerator) updateMatches(skipEmptyKey bool) { - m.lowK = nil - m.lowIdxs = m.lowIdxs[:0] - m.lowCurr = 0 - - for i, key := range m.currKs { - if (key == nil && m.currVs[i] == 0) || // in case of empty iterator - (len(key) == 0 && skipEmptyKey) { // skip empty keys - continue - } - - cmp := bytes.Compare(key, m.lowK) - if cmp < 0 || len(m.lowIdxs) == 0 { - // reached a new low - m.lowK = key - m.lowIdxs = m.lowIdxs[:0] - m.lowIdxs = append(m.lowIdxs, i) - } else if cmp == 0 { - m.lowIdxs = append(m.lowIdxs, i) - } - } -} - -// Current returns the enumerator's current key, iterator-index, and -// value. If the enumerator is not pointing at a valid value (because -// Next returned an error previously), Current will return nil,0,0. 
-func (m *enumerator) Current() ([]byte, int, uint64) { - var i int - var v uint64 - if m.lowCurr < len(m.lowIdxs) { - i = m.lowIdxs[m.lowCurr] - v = m.currVs[i] - } - return m.lowK, i, v -} - -// GetLowIdxsAndValues will return all of the iterator indices -// which point to the current key, and their corresponding -// values. This can be used by advanced caller which may need -// to peek into these other sets of data before processing. -func (m *enumerator) GetLowIdxsAndValues() ([]int, []uint64) { - values := make([]uint64, 0, len(m.lowIdxs)) - for _, idx := range m.lowIdxs { - values = append(values, m.currVs[idx]) - } - return m.lowIdxs, values -} - -// Next advances the enumerator to the next key/iterator/value result, -// else vellum.ErrIteratorDone is returned. -func (m *enumerator) Next() error { - m.lowCurr += 1 - if m.lowCurr >= len(m.lowIdxs) { - // move all the current low iterators forwards - for _, vi := range m.lowIdxs { - err := m.itrs[vi].Next() - if err != nil && err != vellum.ErrIteratorDone { - return err - } - m.currKs[vi], m.currVs[vi] = m.itrs[vi].Current() - } - // can skip any empty keys encountered at this point - m.updateMatches(true) - } - if m.lowK == nil && len(m.lowIdxs) == 0 { - return vellum.ErrIteratorDone - } - return nil -} - -// Close all the underlying Iterators. The first error, if any, will -// be returned. 
-func (m *enumerator) Close() error { - var rv error - for _, itr := range m.itrs { - err := itr.Close() - if rv == nil { - rv = err - } - } - return rv -} diff --git a/vendor/github.com/blevesearch/zapx/v14/go.mod b/vendor/github.com/blevesearch/zapx/v14/go.mod deleted file mode 100644 index 762a0807d..000000000 --- a/vendor/github.com/blevesearch/zapx/v14/go.mod +++ /dev/null @@ -1,13 +0,0 @@ -module github.com/blevesearch/zapx/v14 - -go 1.12 - -require ( - github.com/RoaringBitmap/roaring v0.4.23 - github.com/blevesearch/bleve_index_api v1.0.0 - github.com/blevesearch/mmap-go v1.0.2 - github.com/blevesearch/scorch_segment_api/v2 v2.0.1 - github.com/blevesearch/vellum v1.0.3 - github.com/golang/snappy v0.0.1 - github.com/spf13/cobra v0.0.5 -) diff --git a/vendor/github.com/blevesearch/zapx/v14/go.sum b/vendor/github.com/blevesearch/zapx/v14/go.sum deleted file mode 100644 index 68e45348c..000000000 --- a/vendor/github.com/blevesearch/zapx/v14/go.sum +++ /dev/null @@ -1,73 +0,0 @@ -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/RoaringBitmap/roaring v0.4.23 h1:gpyfd12QohbqhFO4NVDUdoPOCXsyahYRQhINmlHxKeo= -github.com/RoaringBitmap/roaring v0.4.23/go.mod h1:D0gp8kJQgE1A4LQ5wFLggQEyvDi06Mq5mKs52e1TwOo= -github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= -github.com/blevesearch/bleve_index_api v1.0.0 h1:Ds3XeuTxjXCkG6pgIwWDRyooJKNIuOKemnN0N0IkhTU= -github.com/blevesearch/bleve_index_api v1.0.0/go.mod h1:fiwKS0xLEm+gBRgv5mumf0dhgFr2mDgZah1pqv1c1M4= -github.com/blevesearch/mmap-go v1.0.2 h1:JtMHb+FgQCTTYIhtMvimw15dJwu1Y5lrZDMOFXVWPk0= -github.com/blevesearch/mmap-go v1.0.2/go.mod h1:ol2qBqYaOUsGdm7aRMRrYGgPvnwLe6Y+7LMvAB5IbSA= -github.com/blevesearch/scorch_segment_api/v2 v2.0.1 h1:fd+hPtZ8GsbqPK1HslGp7Vhoik4arZteA/IsCEgOisw= -github.com/blevesearch/scorch_segment_api/v2 v2.0.1/go.mod h1:lq7yK2jQy1yQjtjTfU931aVqz7pYxEudHaDwOt1tXfU= 
-github.com/blevesearch/vellum v1.0.3 h1:U86G41A7CtXNzzpIJHM8lSTUqz1Mp8U870TkcdCzZc8= -github.com/blevesearch/vellum v1.0.3/go.mod h1:2u5ax02KeDuNWu4/C+hVQMD6uLN4txH1JbtpaDNLJRo= -github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= -github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/glycerine/go-unsnap-stream v0.0.0-20181221182339-f9677308dec2 h1:Ujru1hufTHVb++eG6OuNDKMxZnGIvF6o/u8q/8h2+I4= -github.com/glycerine/go-unsnap-stream v0.0.0-20181221182339-f9677308dec2/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE= -github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31 h1:gclg6gY70GLy3PbkQ1AERPfmLMMagS60DKF78eWwLn8= -github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24= -github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= -github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/gopherjs/gopherjs v0.0.0-20190910122728-9d188e94fb99 h1:twflg0XRTjwKpxb/jFExr4HGq6on2dEOmnL6FV+fgPw= -github.com/gopherjs/gopherjs v0.0.0-20190910122728-9d188e94fb99/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= 
-github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= -github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg= -github.com/mschoch/smat v0.2.0 h1:8imxQsjDm8yFEAVBe7azKmKSgzSkZXDuKkSq9374khM= -github.com/mschoch/smat v0.2.0/go.mod h1:kc9mz7DoBKqDyiRL7VZN8KvXQMWeTaVnttLRXOlotKw= -github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/philhofer/fwd v1.0.0 h1:UbZqGr5Y38ApvM/V/jEljVxwocdweyH+vmYvRPBnbqQ= -github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= -github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s= -github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= -github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= -github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= -github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= 
-github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/tinylib/msgp v1.1.0 h1:9fQd+ICuRIu/ue4vxJZu6/LzxN0HwMds2nq/0cFvxHU= -github.com/tinylib/msgp v1.1.0/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= -github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= -github.com/willf/bitset v1.1.10 h1:NotGKqX0KwQ72NUzqrjZq5ipPNDQex9lo3WpaS8L2sc= -github.com/willf/bitset v1.1.10/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= -github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= -golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181221143128-b4a75ba826a6/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a h1:aYOabOQFp6Vj6W1F80affTUvO9UxmJRx8K0gsfABByQ= -golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= -gopkg.in/yaml.v2 v2.2.2/go.mod 
h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/blevesearch/zapx/v14/intDecoder.go b/vendor/github.com/blevesearch/zapx/v14/intDecoder.go deleted file mode 100644 index 1b839e1c6..000000000 --- a/vendor/github.com/blevesearch/zapx/v14/intDecoder.go +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright (c) 2019 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package zap - -import ( - "encoding/binary" - "fmt" -) - -type chunkedIntDecoder struct { - startOffset uint64 - dataStartOffset uint64 - chunkOffsets []uint64 - curChunkBytes []byte - data []byte - r *memUvarintReader -} - -// newChunkedIntDecoder expects an optional or reset chunkedIntDecoder for better reuse. 
-func newChunkedIntDecoder(buf []byte, offset uint64, rv *chunkedIntDecoder) *chunkedIntDecoder { - if rv == nil { - rv = &chunkedIntDecoder{startOffset: offset, data: buf} - } else { - rv.startOffset = offset - rv.data = buf - } - - var n, numChunks uint64 - var read int - if offset == termNotEncoded { - numChunks = 0 - } else { - numChunks, read = binary.Uvarint(buf[offset+n : offset+n+binary.MaxVarintLen64]) - } - - n += uint64(read) - if cap(rv.chunkOffsets) >= int(numChunks) { - rv.chunkOffsets = rv.chunkOffsets[:int(numChunks)] - } else { - rv.chunkOffsets = make([]uint64, int(numChunks)) - } - for i := 0; i < int(numChunks); i++ { - rv.chunkOffsets[i], read = binary.Uvarint(buf[offset+n : offset+n+binary.MaxVarintLen64]) - n += uint64(read) - } - rv.dataStartOffset = offset + n - return rv -} - -func (d *chunkedIntDecoder) loadChunk(chunk int) error { - if d.startOffset == termNotEncoded { - d.r = newMemUvarintReader([]byte(nil)) - return nil - } - - if chunk >= len(d.chunkOffsets) { - return fmt.Errorf("tried to load freq chunk that doesn't exist %d/(%d)", - chunk, len(d.chunkOffsets)) - } - - end, start := d.dataStartOffset, d.dataStartOffset - s, e := readChunkBoundary(chunk, d.chunkOffsets) - start += s - end += e - d.curChunkBytes = d.data[start:end] - if d.r == nil { - d.r = newMemUvarintReader(d.curChunkBytes) - } else { - d.r.Reset(d.curChunkBytes) - } - - return nil -} - -func (d *chunkedIntDecoder) reset() { - d.startOffset = 0 - d.dataStartOffset = 0 - d.chunkOffsets = d.chunkOffsets[:0] - d.curChunkBytes = d.curChunkBytes[:0] - d.data = d.data[:0] - if d.r != nil { - d.r.Reset([]byte(nil)) - } -} - -func (d *chunkedIntDecoder) isNil() bool { - return d.curChunkBytes == nil || len(d.curChunkBytes) == 0 -} - -func (d *chunkedIntDecoder) readUvarint() (uint64, error) { - return d.r.ReadUvarint() -} - -func (d *chunkedIntDecoder) SkipUvarint() { - d.r.SkipUvarint() -} - -func (d *chunkedIntDecoder) SkipBytes(count int) { - d.r.SkipBytes(count) -} - 
-func (d *chunkedIntDecoder) Len() int { - return d.r.Len() -} diff --git a/vendor/github.com/blevesearch/zapx/v14/intcoder.go b/vendor/github.com/blevesearch/zapx/v14/intcoder.go deleted file mode 100644 index c3c488fb7..000000000 --- a/vendor/github.com/blevesearch/zapx/v14/intcoder.go +++ /dev/null @@ -1,206 +0,0 @@ -// Copyright (c) 2017 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package zap - -import ( - "bytes" - "encoding/binary" - "io" -) - -// We can safely use 0 to represent termNotEncoded since 0 -// could never be a valid address for term location information. -// (stored field index is always non-empty and earlier in the -// file) -const termNotEncoded = 0 - -type chunkedIntCoder struct { - final []byte - chunkSize uint64 - chunkBuf bytes.Buffer - chunkLens []uint64 - currChunk uint64 - - buf []byte -} - -// newChunkedIntCoder returns a new chunk int coder which packs data into -// chunks based on the provided chunkSize and supports up to the specified -// maxDocNum -func newChunkedIntCoder(chunkSize uint64, maxDocNum uint64) *chunkedIntCoder { - total := maxDocNum/chunkSize + 1 - rv := &chunkedIntCoder{ - chunkSize: chunkSize, - chunkLens: make([]uint64, total), - final: make([]byte, 0, 64), - } - - return rv -} - -// Reset lets you reuse this chunked int coder. buffers are reset and reused -// from previous use. you cannot change the chunk size or max doc num. 
-func (c *chunkedIntCoder) Reset() { - c.final = c.final[:0] - c.chunkBuf.Reset() - c.currChunk = 0 - for i := range c.chunkLens { - c.chunkLens[i] = 0 - } -} - -// SetChunkSize changes the chunk size. It is only valid to do so -// with a new chunkedIntCoder, or immediately after calling Reset() -func (c *chunkedIntCoder) SetChunkSize(chunkSize uint64, maxDocNum uint64) { - total := int(maxDocNum/chunkSize + 1) - c.chunkSize = chunkSize - if cap(c.chunkLens) < total { - c.chunkLens = make([]uint64, total) - } else { - c.chunkLens = c.chunkLens[:total] - } -} - -// Add encodes the provided integers into the correct chunk for the provided -// doc num. You MUST call Add() with increasing docNums. -func (c *chunkedIntCoder) Add(docNum uint64, vals ...uint64) error { - chunk := docNum / c.chunkSize - if chunk != c.currChunk { - // starting a new chunk - c.Close() - c.chunkBuf.Reset() - c.currChunk = chunk - } - - if len(c.buf) < binary.MaxVarintLen64 { - c.buf = make([]byte, binary.MaxVarintLen64) - } - - for _, val := range vals { - wb := binary.PutUvarint(c.buf, val) - _, err := c.chunkBuf.Write(c.buf[:wb]) - if err != nil { - return err - } - } - - return nil -} - -func (c *chunkedIntCoder) AddBytes(docNum uint64, buf []byte) error { - chunk := docNum / c.chunkSize - if chunk != c.currChunk { - // starting a new chunk - c.Close() - c.chunkBuf.Reset() - c.currChunk = chunk - } - - _, err := c.chunkBuf.Write(buf) - return err -} - -// Close indicates you are done calling Add() this allows the final chunk -// to be encoded. -func (c *chunkedIntCoder) Close() { - encodingBytes := c.chunkBuf.Bytes() - c.chunkLens[c.currChunk] = uint64(len(encodingBytes)) - c.final = append(c.final, encodingBytes...) - c.currChunk = uint64(cap(c.chunkLens)) // sentinel to detect double close -} - -// Write commits all the encoded chunked integers to the provided writer. 
-func (c *chunkedIntCoder) Write(w io.Writer) (int, error) { - bufNeeded := binary.MaxVarintLen64 * (1 + len(c.chunkLens)) - if len(c.buf) < bufNeeded { - c.buf = make([]byte, bufNeeded) - } - buf := c.buf - - // convert the chunk lengths into chunk offsets - chunkOffsets := modifyLengthsToEndOffsets(c.chunkLens) - - // write out the number of chunks & each chunk offsets - n := binary.PutUvarint(buf, uint64(len(chunkOffsets))) - for _, chunkOffset := range chunkOffsets { - n += binary.PutUvarint(buf[n:], chunkOffset) - } - - tw, err := w.Write(buf[:n]) - if err != nil { - return tw, err - } - - // write out the data - nw, err := w.Write(c.final) - tw += nw - if err != nil { - return tw, err - } - return tw, nil -} - -// writeAt commits all the encoded chunked integers to the provided writer -// and returns the starting offset, total bytes written and an error -func (c *chunkedIntCoder) writeAt(w io.Writer) (uint64, int, error) { - startOffset := uint64(termNotEncoded) - if len(c.final) <= 0 { - return startOffset, 0, nil - } - - if chw := w.(*CountHashWriter); chw != nil { - startOffset = uint64(chw.Count()) - } - - tw, err := c.Write(w) - return startOffset, tw, err -} - -func (c *chunkedIntCoder) FinalSize() int { - return len(c.final) -} - -// modifyLengthsToEndOffsets converts the chunk length array -// to a chunk offset array. The readChunkBoundary -// will figure out the start and end of every chunk from -// these offsets. Starting offset of i'th index is stored -// in i-1'th position except for 0'th index and ending offset -// is stored at i'th index position. -// For 0'th element, starting position is always zero. 
-// eg: -// Lens -> 5 5 5 5 => 5 10 15 20 -// Lens -> 0 5 0 5 => 0 5 5 10 -// Lens -> 0 0 0 5 => 0 0 0 5 -// Lens -> 5 0 0 0 => 5 5 5 5 -// Lens -> 0 5 0 0 => 0 5 5 5 -// Lens -> 0 0 5 0 => 0 0 5 5 -func modifyLengthsToEndOffsets(lengths []uint64) []uint64 { - var runningOffset uint64 - var index, i int - for i = 1; i <= len(lengths); i++ { - runningOffset += lengths[i-1] - lengths[index] = runningOffset - index++ - } - return lengths -} - -func readChunkBoundary(chunk int, offsets []uint64) (uint64, uint64) { - var start uint64 - if chunk > 0 { - start = offsets[chunk-1] - } - return start, offsets[chunk] -} diff --git a/vendor/github.com/blevesearch/zapx/v14/merge.go b/vendor/github.com/blevesearch/zapx/v14/merge.go deleted file mode 100644 index 6a853a16a..000000000 --- a/vendor/github.com/blevesearch/zapx/v14/merge.go +++ /dev/null @@ -1,843 +0,0 @@ -// Copyright (c) 2017 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package zap - -import ( - "bufio" - "bytes" - "encoding/binary" - "fmt" - "math" - "os" - "sort" - - "github.com/RoaringBitmap/roaring" - seg "github.com/blevesearch/scorch_segment_api/v2" - "github.com/blevesearch/vellum" - "github.com/golang/snappy" -) - -var DefaultFileMergerBufferSize = 1024 * 1024 - -const docDropped = math.MaxUint64 // sentinel docNum to represent a deleted doc - -// Merge takes a slice of segments and bit masks describing which -// documents may be dropped, and creates a new segment containing the -// remaining data. This new segment is built at the specified path. -func (*ZapPlugin) Merge(segments []seg.Segment, drops []*roaring.Bitmap, path string, - closeCh chan struct{}, s seg.StatsReporter) ( - [][]uint64, uint64, error) { - segmentBases := make([]*SegmentBase, len(segments)) - for segmenti, segment := range segments { - switch segmentx := segment.(type) { - case *Segment: - segmentBases[segmenti] = &segmentx.SegmentBase - case *SegmentBase: - segmentBases[segmenti] = segmentx - default: - panic(fmt.Sprintf("oops, unexpected segment type: %T", segment)) - } - } - return mergeSegmentBases(segmentBases, drops, path, DefaultChunkMode, closeCh, s) -} - -func mergeSegmentBases(segmentBases []*SegmentBase, drops []*roaring.Bitmap, path string, - chunkMode uint32, closeCh chan struct{}, s seg.StatsReporter) ( - [][]uint64, uint64, error) { - flag := os.O_RDWR | os.O_CREATE - - f, err := os.OpenFile(path, flag, 0600) - if err != nil { - return nil, 0, err - } - - cleanup := func() { - _ = f.Close() - _ = os.Remove(path) - } - - // buffer the output - br := bufio.NewWriterSize(f, DefaultFileMergerBufferSize) - - // wrap it for counting (tracking offsets) - cr := NewCountHashWriterWithStatsReporter(br, s) - - newDocNums, numDocs, storedIndexOffset, fieldsIndexOffset, docValueOffset, _, _, _, err := - MergeToWriter(segmentBases, drops, chunkMode, cr, closeCh) - if err != nil { - cleanup() - return nil, 0, err - } - - err = 
persistFooter(numDocs, storedIndexOffset, fieldsIndexOffset, - docValueOffset, chunkMode, cr.Sum32(), cr) - if err != nil { - cleanup() - return nil, 0, err - } - - err = br.Flush() - if err != nil { - cleanup() - return nil, 0, err - } - - err = f.Sync() - if err != nil { - cleanup() - return nil, 0, err - } - - err = f.Close() - if err != nil { - cleanup() - return nil, 0, err - } - - return newDocNums, uint64(cr.Count()), nil -} - -func MergeToWriter(segments []*SegmentBase, drops []*roaring.Bitmap, - chunkMode uint32, cr *CountHashWriter, closeCh chan struct{}) ( - newDocNums [][]uint64, - numDocs, storedIndexOffset, fieldsIndexOffset, docValueOffset uint64, - dictLocs []uint64, fieldsInv []string, fieldsMap map[string]uint16, - err error) { - docValueOffset = uint64(fieldNotUninverted) - - var fieldsSame bool - fieldsSame, fieldsInv = mergeFields(segments) - fieldsMap = mapFields(fieldsInv) - - numDocs = computeNewDocCount(segments, drops) - - if isClosed(closeCh) { - return nil, 0, 0, 0, 0, nil, nil, nil, seg.ErrClosed - } - - if numDocs > 0 { - storedIndexOffset, newDocNums, err = mergeStoredAndRemap(segments, drops, - fieldsMap, fieldsInv, fieldsSame, numDocs, cr, closeCh) - if err != nil { - return nil, 0, 0, 0, 0, nil, nil, nil, err - } - - dictLocs, docValueOffset, err = persistMergedRest(segments, drops, - fieldsInv, fieldsMap, fieldsSame, - newDocNums, numDocs, chunkMode, cr, closeCh) - if err != nil { - return nil, 0, 0, 0, 0, nil, nil, nil, err - } - } else { - dictLocs = make([]uint64, len(fieldsInv)) - } - - fieldsIndexOffset, err = persistFields(fieldsInv, cr, dictLocs) - if err != nil { - return nil, 0, 0, 0, 0, nil, nil, nil, err - } - - return newDocNums, numDocs, storedIndexOffset, fieldsIndexOffset, docValueOffset, dictLocs, fieldsInv, fieldsMap, nil -} - -// mapFields takes the fieldsInv list and returns a map of fieldName -// to fieldID+1 -func mapFields(fields []string) map[string]uint16 { - rv := make(map[string]uint16, len(fields)) - for 
i, fieldName := range fields { - rv[fieldName] = uint16(i) + 1 - } - return rv -} - -// computeNewDocCount determines how many documents will be in the newly -// merged segment when obsoleted docs are dropped -func computeNewDocCount(segments []*SegmentBase, drops []*roaring.Bitmap) uint64 { - var newDocCount uint64 - for segI, segment := range segments { - newDocCount += segment.numDocs - if drops[segI] != nil { - newDocCount -= drops[segI].GetCardinality() - } - } - return newDocCount -} - -func persistMergedRest(segments []*SegmentBase, dropsIn []*roaring.Bitmap, - fieldsInv []string, fieldsMap map[string]uint16, fieldsSame bool, - newDocNumsIn [][]uint64, newSegDocCount uint64, chunkMode uint32, - w *CountHashWriter, closeCh chan struct{}) ([]uint64, uint64, error) { - var bufMaxVarintLen64 []byte = make([]byte, binary.MaxVarintLen64) - var bufLoc []uint64 - - var postings *PostingsList - var postItr *PostingsIterator - - rv := make([]uint64, len(fieldsInv)) - fieldDvLocsStart := make([]uint64, len(fieldsInv)) - fieldDvLocsEnd := make([]uint64, len(fieldsInv)) - - // these int coders are initialized with chunk size 1024 - // however this will be reset to the correct chunk size - // while processing each individual field-term section - tfEncoder := newChunkedIntCoder(1024, newSegDocCount-1) - locEncoder := newChunkedIntCoder(1024, newSegDocCount-1) - - var vellumBuf bytes.Buffer - newVellum, err := vellum.New(&vellumBuf, nil) - if err != nil { - return nil, 0, err - } - - newRoaring := roaring.NewBitmap() - - // for each field - for fieldID, fieldName := range fieldsInv { - // collect FST iterators from all active segments for this field - var newDocNums [][]uint64 - var drops []*roaring.Bitmap - var dicts []*Dictionary - var itrs []vellum.Iterator - - var segmentsInFocus []*SegmentBase - - for segmentI, segment := range segments { - // check for the closure in meantime - if isClosed(closeCh) { - return nil, 0, seg.ErrClosed - } - - dict, err2 := 
segment.dictionary(fieldName) - if err2 != nil { - return nil, 0, err2 - } - if dict != nil && dict.fst != nil { - itr, err2 := dict.fst.Iterator(nil, nil) - if err2 != nil && err2 != vellum.ErrIteratorDone { - return nil, 0, err2 - } - if itr != nil { - newDocNums = append(newDocNums, newDocNumsIn[segmentI]) - if dropsIn[segmentI] != nil && !dropsIn[segmentI].IsEmpty() { - drops = append(drops, dropsIn[segmentI]) - } else { - drops = append(drops, nil) - } - dicts = append(dicts, dict) - itrs = append(itrs, itr) - segmentsInFocus = append(segmentsInFocus, segment) - } - } - } - - var prevTerm []byte - - newRoaring.Clear() - - var lastDocNum, lastFreq, lastNorm uint64 - - // determines whether to use "1-hit" encoding optimization - // when a term appears in only 1 doc, with no loc info, - // has freq of 1, and the docNum fits into 31-bits - use1HitEncoding := func(termCardinality uint64) (bool, uint64, uint64) { - if termCardinality == uint64(1) && locEncoder.FinalSize() <= 0 { - docNum := uint64(newRoaring.Minimum()) - if under32Bits(docNum) && docNum == lastDocNum && lastFreq == 1 { - return true, docNum, lastNorm - } - } - return false, 0, 0 - } - - finishTerm := func(term []byte) error { - tfEncoder.Close() - locEncoder.Close() - - postingsOffset, err := writePostings(newRoaring, - tfEncoder, locEncoder, use1HitEncoding, w, bufMaxVarintLen64) - if err != nil { - return err - } - - if postingsOffset > 0 { - err = newVellum.Insert(term, postingsOffset) - if err != nil { - return err - } - } - - newRoaring.Clear() - - tfEncoder.Reset() - locEncoder.Reset() - - lastDocNum = 0 - lastFreq = 0 - lastNorm = 0 - - return nil - } - - enumerator, err := newEnumerator(itrs) - - for err == nil { - term, itrI, postingsOffset := enumerator.Current() - - if !bytes.Equal(prevTerm, term) { - // check for the closure in meantime - if isClosed(closeCh) { - return nil, 0, seg.ErrClosed - } - - // if the term changed, write out the info collected - // for the previous term - err = 
finishTerm(prevTerm) - if err != nil { - return nil, 0, err - } - } - if !bytes.Equal(prevTerm, term) || prevTerm == nil { - // compute cardinality of field-term in new seg - var newCard uint64 - lowItrIdxs, lowItrVals := enumerator.GetLowIdxsAndValues() - for i, idx := range lowItrIdxs { - pl, err := dicts[idx].postingsListFromOffset(lowItrVals[i], drops[idx], nil) - if err != nil { - return nil, 0, err - } - newCard += pl.Count() - } - // compute correct chunk size with this - chunkSize, err := getChunkSize(chunkMode, newCard, newSegDocCount) - if err != nil { - return nil, 0, err - } - // update encoders chunk - tfEncoder.SetChunkSize(chunkSize, newSegDocCount-1) - locEncoder.SetChunkSize(chunkSize, newSegDocCount-1) - } - - postings, err = dicts[itrI].postingsListFromOffset( - postingsOffset, drops[itrI], postings) - if err != nil { - return nil, 0, err - } - - postItr = postings.iterator(true, true, true, postItr) - - // can no longer optimize by copying, since chunk factor could have changed - lastDocNum, lastFreq, lastNorm, bufLoc, err = mergeTermFreqNormLocs( - fieldsMap, term, postItr, newDocNums[itrI], newRoaring, - tfEncoder, locEncoder, bufLoc) - - if err != nil { - return nil, 0, err - } - - prevTerm = prevTerm[:0] // copy to prevTerm in case Next() reuses term mem - prevTerm = append(prevTerm, term...) 
- - err = enumerator.Next() - } - if err != vellum.ErrIteratorDone { - return nil, 0, err - } - - err = finishTerm(prevTerm) - if err != nil { - return nil, 0, err - } - - dictOffset := uint64(w.Count()) - - err = newVellum.Close() - if err != nil { - return nil, 0, err - } - vellumData := vellumBuf.Bytes() - - // write out the length of the vellum data - n := binary.PutUvarint(bufMaxVarintLen64, uint64(len(vellumData))) - _, err = w.Write(bufMaxVarintLen64[:n]) - if err != nil { - return nil, 0, err - } - - // write this vellum to disk - _, err = w.Write(vellumData) - if err != nil { - return nil, 0, err - } - - rv[fieldID] = dictOffset - - // get the field doc value offset (start) - fieldDvLocsStart[fieldID] = uint64(w.Count()) - - // update the field doc values - // NOTE: doc values continue to use legacy chunk mode - chunkSize, err := getChunkSize(LegacyChunkMode, 0, 0) - if err != nil { - return nil, 0, err - } - fdvEncoder := newChunkedContentCoder(chunkSize, newSegDocCount-1, w, true) - - fdvReadersAvailable := false - var dvIterClone *docValueReader - for segmentI, segment := range segmentsInFocus { - // check for the closure in meantime - if isClosed(closeCh) { - return nil, 0, seg.ErrClosed - } - - fieldIDPlus1 := uint16(segment.fieldsMap[fieldName]) - if dvIter, exists := segment.fieldDvReaders[fieldIDPlus1-1]; exists && - dvIter != nil { - fdvReadersAvailable = true - dvIterClone = dvIter.cloneInto(dvIterClone) - err = dvIterClone.iterateAllDocValues(segment, func(docNum uint64, terms []byte) error { - if newDocNums[segmentI][docNum] == docDropped { - return nil - } - err := fdvEncoder.Add(newDocNums[segmentI][docNum], terms) - if err != nil { - return err - } - return nil - }) - if err != nil { - return nil, 0, err - } - } - } - - if fdvReadersAvailable { - err = fdvEncoder.Close() - if err != nil { - return nil, 0, err - } - - // persist the doc value details for this field - _, err = fdvEncoder.Write() - if err != nil { - return nil, 0, err - } - - 
// get the field doc value offset (end) - fieldDvLocsEnd[fieldID] = uint64(w.Count()) - } else { - fieldDvLocsStart[fieldID] = fieldNotUninverted - fieldDvLocsEnd[fieldID] = fieldNotUninverted - } - - // reset vellum buffer and vellum builder - vellumBuf.Reset() - err = newVellum.Reset(&vellumBuf) - if err != nil { - return nil, 0, err - } - } - - fieldDvLocsOffset := uint64(w.Count()) - - buf := bufMaxVarintLen64 - for i := 0; i < len(fieldDvLocsStart); i++ { - n := binary.PutUvarint(buf, fieldDvLocsStart[i]) - _, err := w.Write(buf[:n]) - if err != nil { - return nil, 0, err - } - n = binary.PutUvarint(buf, fieldDvLocsEnd[i]) - _, err = w.Write(buf[:n]) - if err != nil { - return nil, 0, err - } - } - - return rv, fieldDvLocsOffset, nil -} - -func mergeTermFreqNormLocs(fieldsMap map[string]uint16, term []byte, postItr *PostingsIterator, - newDocNums []uint64, newRoaring *roaring.Bitmap, - tfEncoder *chunkedIntCoder, locEncoder *chunkedIntCoder, bufLoc []uint64) ( - lastDocNum uint64, lastFreq uint64, lastNorm uint64, bufLocOut []uint64, err error) { - next, err := postItr.Next() - for next != nil && err == nil { - hitNewDocNum := newDocNums[next.Number()] - if hitNewDocNum == docDropped { - return 0, 0, 0, nil, fmt.Errorf("see hit with dropped docNum") - } - - newRoaring.Add(uint32(hitNewDocNum)) - - nextFreq := next.Frequency() - nextNorm := uint64(math.Float32bits(float32(next.Norm()))) - - locs := next.Locations() - - err = tfEncoder.Add(hitNewDocNum, - encodeFreqHasLocs(nextFreq, len(locs) > 0), nextNorm) - if err != nil { - return 0, 0, 0, nil, err - } - - if len(locs) > 0 { - numBytesLocs := 0 - for _, loc := range locs { - ap := loc.ArrayPositions() - numBytesLocs += totalUvarintBytes(uint64(fieldsMap[loc.Field()]-1), - loc.Pos(), loc.Start(), loc.End(), uint64(len(ap)), ap) - } - - err = locEncoder.Add(hitNewDocNum, uint64(numBytesLocs)) - if err != nil { - return 0, 0, 0, nil, err - } - - for _, loc := range locs { - ap := loc.ArrayPositions() - if 
cap(bufLoc) < 5+len(ap) { - bufLoc = make([]uint64, 0, 5+len(ap)) - } - args := bufLoc[0:5] - args[0] = uint64(fieldsMap[loc.Field()] - 1) - args[1] = loc.Pos() - args[2] = loc.Start() - args[3] = loc.End() - args[4] = uint64(len(ap)) - args = append(args, ap...) - err = locEncoder.Add(hitNewDocNum, args...) - if err != nil { - return 0, 0, 0, nil, err - } - } - } - - lastDocNum = hitNewDocNum - lastFreq = nextFreq - lastNorm = nextNorm - - next, err = postItr.Next() - } - - return lastDocNum, lastFreq, lastNorm, bufLoc, err -} - -func writePostings(postings *roaring.Bitmap, tfEncoder, locEncoder *chunkedIntCoder, - use1HitEncoding func(uint64) (bool, uint64, uint64), - w *CountHashWriter, bufMaxVarintLen64 []byte) ( - offset uint64, err error) { - termCardinality := postings.GetCardinality() - if termCardinality <= 0 { - return 0, nil - } - - if use1HitEncoding != nil { - encodeAs1Hit, docNum1Hit, normBits1Hit := use1HitEncoding(termCardinality) - if encodeAs1Hit { - return FSTValEncode1Hit(docNum1Hit, normBits1Hit), nil - } - } - - var tfOffset uint64 - tfOffset, _, err = tfEncoder.writeAt(w) - if err != nil { - return 0, err - } - - var locOffset uint64 - locOffset, _, err = locEncoder.writeAt(w) - if err != nil { - return 0, err - } - - postingsOffset := uint64(w.Count()) - - n := binary.PutUvarint(bufMaxVarintLen64, tfOffset) - _, err = w.Write(bufMaxVarintLen64[:n]) - if err != nil { - return 0, err - } - - n = binary.PutUvarint(bufMaxVarintLen64, locOffset) - _, err = w.Write(bufMaxVarintLen64[:n]) - if err != nil { - return 0, err - } - - _, err = writeRoaringWithLen(postings, w, bufMaxVarintLen64) - if err != nil { - return 0, err - } - - return postingsOffset, nil -} - -type varintEncoder func(uint64) (int, error) - -func mergeStoredAndRemap(segments []*SegmentBase, drops []*roaring.Bitmap, - fieldsMap map[string]uint16, fieldsInv []string, fieldsSame bool, newSegDocCount uint64, - w *CountHashWriter, closeCh chan struct{}) (uint64, [][]uint64, error) { - 
var rv [][]uint64 // The remapped or newDocNums for each segment. - - var newDocNum uint64 - - var curr int - var data, compressed []byte - var metaBuf bytes.Buffer - varBuf := make([]byte, binary.MaxVarintLen64) - metaEncode := func(val uint64) (int, error) { - wb := binary.PutUvarint(varBuf, val) - return metaBuf.Write(varBuf[:wb]) - } - - vals := make([][][]byte, len(fieldsInv)) - typs := make([][]byte, len(fieldsInv)) - poss := make([][][]uint64, len(fieldsInv)) - - var posBuf []uint64 - - docNumOffsets := make([]uint64, newSegDocCount) - - vdc := visitDocumentCtxPool.Get().(*visitDocumentCtx) - defer visitDocumentCtxPool.Put(vdc) - - // for each segment - for segI, segment := range segments { - // check for the closure in meantime - if isClosed(closeCh) { - return 0, nil, seg.ErrClosed - } - - segNewDocNums := make([]uint64, segment.numDocs) - - dropsI := drops[segI] - - // optimize when the field mapping is the same across all - // segments and there are no deletions, via byte-copying - // of stored docs bytes directly to the writer - if fieldsSame && (dropsI == nil || dropsI.GetCardinality() == 0) { - err := segment.copyStoredDocs(newDocNum, docNumOffsets, w) - if err != nil { - return 0, nil, err - } - - for i := uint64(0); i < segment.numDocs; i++ { - segNewDocNums[i] = newDocNum - newDocNum++ - } - rv = append(rv, segNewDocNums) - - continue - } - - // for each doc num - for docNum := uint64(0); docNum < segment.numDocs; docNum++ { - // TODO: roaring's API limits docNums to 32-bits? 
- if dropsI != nil && dropsI.Contains(uint32(docNum)) { - segNewDocNums[docNum] = docDropped - continue - } - - segNewDocNums[docNum] = newDocNum - - curr = 0 - metaBuf.Reset() - data = data[:0] - - posTemp := posBuf - - // collect all the data - for i := 0; i < len(fieldsInv); i++ { - vals[i] = vals[i][:0] - typs[i] = typs[i][:0] - poss[i] = poss[i][:0] - } - err := segment.visitStoredFields(vdc, docNum, func(field string, typ byte, value []byte, pos []uint64) bool { - fieldID := int(fieldsMap[field]) - 1 - vals[fieldID] = append(vals[fieldID], value) - typs[fieldID] = append(typs[fieldID], typ) - - // copy array positions to preserve them beyond the scope of this callback - var curPos []uint64 - if len(pos) > 0 { - if cap(posTemp) < len(pos) { - posBuf = make([]uint64, len(pos)*len(fieldsInv)) - posTemp = posBuf - } - curPos = posTemp[0:len(pos)] - copy(curPos, pos) - posTemp = posTemp[len(pos):] - } - poss[fieldID] = append(poss[fieldID], curPos) - - return true - }) - if err != nil { - return 0, nil, err - } - - // _id field special case optimizes ExternalID() lookups - idFieldVal := vals[uint16(0)][0] - _, err = metaEncode(uint64(len(idFieldVal))) - if err != nil { - return 0, nil, err - } - - // now walk the non-"_id" fields in order - for fieldID := 1; fieldID < len(fieldsInv); fieldID++ { - storedFieldValues := vals[fieldID] - - stf := typs[fieldID] - spf := poss[fieldID] - - var err2 error - curr, data, err2 = persistStoredFieldValues(fieldID, - storedFieldValues, stf, spf, curr, metaEncode, data) - if err2 != nil { - return 0, nil, err2 - } - } - - metaBytes := metaBuf.Bytes() - - compressed = snappy.Encode(compressed[:cap(compressed)], data) - - // record where we're about to start writing - docNumOffsets[newDocNum] = uint64(w.Count()) - - // write out the meta len and compressed data len - _, err = writeUvarints(w, - uint64(len(metaBytes)), - uint64(len(idFieldVal)+len(compressed))) - if err != nil { - return 0, nil, err - } - // now write the meta - _, 
err = w.Write(metaBytes) - if err != nil { - return 0, nil, err - } - // now write the _id field val (counted as part of the 'compressed' data) - _, err = w.Write(idFieldVal) - if err != nil { - return 0, nil, err - } - // now write the compressed data - _, err = w.Write(compressed) - if err != nil { - return 0, nil, err - } - - newDocNum++ - } - - rv = append(rv, segNewDocNums) - } - - // return value is the start of the stored index - storedIndexOffset := uint64(w.Count()) - - // now write out the stored doc index - for _, docNumOffset := range docNumOffsets { - err := binary.Write(w, binary.BigEndian, docNumOffset) - if err != nil { - return 0, nil, err - } - } - - return storedIndexOffset, rv, nil -} - -// copyStoredDocs writes out a segment's stored doc info, optimized by -// using a single Write() call for the entire set of bytes. The -// newDocNumOffsets is filled with the new offsets for each doc. -func (s *SegmentBase) copyStoredDocs(newDocNum uint64, newDocNumOffsets []uint64, - w *CountHashWriter) error { - if s.numDocs <= 0 { - return nil - } - - indexOffset0, storedOffset0, _, _, _ := - s.getDocStoredOffsets(0) // the segment's first doc - - indexOffsetN, storedOffsetN, readN, metaLenN, dataLenN := - s.getDocStoredOffsets(s.numDocs - 1) // the segment's last doc - - storedOffset0New := uint64(w.Count()) - - storedBytes := s.mem[storedOffset0 : storedOffsetN+readN+metaLenN+dataLenN] - _, err := w.Write(storedBytes) - if err != nil { - return err - } - - // remap the storedOffset's for the docs into new offsets relative - // to storedOffset0New, filling the given docNumOffsetsOut array - for indexOffset := indexOffset0; indexOffset <= indexOffsetN; indexOffset += 8 { - storedOffset := binary.BigEndian.Uint64(s.mem[indexOffset : indexOffset+8]) - storedOffsetNew := storedOffset - storedOffset0 + storedOffset0New - newDocNumOffsets[newDocNum] = storedOffsetNew - newDocNum += 1 - } - - return nil -} - -// mergeFields builds a unified list of fields used 
across all the -// input segments, and computes whether the fields are the same across -// segments (which depends on fields to be sorted in the same way -// across segments) -func mergeFields(segments []*SegmentBase) (bool, []string) { - fieldsSame := true - - var segment0Fields []string - if len(segments) > 0 { - segment0Fields = segments[0].Fields() - } - - fieldsExist := map[string]struct{}{} - for _, segment := range segments { - fields := segment.Fields() - for fieldi, field := range fields { - fieldsExist[field] = struct{}{} - if len(segment0Fields) != len(fields) || segment0Fields[fieldi] != field { - fieldsSame = false - } - } - } - - rv := make([]string, 0, len(fieldsExist)) - // ensure _id stays first - rv = append(rv, "_id") - for k := range fieldsExist { - if k != "_id" { - rv = append(rv, k) - } - } - - sort.Strings(rv[1:]) // leave _id as first - - return fieldsSame, rv -} - -func isClosed(closeCh chan struct{}) bool { - select { - case <-closeCh: - return true - default: - return false - } -} diff --git a/vendor/github.com/blevesearch/zapx/v14/new.go b/vendor/github.com/blevesearch/zapx/v14/new.go deleted file mode 100644 index b4e0d0341..000000000 --- a/vendor/github.com/blevesearch/zapx/v14/new.go +++ /dev/null @@ -1,830 +0,0 @@ -// Copyright (c) 2018 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package zap - -import ( - "bytes" - "encoding/binary" - "math" - "sort" - "sync" - - "github.com/RoaringBitmap/roaring" - index "github.com/blevesearch/bleve_index_api" - segment "github.com/blevesearch/scorch_segment_api/v2" - "github.com/blevesearch/vellum" - "github.com/golang/snappy" -) - -var NewSegmentBufferNumResultsBump int = 100 -var NewSegmentBufferNumResultsFactor float64 = 1.0 -var NewSegmentBufferAvgBytesPerDocFactor float64 = 1.0 - -// ValidateDocFields can be set by applications to perform additional checks -// on fields in a document being added to a new segment, by default it does -// nothing. -// This API is experimental and may be removed at any time. -var ValidateDocFields = func(field index.Field) error { - return nil -} - -// New creates an in-memory zap-encoded SegmentBase from a set of Documents -func (z *ZapPlugin) New(results []index.Document) ( - segment.Segment, uint64, error) { - return z.newWithChunkMode(results, DefaultChunkMode) -} - -func (*ZapPlugin) newWithChunkMode(results []index.Document, - chunkMode uint32) (segment.Segment, uint64, error) { - s := interimPool.Get().(*interim) - - var br bytes.Buffer - if s.lastNumDocs > 0 { - // use previous results to initialize the buf with an estimate - // size, but note that the interim instance comes from a - // global interimPool, so multiple scorch instances indexing - // different docs can lead to low quality estimates - estimateAvgBytesPerDoc := int(float64(s.lastOutSize/s.lastNumDocs) * - NewSegmentBufferNumResultsFactor) - estimateNumResults := int(float64(len(results)+NewSegmentBufferNumResultsBump) * - NewSegmentBufferAvgBytesPerDocFactor) - br.Grow(estimateAvgBytesPerDoc * estimateNumResults) - } - - s.results = results - s.chunkMode = chunkMode - s.w = NewCountHashWriter(&br) - - storedIndexOffset, fieldsIndexOffset, fdvIndexOffset, dictOffsets, - err := s.convert() - if err != nil { - return nil, uint64(0), err - } - - sb, err := InitSegmentBase(br.Bytes(), s.w.Sum32(), 
chunkMode, - s.FieldsMap, s.FieldsInv, uint64(len(results)), - storedIndexOffset, fieldsIndexOffset, fdvIndexOffset, dictOffsets) - - if err == nil && s.reset() == nil { - s.lastNumDocs = len(results) - s.lastOutSize = len(br.Bytes()) - interimPool.Put(s) - } - - return sb, uint64(len(br.Bytes())), err -} - -var interimPool = sync.Pool{New: func() interface{} { return &interim{} }} - -// interim holds temporary working data used while converting from -// analysis results to a zap-encoded segment -type interim struct { - results []index.Document - - chunkMode uint32 - - w *CountHashWriter - - // FieldsMap adds 1 to field id to avoid zero value issues - // name -> field id + 1 - FieldsMap map[string]uint16 - - // FieldsInv is the inverse of FieldsMap - // field id -> name - FieldsInv []string - - // Term dictionaries for each field - // field id -> term -> postings list id + 1 - Dicts []map[string]uint64 - - // Terms for each field, where terms are sorted ascending - // field id -> []term - DictKeys [][]string - - // Fields whose IncludeDocValues is true - // field id -> bool - IncludeDocValues []bool - - // postings id -> bitmap of docNums - Postings []*roaring.Bitmap - - // postings id -> freq/norm's, one for each docNum in postings - FreqNorms [][]interimFreqNorm - freqNormsBacking []interimFreqNorm - - // postings id -> locs, one for each freq - Locs [][]interimLoc - locsBacking []interimLoc - - numTermsPerPostingsList []int // key is postings list id - numLocsPerPostingsList []int // key is postings list id - - builder *vellum.Builder - builderBuf bytes.Buffer - - metaBuf bytes.Buffer - - tmp0 []byte - tmp1 []byte - - lastNumDocs int - lastOutSize int -} - -func (s *interim) reset() (err error) { - s.results = nil - s.chunkMode = 0 - s.w = nil - s.FieldsMap = nil - s.FieldsInv = nil - for i := range s.Dicts { - s.Dicts[i] = nil - } - s.Dicts = s.Dicts[:0] - for i := range s.DictKeys { - s.DictKeys[i] = s.DictKeys[i][:0] - } - s.DictKeys = s.DictKeys[:0] - for i 
:= range s.IncludeDocValues { - s.IncludeDocValues[i] = false - } - s.IncludeDocValues = s.IncludeDocValues[:0] - for _, idn := range s.Postings { - idn.Clear() - } - s.Postings = s.Postings[:0] - s.FreqNorms = s.FreqNorms[:0] - for i := range s.freqNormsBacking { - s.freqNormsBacking[i] = interimFreqNorm{} - } - s.freqNormsBacking = s.freqNormsBacking[:0] - s.Locs = s.Locs[:0] - for i := range s.locsBacking { - s.locsBacking[i] = interimLoc{} - } - s.locsBacking = s.locsBacking[:0] - s.numTermsPerPostingsList = s.numTermsPerPostingsList[:0] - s.numLocsPerPostingsList = s.numLocsPerPostingsList[:0] - s.builderBuf.Reset() - if s.builder != nil { - err = s.builder.Reset(&s.builderBuf) - } - s.metaBuf.Reset() - s.tmp0 = s.tmp0[:0] - s.tmp1 = s.tmp1[:0] - s.lastNumDocs = 0 - s.lastOutSize = 0 - - return err -} - -func (s *interim) grabBuf(size int) []byte { - buf := s.tmp0 - if cap(buf) < size { - buf = make([]byte, size) - s.tmp0 = buf - } - return buf[0:size] -} - -type interimStoredField struct { - vals [][]byte - typs []byte - arrayposs [][]uint64 // array positions -} - -type interimFreqNorm struct { - freq uint64 - norm float32 - numLocs int -} - -type interimLoc struct { - fieldID uint16 - pos uint64 - start uint64 - end uint64 - arrayposs []uint64 -} - -func (s *interim) convert() (uint64, uint64, uint64, []uint64, error) { - s.FieldsMap = map[string]uint16{} - - s.getOrDefineField("_id") // _id field is fieldID 0 - - for _, result := range s.results { - result.VisitComposite(func(field index.CompositeField) { - s.getOrDefineField(field.Name()) - }) - result.VisitFields(func(field index.Field) { - s.getOrDefineField(field.Name()) - }) - } - - sort.Strings(s.FieldsInv[1:]) // keep _id as first field - - for fieldID, fieldName := range s.FieldsInv { - s.FieldsMap[fieldName] = uint16(fieldID + 1) - } - - if cap(s.IncludeDocValues) >= len(s.FieldsInv) { - s.IncludeDocValues = s.IncludeDocValues[:len(s.FieldsInv)] - } else { - s.IncludeDocValues = make([]bool, 
len(s.FieldsInv)) - } - - s.prepareDicts() - - for _, dict := range s.DictKeys { - sort.Strings(dict) - } - - s.processDocuments() - - storedIndexOffset, err := s.writeStoredFields() - if err != nil { - return 0, 0, 0, nil, err - } - - var fdvIndexOffset uint64 - var dictOffsets []uint64 - - if len(s.results) > 0 { - fdvIndexOffset, dictOffsets, err = s.writeDicts() - if err != nil { - return 0, 0, 0, nil, err - } - } else { - dictOffsets = make([]uint64, len(s.FieldsInv)) - } - - fieldsIndexOffset, err := persistFields(s.FieldsInv, s.w, dictOffsets) - if err != nil { - return 0, 0, 0, nil, err - } - - return storedIndexOffset, fieldsIndexOffset, fdvIndexOffset, dictOffsets, nil -} - -func (s *interim) getOrDefineField(fieldName string) int { - fieldIDPlus1, exists := s.FieldsMap[fieldName] - if !exists { - fieldIDPlus1 = uint16(len(s.FieldsInv) + 1) - s.FieldsMap[fieldName] = fieldIDPlus1 - s.FieldsInv = append(s.FieldsInv, fieldName) - - s.Dicts = append(s.Dicts, make(map[string]uint64)) - - n := len(s.DictKeys) - if n < cap(s.DictKeys) { - s.DictKeys = s.DictKeys[:n+1] - s.DictKeys[n] = s.DictKeys[n][:0] - } else { - s.DictKeys = append(s.DictKeys, []string(nil)) - } - } - - return int(fieldIDPlus1 - 1) -} - -// fill Dicts and DictKeys from analysis results -func (s *interim) prepareDicts() { - var pidNext int - - var totTFs int - var totLocs int - - visitField := func(field index.Field) { - fieldID := uint16(s.getOrDefineField(field.Name())) - - dict := s.Dicts[fieldID] - dictKeys := s.DictKeys[fieldID] - - tfs := field.AnalyzedTokenFrequencies() - for term, tf := range tfs { - pidPlus1, exists := dict[term] - if !exists { - pidNext++ - pidPlus1 = uint64(pidNext) - - dict[term] = pidPlus1 - dictKeys = append(dictKeys, term) - - s.numTermsPerPostingsList = append(s.numTermsPerPostingsList, 0) - s.numLocsPerPostingsList = append(s.numLocsPerPostingsList, 0) - } - - pid := pidPlus1 - 1 - - s.numTermsPerPostingsList[pid] += 1 - s.numLocsPerPostingsList[pid] += 
len(tf.Locations) - - totLocs += len(tf.Locations) - } - - totTFs += len(tfs) - - s.DictKeys[fieldID] = dictKeys - } - - for _, result := range s.results { - // walk each composite field - result.VisitComposite(func(field index.CompositeField) { - visitField(field) - }) - - // walk each field - result.VisitFields(visitField) - } - - numPostingsLists := pidNext - - if cap(s.Postings) >= numPostingsLists { - s.Postings = s.Postings[:numPostingsLists] - } else { - postings := make([]*roaring.Bitmap, numPostingsLists) - copy(postings, s.Postings[:cap(s.Postings)]) - for i := 0; i < numPostingsLists; i++ { - if postings[i] == nil { - postings[i] = roaring.New() - } - } - s.Postings = postings - } - - if cap(s.FreqNorms) >= numPostingsLists { - s.FreqNorms = s.FreqNorms[:numPostingsLists] - } else { - s.FreqNorms = make([][]interimFreqNorm, numPostingsLists) - } - - if cap(s.freqNormsBacking) >= totTFs { - s.freqNormsBacking = s.freqNormsBacking[:totTFs] - } else { - s.freqNormsBacking = make([]interimFreqNorm, totTFs) - } - - freqNormsBacking := s.freqNormsBacking - for pid, numTerms := range s.numTermsPerPostingsList { - s.FreqNorms[pid] = freqNormsBacking[0:0] - freqNormsBacking = freqNormsBacking[numTerms:] - } - - if cap(s.Locs) >= numPostingsLists { - s.Locs = s.Locs[:numPostingsLists] - } else { - s.Locs = make([][]interimLoc, numPostingsLists) - } - - if cap(s.locsBacking) >= totLocs { - s.locsBacking = s.locsBacking[:totLocs] - } else { - s.locsBacking = make([]interimLoc, totLocs) - } - - locsBacking := s.locsBacking - for pid, numLocs := range s.numLocsPerPostingsList { - s.Locs[pid] = locsBacking[0:0] - locsBacking = locsBacking[numLocs:] - } -} - -func (s *interim) processDocuments() { - numFields := len(s.FieldsInv) - reuseFieldLens := make([]int, numFields) - reuseFieldTFs := make([]index.TokenFrequencies, numFields) - - for docNum, result := range s.results { - for i := 0; i < numFields; i++ { // clear these for reuse - reuseFieldLens[i] = 0 - 
reuseFieldTFs[i] = nil - } - - s.processDocument(uint64(docNum), result, - reuseFieldLens, reuseFieldTFs) - } -} - -func (s *interim) processDocument(docNum uint64, - result index.Document, - fieldLens []int, fieldTFs []index.TokenFrequencies) { - visitField := func(field index.Field) { - fieldID := uint16(s.getOrDefineField(field.Name())) - fieldLens[fieldID] += field.AnalyzedLength() - - existingFreqs := fieldTFs[fieldID] - if existingFreqs != nil { - existingFreqs.MergeAll(field.Name(), field.AnalyzedTokenFrequencies()) - } else { - fieldTFs[fieldID] = field.AnalyzedTokenFrequencies() - } - } - - // walk each composite field - result.VisitComposite(func(field index.CompositeField) { - visitField(field) - }) - - // walk each field - result.VisitFields(visitField) - - // now that it's been rolled up into fieldTFs, walk that - for fieldID, tfs := range fieldTFs { - dict := s.Dicts[fieldID] - norm := float32(1.0 / math.Sqrt(float64(fieldLens[fieldID]))) - - for term, tf := range tfs { - pid := dict[term] - 1 - bs := s.Postings[pid] - bs.Add(uint32(docNum)) - - s.FreqNorms[pid] = append(s.FreqNorms[pid], - interimFreqNorm{ - freq: uint64(tf.Frequency()), - norm: norm, - numLocs: len(tf.Locations), - }) - - if len(tf.Locations) > 0 { - locs := s.Locs[pid] - - for _, loc := range tf.Locations { - var locf = uint16(fieldID) - if loc.Field != "" { - locf = uint16(s.getOrDefineField(loc.Field)) - } - var arrayposs []uint64 - if len(loc.ArrayPositions) > 0 { - arrayposs = loc.ArrayPositions - } - locs = append(locs, interimLoc{ - fieldID: locf, - pos: uint64(loc.Position), - start: uint64(loc.Start), - end: uint64(loc.End), - arrayposs: arrayposs, - }) - } - - s.Locs[pid] = locs - } - } - } -} - -func (s *interim) writeStoredFields() ( - storedIndexOffset uint64, err error) { - varBuf := make([]byte, binary.MaxVarintLen64) - metaEncode := func(val uint64) (int, error) { - wb := binary.PutUvarint(varBuf, val) - return s.metaBuf.Write(varBuf[:wb]) - } - - data, compressed := 
s.tmp0[:0], s.tmp1[:0] - defer func() { s.tmp0, s.tmp1 = data, compressed }() - - // keyed by docNum - docStoredOffsets := make([]uint64, len(s.results)) - - // keyed by fieldID, for the current doc in the loop - docStoredFields := map[uint16]interimStoredField{} - - for docNum, result := range s.results { - for fieldID := range docStoredFields { // reset for next doc - delete(docStoredFields, fieldID) - } - - var validationErr error - result.VisitFields(func(field index.Field) { - fieldID := uint16(s.getOrDefineField(field.Name())) - - if field.Options().IsStored() { - isf := docStoredFields[fieldID] - isf.vals = append(isf.vals, field.Value()) - isf.typs = append(isf.typs, field.EncodedFieldType()) - isf.arrayposs = append(isf.arrayposs, field.ArrayPositions()) - docStoredFields[fieldID] = isf - } - - if field.Options().IncludeDocValues() { - s.IncludeDocValues[fieldID] = true - } - - err := ValidateDocFields(field) - if err != nil && validationErr == nil { - validationErr = err - } - }) - if validationErr != nil { - return 0, validationErr - } - - var curr int - - s.metaBuf.Reset() - data = data[:0] - - // _id field special case optimizes ExternalID() lookups - idFieldVal := docStoredFields[uint16(0)].vals[0] - _, err = metaEncode(uint64(len(idFieldVal))) - if err != nil { - return 0, err - } - - // handle non-"_id" fields - for fieldID := 1; fieldID < len(s.FieldsInv); fieldID++ { - isf, exists := docStoredFields[uint16(fieldID)] - if exists { - curr, data, err = persistStoredFieldValues( - fieldID, isf.vals, isf.typs, isf.arrayposs, - curr, metaEncode, data) - if err != nil { - return 0, err - } - } - } - - metaBytes := s.metaBuf.Bytes() - - compressed = snappy.Encode(compressed[:cap(compressed)], data) - - docStoredOffsets[docNum] = uint64(s.w.Count()) - - _, err := writeUvarints(s.w, - uint64(len(metaBytes)), - uint64(len(idFieldVal)+len(compressed))) - if err != nil { - return 0, err - } - - _, err = s.w.Write(metaBytes) - if err != nil { - return 0, err - 
} - - _, err = s.w.Write(idFieldVal) - if err != nil { - return 0, err - } - - _, err = s.w.Write(compressed) - if err != nil { - return 0, err - } - } - - storedIndexOffset = uint64(s.w.Count()) - - for _, docStoredOffset := range docStoredOffsets { - err = binary.Write(s.w, binary.BigEndian, docStoredOffset) - if err != nil { - return 0, err - } - } - - return storedIndexOffset, nil -} - -func (s *interim) writeDicts() (fdvIndexOffset uint64, dictOffsets []uint64, err error) { - dictOffsets = make([]uint64, len(s.FieldsInv)) - - fdvOffsetsStart := make([]uint64, len(s.FieldsInv)) - fdvOffsetsEnd := make([]uint64, len(s.FieldsInv)) - - buf := s.grabBuf(binary.MaxVarintLen64) - - // these int coders are initialized with chunk size 1024 - // however this will be reset to the correct chunk size - // while processing each individual field-term section - tfEncoder := newChunkedIntCoder(1024, uint64(len(s.results)-1)) - locEncoder := newChunkedIntCoder(1024, uint64(len(s.results)-1)) - - var docTermMap [][]byte - - if s.builder == nil { - s.builder, err = vellum.New(&s.builderBuf, nil) - if err != nil { - return 0, nil, err - } - } - - for fieldID, terms := range s.DictKeys { - if cap(docTermMap) < len(s.results) { - docTermMap = make([][]byte, len(s.results)) - } else { - docTermMap = docTermMap[0:len(s.results)] - for docNum := range docTermMap { // reset the docTermMap - docTermMap[docNum] = docTermMap[docNum][:0] - } - } - - dict := s.Dicts[fieldID] - - for _, term := range terms { // terms are already sorted - pid := dict[term] - 1 - - postingsBS := s.Postings[pid] - - freqNorms := s.FreqNorms[pid] - freqNormOffset := 0 - - locs := s.Locs[pid] - locOffset := 0 - - chunkSize, err := getChunkSize(s.chunkMode, postingsBS.GetCardinality(), uint64(len(s.results))) - if err != nil { - return 0, nil, err - } - tfEncoder.SetChunkSize(chunkSize, uint64(len(s.results)-1)) - locEncoder.SetChunkSize(chunkSize, uint64(len(s.results)-1)) - - postingsItr := postingsBS.Iterator() 
- for postingsItr.HasNext() { - docNum := uint64(postingsItr.Next()) - - freqNorm := freqNorms[freqNormOffset] - - err = tfEncoder.Add(docNum, - encodeFreqHasLocs(freqNorm.freq, freqNorm.numLocs > 0), - uint64(math.Float32bits(freqNorm.norm))) - if err != nil { - return 0, nil, err - } - - if freqNorm.numLocs > 0 { - numBytesLocs := 0 - for _, loc := range locs[locOffset : locOffset+freqNorm.numLocs] { - numBytesLocs += totalUvarintBytes( - uint64(loc.fieldID), loc.pos, loc.start, loc.end, - uint64(len(loc.arrayposs)), loc.arrayposs) - } - - err = locEncoder.Add(docNum, uint64(numBytesLocs)) - if err != nil { - return 0, nil, err - } - - for _, loc := range locs[locOffset : locOffset+freqNorm.numLocs] { - err = locEncoder.Add(docNum, - uint64(loc.fieldID), loc.pos, loc.start, loc.end, - uint64(len(loc.arrayposs))) - if err != nil { - return 0, nil, err - } - - err = locEncoder.Add(docNum, loc.arrayposs...) - if err != nil { - return 0, nil, err - } - } - - locOffset += freqNorm.numLocs - } - - freqNormOffset++ - - docTermMap[docNum] = append( - append(docTermMap[docNum], term...), - termSeparator) - } - - tfEncoder.Close() - locEncoder.Close() - - postingsOffset, err := - writePostings(postingsBS, tfEncoder, locEncoder, nil, s.w, buf) - if err != nil { - return 0, nil, err - } - - if postingsOffset > uint64(0) { - err = s.builder.Insert([]byte(term), postingsOffset) - if err != nil { - return 0, nil, err - } - } - - tfEncoder.Reset() - locEncoder.Reset() - } - - err = s.builder.Close() - if err != nil { - return 0, nil, err - } - - // record where this dictionary starts - dictOffsets[fieldID] = uint64(s.w.Count()) - - vellumData := s.builderBuf.Bytes() - - // write out the length of the vellum data - n := binary.PutUvarint(buf, uint64(len(vellumData))) - _, err = s.w.Write(buf[:n]) - if err != nil { - return 0, nil, err - } - - // write this vellum to disk - _, err = s.w.Write(vellumData) - if err != nil { - return 0, nil, err - } - - // reset vellum for reuse - 
s.builderBuf.Reset() - - err = s.builder.Reset(&s.builderBuf) - if err != nil { - return 0, nil, err - } - - // write the field doc values - // NOTE: doc values continue to use legacy chunk mode - chunkSize, err := getChunkSize(LegacyChunkMode, 0, 0) - if err != nil { - return 0, nil, err - } - fdvEncoder := newChunkedContentCoder(chunkSize, uint64(len(s.results)-1), s.w, false) - if s.IncludeDocValues[fieldID] { - for docNum, docTerms := range docTermMap { - if len(docTerms) > 0 { - err = fdvEncoder.Add(uint64(docNum), docTerms) - if err != nil { - return 0, nil, err - } - } - } - err = fdvEncoder.Close() - if err != nil { - return 0, nil, err - } - - fdvOffsetsStart[fieldID] = uint64(s.w.Count()) - - _, err = fdvEncoder.Write() - if err != nil { - return 0, nil, err - } - - fdvOffsetsEnd[fieldID] = uint64(s.w.Count()) - - fdvEncoder.Reset() - } else { - fdvOffsetsStart[fieldID] = fieldNotUninverted - fdvOffsetsEnd[fieldID] = fieldNotUninverted - } - } - - fdvIndexOffset = uint64(s.w.Count()) - - for i := 0; i < len(fdvOffsetsStart); i++ { - n := binary.PutUvarint(buf, fdvOffsetsStart[i]) - _, err := s.w.Write(buf[:n]) - if err != nil { - return 0, nil, err - } - n = binary.PutUvarint(buf, fdvOffsetsEnd[i]) - _, err = s.w.Write(buf[:n]) - if err != nil { - return 0, nil, err - } - } - - return fdvIndexOffset, dictOffsets, nil -} - -// returns the total # of bytes needed to encode the given uint64's -// into binary.PutUVarint() encoding -func totalUvarintBytes(a, b, c, d, e uint64, more []uint64) (n int) { - n = numUvarintBytes(a) - n += numUvarintBytes(b) - n += numUvarintBytes(c) - n += numUvarintBytes(d) - n += numUvarintBytes(e) - for _, v := range more { - n += numUvarintBytes(v) - } - return n -} - -// returns # of bytes needed to encode x in binary.PutUvarint() encoding -func numUvarintBytes(x uint64) (n int) { - for x >= 0x80 { - x >>= 7 - n++ - } - return n + 1 -} diff --git a/vendor/github.com/blevesearch/zapx/v14/read.go 
b/vendor/github.com/blevesearch/zapx/v14/read.go deleted file mode 100644 index e47d4c6ab..000000000 --- a/vendor/github.com/blevesearch/zapx/v14/read.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright (c) 2017 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package zap - -import "encoding/binary" - -func (s *SegmentBase) getDocStoredMetaAndCompressed(docNum uint64) ([]byte, []byte) { - _, storedOffset, n, metaLen, dataLen := s.getDocStoredOffsets(docNum) - - meta := s.mem[storedOffset+n : storedOffset+n+metaLen] - data := s.mem[storedOffset+n+metaLen : storedOffset+n+metaLen+dataLen] - - return meta, data -} - -func (s *SegmentBase) getDocStoredOffsets(docNum uint64) ( - uint64, uint64, uint64, uint64, uint64) { - indexOffset := s.storedIndexOffset + (8 * docNum) - - storedOffset := binary.BigEndian.Uint64(s.mem[indexOffset : indexOffset+8]) - - var n uint64 - - metaLen, read := binary.Uvarint(s.mem[storedOffset : storedOffset+binary.MaxVarintLen64]) - n += uint64(read) - - dataLen, read := binary.Uvarint(s.mem[storedOffset+n : storedOffset+n+binary.MaxVarintLen64]) - n += uint64(read) - - return indexOffset, storedOffset, n, metaLen, dataLen -} diff --git a/vendor/github.com/blevesearch/zapx/v14/segment.go b/vendor/github.com/blevesearch/zapx/v14/segment.go deleted file mode 100644 index 6317ad403..000000000 --- a/vendor/github.com/blevesearch/zapx/v14/segment.go +++ /dev/null @@ -1,570 +0,0 @@ -// Copyright (c) 2017 Couchbase, Inc. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package zap - -import ( - "bytes" - "encoding/binary" - "fmt" - "io" - "os" - "sync" - "unsafe" - - "github.com/RoaringBitmap/roaring" - mmap "github.com/blevesearch/mmap-go" - segment "github.com/blevesearch/scorch_segment_api/v2" - "github.com/blevesearch/vellum" - "github.com/golang/snappy" -) - -var reflectStaticSizeSegmentBase int - -func init() { - var sb SegmentBase - reflectStaticSizeSegmentBase = int(unsafe.Sizeof(sb)) -} - -// Open returns a zap impl of a segment -func (*ZapPlugin) Open(path string) (segment.Segment, error) { - f, err := os.Open(path) - if err != nil { - return nil, err - } - mm, err := mmap.Map(f, mmap.RDONLY, 0) - if err != nil { - // mmap failed, try to close the file - _ = f.Close() - return nil, err - } - - rv := &Segment{ - SegmentBase: SegmentBase{ - mem: mm[0 : len(mm)-FooterSize], - fieldsMap: make(map[string]uint16), - fieldDvReaders: make(map[uint16]*docValueReader), - fieldFSTs: make(map[uint16]*vellum.FST), - }, - f: f, - mm: mm, - path: path, - refs: 1, - } - rv.SegmentBase.updateSize() - - err = rv.loadConfig() - if err != nil { - _ = rv.Close() - return nil, err - } - - err = rv.loadFields() - if err != nil { - _ = rv.Close() - return nil, err - } - - err = rv.loadDvReaders() - if err != nil { - _ = rv.Close() - return nil, err - } - - return rv, nil -} - -// SegmentBase is a memory only, read-only implementation of the -// segment.Segment interface, using zap's 
data representation. -type SegmentBase struct { - mem []byte - memCRC uint32 - chunkMode uint32 - fieldsMap map[string]uint16 // fieldName -> fieldID+1 - fieldsInv []string // fieldID -> fieldName - numDocs uint64 - storedIndexOffset uint64 - fieldsIndexOffset uint64 - docValueOffset uint64 - dictLocs []uint64 - fieldDvReaders map[uint16]*docValueReader // naive chunk cache per field - fieldDvNames []string // field names cached in fieldDvReaders - size uint64 - - m sync.Mutex - fieldFSTs map[uint16]*vellum.FST -} - -func (sb *SegmentBase) Size() int { - return int(sb.size) -} - -func (sb *SegmentBase) updateSize() { - sizeInBytes := reflectStaticSizeSegmentBase + - cap(sb.mem) - - // fieldsMap - for k := range sb.fieldsMap { - sizeInBytes += (len(k) + SizeOfString) + SizeOfUint16 - } - - // fieldsInv, dictLocs - for _, entry := range sb.fieldsInv { - sizeInBytes += len(entry) + SizeOfString - } - sizeInBytes += len(sb.dictLocs) * SizeOfUint64 - - // fieldDvReaders - for _, v := range sb.fieldDvReaders { - sizeInBytes += SizeOfUint16 + SizeOfPtr - if v != nil { - sizeInBytes += v.size() - } - } - - sb.size = uint64(sizeInBytes) -} - -func (sb *SegmentBase) AddRef() {} -func (sb *SegmentBase) DecRef() (err error) { return nil } -func (sb *SegmentBase) Close() (err error) { return nil } - -// Segment implements a persisted segment.Segment interface, by -// embedding an mmap()'ed SegmentBase. -type Segment struct { - SegmentBase - - f *os.File - mm mmap.MMap - path string - version uint32 - crc uint32 - - m sync.Mutex // Protects the fields that follow. 
- refs int64 -} - -func (s *Segment) Size() int { - // 8 /* size of file pointer */ - // 4 /* size of version -> uint32 */ - // 4 /* size of crc -> uint32 */ - sizeOfUints := 16 - - sizeInBytes := (len(s.path) + SizeOfString) + sizeOfUints - - // mutex, refs -> int64 - sizeInBytes += 16 - - // do not include the mmap'ed part - return sizeInBytes + s.SegmentBase.Size() - cap(s.mem) -} - -func (s *Segment) AddRef() { - s.m.Lock() - s.refs++ - s.m.Unlock() -} - -func (s *Segment) DecRef() (err error) { - s.m.Lock() - s.refs-- - if s.refs == 0 { - err = s.closeActual() - } - s.m.Unlock() - return err -} - -func (s *Segment) loadConfig() error { - crcOffset := len(s.mm) - 4 - s.crc = binary.BigEndian.Uint32(s.mm[crcOffset : crcOffset+4]) - - verOffset := crcOffset - 4 - s.version = binary.BigEndian.Uint32(s.mm[verOffset : verOffset+4]) - if s.version != Version { - return fmt.Errorf("unsupported version %d", s.version) - } - - chunkOffset := verOffset - 4 - s.chunkMode = binary.BigEndian.Uint32(s.mm[chunkOffset : chunkOffset+4]) - - docValueOffset := chunkOffset - 8 - s.docValueOffset = binary.BigEndian.Uint64(s.mm[docValueOffset : docValueOffset+8]) - - fieldsIndexOffset := docValueOffset - 8 - s.fieldsIndexOffset = binary.BigEndian.Uint64(s.mm[fieldsIndexOffset : fieldsIndexOffset+8]) - - storedIndexOffset := fieldsIndexOffset - 8 - s.storedIndexOffset = binary.BigEndian.Uint64(s.mm[storedIndexOffset : storedIndexOffset+8]) - - numDocsOffset := storedIndexOffset - 8 - s.numDocs = binary.BigEndian.Uint64(s.mm[numDocsOffset : numDocsOffset+8]) - return nil -} - -func (s *SegmentBase) loadFields() error { - // NOTE for now we assume the fields index immediately precedes - // the footer, and if this changes, need to adjust accordingly (or - // store explicit length), where s.mem was sliced from s.mm in Open(). 
- fieldsIndexEnd := uint64(len(s.mem)) - - // iterate through fields index - var fieldID uint64 - for s.fieldsIndexOffset+(8*fieldID) < fieldsIndexEnd { - addr := binary.BigEndian.Uint64(s.mem[s.fieldsIndexOffset+(8*fieldID) : s.fieldsIndexOffset+(8*fieldID)+8]) - - dictLoc, read := binary.Uvarint(s.mem[addr:fieldsIndexEnd]) - n := uint64(read) - s.dictLocs = append(s.dictLocs, dictLoc) - - var nameLen uint64 - nameLen, read = binary.Uvarint(s.mem[addr+n : fieldsIndexEnd]) - n += uint64(read) - - name := string(s.mem[addr+n : addr+n+nameLen]) - s.fieldsInv = append(s.fieldsInv, name) - s.fieldsMap[name] = uint16(fieldID + 1) - - fieldID++ - } - return nil -} - -// Dictionary returns the term dictionary for the specified field -func (s *SegmentBase) Dictionary(field string) (segment.TermDictionary, error) { - dict, err := s.dictionary(field) - if err == nil && dict == nil { - return emptyDictionary, nil - } - return dict, err -} - -func (sb *SegmentBase) dictionary(field string) (rv *Dictionary, err error) { - fieldIDPlus1 := sb.fieldsMap[field] - if fieldIDPlus1 > 0 { - rv = &Dictionary{ - sb: sb, - field: field, - fieldID: fieldIDPlus1 - 1, - } - - dictStart := sb.dictLocs[rv.fieldID] - if dictStart > 0 { - var ok bool - sb.m.Lock() - if rv.fst, ok = sb.fieldFSTs[rv.fieldID]; !ok { - // read the length of the vellum data - vellumLen, read := binary.Uvarint(sb.mem[dictStart : dictStart+binary.MaxVarintLen64]) - fstBytes := sb.mem[dictStart+uint64(read) : dictStart+uint64(read)+vellumLen] - rv.fst, err = vellum.Load(fstBytes) - if err != nil { - sb.m.Unlock() - return nil, fmt.Errorf("dictionary field %s vellum err: %v", field, err) - } - - sb.fieldFSTs[rv.fieldID] = rv.fst - } - - sb.m.Unlock() - rv.fstReader, err = rv.fst.Reader() - if err != nil { - return nil, fmt.Errorf("dictionary field %s vellum reader err: %v", field, err) - } - } - } - - return rv, nil -} - -// visitDocumentCtx holds data structures that are reusable across -// multiple VisitDocument() 
calls to avoid memory allocations -type visitDocumentCtx struct { - buf []byte - reader bytes.Reader - arrayPos []uint64 -} - -var visitDocumentCtxPool = sync.Pool{ - New: func() interface{} { - reuse := &visitDocumentCtx{} - return reuse - }, -} - -// VisitStoredFields invokes the StoredFieldValueVisitor for each stored field -// for the specified doc number -func (s *SegmentBase) VisitStoredFields(num uint64, visitor segment.StoredFieldValueVisitor) error { - vdc := visitDocumentCtxPool.Get().(*visitDocumentCtx) - defer visitDocumentCtxPool.Put(vdc) - return s.visitStoredFields(vdc, num, visitor) -} - -func (s *SegmentBase) visitStoredFields(vdc *visitDocumentCtx, num uint64, - visitor segment.StoredFieldValueVisitor) error { - // first make sure this is a valid number in this segment - if num < s.numDocs { - meta, compressed := s.getDocStoredMetaAndCompressed(num) - - vdc.reader.Reset(meta) - - // handle _id field special case - idFieldValLen, err := binary.ReadUvarint(&vdc.reader) - if err != nil { - return err - } - idFieldVal := compressed[:idFieldValLen] - - keepGoing := visitor("_id", byte('t'), idFieldVal, nil) - if !keepGoing { - visitDocumentCtxPool.Put(vdc) - return nil - } - - // handle non-"_id" fields - compressed = compressed[idFieldValLen:] - - uncompressed, err := snappy.Decode(vdc.buf[:cap(vdc.buf)], compressed) - if err != nil { - return err - } - - for keepGoing { - field, err := binary.ReadUvarint(&vdc.reader) - if err == io.EOF { - break - } - if err != nil { - return err - } - typ, err := binary.ReadUvarint(&vdc.reader) - if err != nil { - return err - } - offset, err := binary.ReadUvarint(&vdc.reader) - if err != nil { - return err - } - l, err := binary.ReadUvarint(&vdc.reader) - if err != nil { - return err - } - numap, err := binary.ReadUvarint(&vdc.reader) - if err != nil { - return err - } - var arrayPos []uint64 - if numap > 0 { - if cap(vdc.arrayPos) < int(numap) { - vdc.arrayPos = make([]uint64, numap) - } - arrayPos = 
vdc.arrayPos[:numap] - for i := 0; i < int(numap); i++ { - ap, err := binary.ReadUvarint(&vdc.reader) - if err != nil { - return err - } - arrayPos[i] = ap - } - } - - value := uncompressed[offset : offset+l] - keepGoing = visitor(s.fieldsInv[field], byte(typ), value, arrayPos) - } - - vdc.buf = uncompressed - } - return nil -} - -// DocID returns the value of the _id field for the given docNum -func (s *SegmentBase) DocID(num uint64) ([]byte, error) { - if num >= s.numDocs { - return nil, nil - } - - vdc := visitDocumentCtxPool.Get().(*visitDocumentCtx) - - meta, compressed := s.getDocStoredMetaAndCompressed(num) - - vdc.reader.Reset(meta) - - // handle _id field special case - idFieldValLen, err := binary.ReadUvarint(&vdc.reader) - if err != nil { - return nil, err - } - idFieldVal := compressed[:idFieldValLen] - - visitDocumentCtxPool.Put(vdc) - - return idFieldVal, nil -} - -// Count returns the number of documents in this segment. -func (s *SegmentBase) Count() uint64 { - return s.numDocs -} - -// DocNumbers returns a bitset corresponding to the doc numbers of all the -// provided _id strings -func (s *SegmentBase) DocNumbers(ids []string) (*roaring.Bitmap, error) { - rv := roaring.New() - - if len(s.fieldsMap) > 0 { - idDict, err := s.dictionary("_id") - if err != nil { - return nil, err - } - - postingsList := emptyPostingsList - - sMax, err := idDict.fst.GetMaxKey() - if err != nil { - return nil, err - } - sMaxStr := string(sMax) - filteredIds := make([]string, 0, len(ids)) - for _, id := range ids { - if id <= sMaxStr { - filteredIds = append(filteredIds, id) - } - } - - for _, id := range filteredIds { - postingsList, err = idDict.postingsList([]byte(id), nil, postingsList) - if err != nil { - return nil, err - } - postingsList.OrInto(rv) - } - } - - return rv, nil -} - -// Fields returns the field names used in this segment -func (s *SegmentBase) Fields() []string { - return s.fieldsInv -} - -// Path returns the path of this segment on disk -func (s 
*Segment) Path() string { - return s.path -} - -// Close releases all resources associated with this segment -func (s *Segment) Close() (err error) { - return s.DecRef() -} - -func (s *Segment) closeActual() (err error) { - if s.mm != nil { - err = s.mm.Unmap() - } - // try to close file even if unmap failed - if s.f != nil { - err2 := s.f.Close() - if err == nil { - // try to return first error - err = err2 - } - } - return -} - -// some helpers i started adding for the command-line utility - -// Data returns the underlying mmaped data slice -func (s *Segment) Data() []byte { - return s.mm -} - -// CRC returns the CRC value stored in the file footer -func (s *Segment) CRC() uint32 { - return s.crc -} - -// Version returns the file version in the file footer -func (s *Segment) Version() uint32 { - return s.version -} - -// ChunkFactor returns the chunk factor in the file footer -func (s *Segment) ChunkMode() uint32 { - return s.chunkMode -} - -// FieldsIndexOffset returns the fields index offset in the file footer -func (s *Segment) FieldsIndexOffset() uint64 { - return s.fieldsIndexOffset -} - -// StoredIndexOffset returns the stored value index offset in the file footer -func (s *Segment) StoredIndexOffset() uint64 { - return s.storedIndexOffset -} - -// DocValueOffset returns the docValue offset in the file footer -func (s *Segment) DocValueOffset() uint64 { - return s.docValueOffset -} - -// NumDocs returns the number of documents in the file footer -func (s *Segment) NumDocs() uint64 { - return s.numDocs -} - -// DictAddr is a helper function to compute the file offset where the -// dictionary is stored for the specified field. 
-func (s *Segment) DictAddr(field string) (uint64, error) { - fieldIDPlus1, ok := s.fieldsMap[field] - if !ok { - return 0, fmt.Errorf("no such field '%s'", field) - } - - return s.dictLocs[fieldIDPlus1-1], nil -} - -func (s *SegmentBase) loadDvReaders() error { - if s.docValueOffset == fieldNotUninverted || s.numDocs == 0 { - return nil - } - - var read uint64 - for fieldID, field := range s.fieldsInv { - var fieldLocStart, fieldLocEnd uint64 - var n int - fieldLocStart, n = binary.Uvarint(s.mem[s.docValueOffset+read : s.docValueOffset+read+binary.MaxVarintLen64]) - if n <= 0 { - return fmt.Errorf("loadDvReaders: failed to read the docvalue offset start for field %d", fieldID) - } - read += uint64(n) - fieldLocEnd, n = binary.Uvarint(s.mem[s.docValueOffset+read : s.docValueOffset+read+binary.MaxVarintLen64]) - if n <= 0 { - return fmt.Errorf("loadDvReaders: failed to read the docvalue offset end for field %d", fieldID) - } - read += uint64(n) - - fieldDvReader, err := s.loadFieldDocValueReader(field, fieldLocStart, fieldLocEnd) - if err != nil { - return err - } - if fieldDvReader != nil { - s.fieldDvReaders[uint16(fieldID)] = fieldDvReader - s.fieldDvNames = append(s.fieldDvNames, field) - } - } - - return nil -} diff --git a/vendor/github.com/blevesearch/zapx/v14/sizes.go b/vendor/github.com/blevesearch/zapx/v14/sizes.go deleted file mode 100644 index 34166ea33..000000000 --- a/vendor/github.com/blevesearch/zapx/v14/sizes.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright (c) 2020 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package zap - -import ( - "reflect" -) - -func init() { - var b bool - SizeOfBool = int(reflect.TypeOf(b).Size()) - var f32 float32 - SizeOfFloat32 = int(reflect.TypeOf(f32).Size()) - var f64 float64 - SizeOfFloat64 = int(reflect.TypeOf(f64).Size()) - var i int - SizeOfInt = int(reflect.TypeOf(i).Size()) - var m map[int]int - SizeOfMap = int(reflect.TypeOf(m).Size()) - var ptr *int - SizeOfPtr = int(reflect.TypeOf(ptr).Size()) - var slice []int - SizeOfSlice = int(reflect.TypeOf(slice).Size()) - var str string - SizeOfString = int(reflect.TypeOf(str).Size()) - var u8 uint8 - SizeOfUint8 = int(reflect.TypeOf(u8).Size()) - var u16 uint16 - SizeOfUint16 = int(reflect.TypeOf(u16).Size()) - var u32 uint32 - SizeOfUint32 = int(reflect.TypeOf(u32).Size()) - var u64 uint64 - SizeOfUint64 = int(reflect.TypeOf(u64).Size()) -} - -var SizeOfBool int -var SizeOfFloat32 int -var SizeOfFloat64 int -var SizeOfInt int -var SizeOfMap int -var SizeOfPtr int -var SizeOfSlice int -var SizeOfString int -var SizeOfUint8 int -var SizeOfUint16 int -var SizeOfUint32 int -var SizeOfUint64 int diff --git a/vendor/github.com/blevesearch/zapx/v14/write.go b/vendor/github.com/blevesearch/zapx/v14/write.go deleted file mode 100644 index 77aefdbfc..000000000 --- a/vendor/github.com/blevesearch/zapx/v14/write.go +++ /dev/null @@ -1,145 +0,0 @@ -// Copyright (c) 2017 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package zap - -import ( - "encoding/binary" - "io" - - "github.com/RoaringBitmap/roaring" -) - -// writes out the length of the roaring bitmap in bytes as varint -// then writes out the roaring bitmap itself -func writeRoaringWithLen(r *roaring.Bitmap, w io.Writer, - reuseBufVarint []byte) (int, error) { - buf, err := r.ToBytes() - if err != nil { - return 0, err - } - - var tw int - - // write out the length - n := binary.PutUvarint(reuseBufVarint, uint64(len(buf))) - nw, err := w.Write(reuseBufVarint[:n]) - tw += nw - if err != nil { - return tw, err - } - - // write out the roaring bytes - nw, err = w.Write(buf) - tw += nw - if err != nil { - return tw, err - } - - return tw, nil -} - -func persistFields(fieldsInv []string, w *CountHashWriter, dictLocs []uint64) (uint64, error) { - var rv uint64 - var fieldsOffsets []uint64 - - for fieldID, fieldName := range fieldsInv { - // record start of this field - fieldsOffsets = append(fieldsOffsets, uint64(w.Count())) - - // write out the dict location and field name length - _, err := writeUvarints(w, dictLocs[fieldID], uint64(len(fieldName))) - if err != nil { - return 0, err - } - - // write out the field name - _, err = w.Write([]byte(fieldName)) - if err != nil { - return 0, err - } - } - - // now write out the fields index - rv = uint64(w.Count()) - for fieldID := range fieldsInv { - err := binary.Write(w, binary.BigEndian, fieldsOffsets[fieldID]) - if err != nil { - return 0, err - } - } - - return rv, nil -} - -// FooterSize is the size of the footer record in bytes -// crc + ver + chunk + field offset + stored offset + num docs + docValueOffset -const FooterSize = 4 + 4 + 4 + 8 + 8 + 8 + 8 - -func persistFooter(numDocs, storedIndexOffset, fieldsIndexOffset, docValueOffset uint64, - chunkMode uint32, crcBeforeFooter uint32, writerIn io.Writer) error { - w := NewCountHashWriter(writerIn) - w.crc = 
crcBeforeFooter - - // write out the number of docs - err := binary.Write(w, binary.BigEndian, numDocs) - if err != nil { - return err - } - // write out the stored field index location: - err = binary.Write(w, binary.BigEndian, storedIndexOffset) - if err != nil { - return err - } - // write out the field index location - err = binary.Write(w, binary.BigEndian, fieldsIndexOffset) - if err != nil { - return err - } - // write out the fieldDocValue location - err = binary.Write(w, binary.BigEndian, docValueOffset) - if err != nil { - return err - } - // write out 32-bit chunk factor - err = binary.Write(w, binary.BigEndian, chunkMode) - if err != nil { - return err - } - // write out 32-bit version - err = binary.Write(w, binary.BigEndian, Version) - if err != nil { - return err - } - // write out CRC-32 of everything upto but not including this CRC - err = binary.Write(w, binary.BigEndian, w.crc) - if err != nil { - return err - } - return nil -} - -func writeUvarints(w io.Writer, vals ...uint64) (tw int, err error) { - buf := make([]byte, binary.MaxVarintLen64) - for _, val := range vals { - n := binary.PutUvarint(buf, val) - var nw int - nw, err = w.Write(buf[:n]) - tw += nw - if err != nil { - return tw, err - } - } - return tw, err -} diff --git a/vendor/github.com/blevesearch/zapx/v14/zap.md b/vendor/github.com/blevesearch/zapx/v14/zap.md deleted file mode 100644 index d74dc548b..000000000 --- a/vendor/github.com/blevesearch/zapx/v14/zap.md +++ /dev/null @@ -1,177 +0,0 @@ -# ZAP File Format - -## Legend - -### Sections - - |========| - | | section - |========| - -### Fixed-size fields - - |--------| |----| |--| |-| - | | uint64 | | uint32 | | uint16 | | uint8 - |--------| |----| |--| |-| - -### Varints - - |~~~~~~~~| - | | varint(up to uint64) - |~~~~~~~~| - -### Arbitrary-length fields - - |--------...---| - | | arbitrary-length field (string, vellum, roaring bitmap) - |--------...---| - -### Chunked data - - [--------] - [ ] - [--------] - -## Overview - 
-Footer section describes the configuration of particular ZAP file. The format of footer is version-dependent, so it is necessary to check `V` field before the parsing. - - |==================================================| - | Stored Fields | - |==================================================| - |-----> | Stored Fields Index | - | |==================================================| - | | Dictionaries + Postings + DocValues | - | |==================================================| - | |---> | DocValues Index | - | | |==================================================| - | | | Fields | - | | |==================================================| - | | |-> | Fields Index | - | | | |========|========|========|========|====|====|====| - | | | | D# | SF | F | FDV | CF | V | CC | (Footer) - | | | |========|====|===|====|===|====|===|====|====|====| - | | | | | | - |-+-+-----------------| | | - | |--------------------------| | - |-------------------------------------| - - D#. Number of Docs. - SF. Stored Fields Index Offset. - F. Field Index Offset. - FDV. Field DocValue Offset. - CF. Chunk Factor. - V. Version. - CC. CRC32. - -## Stored Fields - -Stored Fields Index is `D#` consecutive 64-bit unsigned integers - offsets, where relevant Stored Fields Data records are located. - - 0 [SF] [SF + D# * 8] - | Stored Fields | Stored Fields Index | - |================================|==================================| - | | | - | |--------------------| ||--------|--------|. . .|--------|| - | |-> | Stored Fields Data | || 0 | 1 | | D# - 1 || - | | |--------------------| ||--------|----|---|. . .|--------|| - | | | | | - |===|============================|==============|===================| - | | - |-------------------------------------------| - -Stored Fields Data is an arbitrary size record, which consists of metadata and [Snappy](https://github.com/golang/snappy)-compressed data. 
- - Stored Fields Data - |~~~~~~~~|~~~~~~~~|~~~~~~~~...~~~~~~~~|~~~~~~~~...~~~~~~~~| - | MDS | CDS | MD | CD | - |~~~~~~~~|~~~~~~~~|~~~~~~~~...~~~~~~~~|~~~~~~~~...~~~~~~~~| - - MDS. Metadata size. - CDS. Compressed data size. - MD. Metadata. - CD. Snappy-compressed data. - -## Fields - -Fields Index section located between addresses `F` and `len(file) - len(footer)` and consist of `uint64` values (`F1`, `F2`, ...) which are offsets to records in Fields section. We have `F# = (len(file) - len(footer) - F) / sizeof(uint64)` fields. - - - (...) [F] [F + F#] - | Fields | Fields Index. | - |================================|================================| - | | | - | |~~~~~~~~|~~~~~~~~|---...---|||--------|--------|...|--------|| - ||->| Dict | Length | Name ||| 0 | 1 | | F# - 1 || - || |~~~~~~~~|~~~~~~~~|---...---|||--------|----|---|...|--------|| - || | | | - ||===============================|==============|=================| - | | - |----------------------------------------------| - - -## Dictionaries + Postings - -Each of fields has its own dictionary, encoded in [Vellum](https://github.com/couchbase/vellum) format. Dictionary consists of pairs `(term, offset)`, where `offset` indicates the position of postings (list of documents) for this particular term. - - |================================================================|- Dictionaries + - | | Postings + - | | DocValues - | Freq/Norm (chunked) | - | [~~~~~~|~~~~~~~~~~~~~~~~~~~~~~~~~~~~~] | - | |->[ Freq | Norm (float32 under varint) ] | - | | [~~~~~~|~~~~~~~~~~~~~~~~~~~~~~~~~~~~~] | - | | | - | |------------------------------------------------------------| | - | Location Details (chunked) | | - | [~~~~~~|~~~~~|~~~~~~~|~~~~~|~~~~~~|~~~~~~~~|~~~~~] | | - | |->[ Size | Pos | Start | End | Arr# | ArrPos | ... 
] | | - | | [~~~~~~|~~~~~|~~~~~~~|~~~~~|~~~~~~|~~~~~~~~|~~~~~] | | - | | | | - | |----------------------| | | - | Postings List | | | - | |~~~~~~~~|~~~~~|~~|~~~~~~~~|-----------...--| | | - | |->| F/N | LD | Length | ROARING BITMAP | | | - | | |~~~~~|~~|~~~~~~~~|~~~~~~~~|-----------...--| | | - | | |----------------------------------------------| | - | |--------------------------------------| | - | Dictionary | | - | |~~~~~~~~|--------------------------|-...-| | - | |->| Length | VELLUM DATA : (TERM -> OFFSET) | | - | | |~~~~~~~~|----------------------------...-| | - | | | - |======|=========================================================|- DocValues Index - | | | - |======|=========================================================|- Fields - | | | - | |~~~~|~~~|~~~~~~~~|---...---| | - | | Dict | Length | Name | | - | |~~~~~~~~|~~~~~~~~|---...---| | - | | - |================================================================| - -## DocValues - -DocValues Index is `F#` pairs of varints, one pair per field. Each pair of varints indicates start and end point of DocValues slice. - - |================================================================| - | |------...--| | - | |->| DocValues |<-| | - | | |------...--| | | - |==|=================|===========================================|- DocValues Index - ||~|~~~~~~~~~|~~~~~~~|~~| |~~~~~~~~~~~~~~|~~~~~~~~~~~~|| - || DV1 START | DV1 STOP | . . . . . | DV(F#) START | DV(F#) END || - ||~~~~~~~~~~~|~~~~~~~~~~| |~~~~~~~~~~~~~~|~~~~~~~~~~~~|| - |================================================================| - -DocValues is chunked Snappy-compressed values for each document and field. - - [~~~~~~~~~~~~~~~|~~~~~~|~~~~~~~~~|-...-|~~~~~~|~~~~~~~~~|--------------------...-] - [ Doc# in Chunk | Doc1 | Offset1 | ... | DocN | OffsetN | SNAPPY COMPRESSED DATA ] - [~~~~~~~~~~~~~~~|~~~~~~|~~~~~~~~~|-...-|~~~~~~|~~~~~~~~~|--------------------...-] - -Last 16 bytes are description of chunks. 
- - |~~~~~~~~~~~~...~|----------------|----------------| - | Chunk Sizes | Chunk Size Arr | Chunk# | - |~~~~~~~~~~~~...~|----------------|----------------| diff --git a/vendor/github.com/blevesearch/zapx/v15/.gitignore b/vendor/github.com/blevesearch/zapx/v15/.gitignore deleted file mode 100644 index 46d1cfad5..000000000 --- a/vendor/github.com/blevesearch/zapx/v15/.gitignore +++ /dev/null @@ -1,12 +0,0 @@ -#* -*.sublime-* -*~ -.#* -.project -.settings -**/.idea/ -**/*.iml -.DS_Store -/cmd/zap/zap -*.test -tags diff --git a/vendor/github.com/blevesearch/zapx/v15/.golangci.yml b/vendor/github.com/blevesearch/zapx/v15/.golangci.yml deleted file mode 100644 index f0f2f6067..000000000 --- a/vendor/github.com/blevesearch/zapx/v15/.golangci.yml +++ /dev/null @@ -1,29 +0,0 @@ -linters: - # please, do not use `enable-all`: it's deprecated and will be removed soon. - # inverted configuration with `enable-all` and `disable` is not scalable during updates of golangci-lint - disable-all: true - enable: - - bodyclose - - deadcode - - depguard - - dupl - - errcheck - - gofmt - - goimports - - goprintffuncname - - gosec - - gosimple - - govet - - ineffassign - - interfacer - - misspell - - nakedret - - nolintlint - - rowserrcheck - - staticcheck - - structcheck - - typecheck - - unused - - varcheck - - whitespace - diff --git a/vendor/github.com/blevesearch/zapx/v15/LICENSE b/vendor/github.com/blevesearch/zapx/v15/LICENSE deleted file mode 100644 index 7a4a3ea24..000000000 --- a/vendor/github.com/blevesearch/zapx/v15/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. 
- - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of 
the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. \ No newline at end of file diff --git a/vendor/github.com/blevesearch/zapx/v15/README.md b/vendor/github.com/blevesearch/zapx/v15/README.md deleted file mode 100644 index 4cbf1a145..000000000 --- a/vendor/github.com/blevesearch/zapx/v15/README.md +++ /dev/null @@ -1,163 +0,0 @@ -# zapx file format - -The zapx module is fork of [zap](https://github.com/blevesearch/zap) module which maintains file format compatibility, but removes dependency on bleve, and instead depends only on the indepenent interface modules: - -- [bleve_index_api](https://github.com/blevesearch/scorch_segment_api) -- [scorch_segment_api](https://github.com/blevesearch/scorch_segment_api) - -Advanced ZAP File Format Documentation is [here](zap.md). - -The file is written in the reverse order that we typically access data. 
This helps us write in one pass since later sections of the file require file offsets of things we've already written. - -Current usage: - -- mmap the entire file -- crc-32 bytes and version are in fixed position at end of the file -- reading remainder of footer could be version specific -- remainder of footer gives us: - - 3 important offsets (docValue , fields index and stored data index) - - 2 important values (number of docs and chunk factor) -- field data is processed once and memoized onto the heap so that we never have to go back to disk for it -- access to stored data by doc number means first navigating to the stored data index, then accessing a fixed position offset into that slice, which gives us the actual address of the data. the first bytes of that section tell us the size of data so that we know where it ends. -- access to all other indexed data follows the following pattern: - - first know the field name -> convert to id - - next navigate to term dictionary for that field - - some operations stop here and do dictionary ops - - next use dictionary to navigate to posting list for a specific term - - walk posting list - - if necessary, walk posting details as we go - - if location info is desired, consult location bitmap to see if it is there - -## stored fields section - -- for each document - - preparation phase: - - produce a slice of metadata bytes and data bytes - - produce these slices in field id order - - field value is appended to the data slice - - metadata slice is varint encoded with the following values for each field value - - field id (uint16) - - field type (byte) - - field value start offset in uncompressed data slice (uint64) - - field value length (uint64) - - field number of array positions (uint64) - - one additional value for each array position (uint64) - - compress the data slice using snappy - - file writing phase: - - remember the start offset for this document - - write out meta data length (varint uint64) - - write out 
compressed data length (varint uint64) - - write out the metadata bytes - - write out the compressed data bytes - -## stored fields idx - -- for each document - - write start offset (remembered from previous section) of stored data (big endian uint64) - -With this index and a known document number, we have direct access to all the stored field data. - -## posting details (freq/norm) section - -- for each posting list - - produce a slice containing multiple consecutive chunks (each chunk is varint stream) - - produce a slice remembering offsets of where each chunk starts - - preparation phase: - - for each hit in the posting list - - if this hit is in next chunk close out encoding of last chunk and record offset start of next - - encode term frequency (uint64) - - encode norm factor (float32) - - file writing phase: - - remember start position for this posting list details - - write out number of chunks that follow (varint uint64) - - write out length of each chunk (each a varint uint64) - - write out the byte slice containing all the chunk data - -If you know the doc number you're interested in, this format lets you jump to the correct chunk (docNum/chunkFactor) directly and then seek within that chunk until you find it. 
- -## posting details (location) section - -- for each posting list - - produce a slice containing multiple consecutive chunks (each chunk is varint stream) - - produce a slice remembering offsets of where each chunk starts - - preparation phase: - - for each hit in the posting list - - if this hit is in next chunk close out encoding of last chunk and record offset start of next - - encode field (uint16) - - encode field pos (uint64) - - encode field start (uint64) - - encode field end (uint64) - - encode number of array positions to follow (uint64) - - encode each array position (each uint64) - - file writing phase: - - remember start position for this posting list details - - write out number of chunks that follow (varint uint64) - - write out length of each chunk (each a varint uint64) - - write out the byte slice containing all the chunk data - -If you know the doc number you're interested in, this format lets you jump to the correct chunk (docNum/chunkFactor) directly and then seek within that chunk until you find it. 
- -## postings list section - -- for each posting list - - preparation phase: - - encode roaring bitmap posting list to bytes (so we know the length) - - file writing phase: - - remember the start position for this posting list - - write freq/norm details offset (remembered from previous, as varint uint64) - - write location details offset (remembered from previous, as varint uint64) - - write length of encoded roaring bitmap - - write the serialized roaring bitmap data - -## dictionary - -- for each field - - preparation phase: - - encode vellum FST with dictionary data pointing to file offset of posting list (remembered from previous) - - file writing phase: - - remember the start position of this persistDictionary - - write length of vellum data (varint uint64) - - write out vellum data - -## fields section - -- for each field - - file writing phase: - - remember start offset for each field - - write dictionary address (remembered from previous) (varint uint64) - - write length of field name (varint uint64) - - write field name bytes - -## fields idx - -- for each field - - file writing phase: - - write big endian uint64 of start offset for each field - -NOTE: currently we don't know or record the length of this fields index. Instead we rely on the fact that we know it immediately precedes a footer of known size. 
- -## fields DocValue - -- for each field - - preparation phase: - - produce a slice containing multiple consecutive chunks, where each chunk is composed of a meta section followed by compressed columnar field data - - produce a slice remembering the length of each chunk - - file writing phase: - - remember the start position of this first field DocValue offset in the footer - - write out number of chunks that follow (varint uint64) - - write out length of each chunk (each a varint uint64) - - write out the byte slice containing all the chunk data - -NOTE: currently the meta header inside each chunk gives clue to the location offsets and size of the data pertaining to a given docID and any -read operation leverage that meta information to extract the document specific data from the file. - -## footer - -- file writing phase - - write number of docs (big endian uint64) - - write stored field index location (big endian uint64) - - write field index location (big endian uint64) - - write field docValue location (big endian uint64) - - write out chunk factor (big endian uint32) - - write out version (big endian uint32) - - write out file CRC of everything preceding this (big endian uint32) diff --git a/vendor/github.com/blevesearch/zapx/v15/build.go b/vendor/github.com/blevesearch/zapx/v15/build.go deleted file mode 100644 index 14309f90c..000000000 --- a/vendor/github.com/blevesearch/zapx/v15/build.go +++ /dev/null @@ -1,156 +0,0 @@ -// Copyright (c) 2017 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package zap - -import ( - "bufio" - "math" - "os" - - "github.com/blevesearch/vellum" -) - -const Version uint32 = 15 - -const Type string = "zap" - -const fieldNotUninverted = math.MaxUint64 - -func (sb *SegmentBase) Persist(path string) error { - return PersistSegmentBase(sb, path) -} - -// PersistSegmentBase persists SegmentBase in the zap file format. -func PersistSegmentBase(sb *SegmentBase, path string) error { - flag := os.O_RDWR | os.O_CREATE - - f, err := os.OpenFile(path, flag, 0600) - if err != nil { - return err - } - - cleanup := func() { - _ = f.Close() - _ = os.Remove(path) - } - - br := bufio.NewWriter(f) - - _, err = br.Write(sb.mem) - if err != nil { - cleanup() - return err - } - - err = persistFooter(sb.numDocs, sb.storedIndexOffset, sb.fieldsIndexOffset, sb.docValueOffset, - sb.chunkMode, sb.memCRC, br) - if err != nil { - cleanup() - return err - } - - err = br.Flush() - if err != nil { - cleanup() - return err - } - - err = f.Sync() - if err != nil { - cleanup() - return err - } - - err = f.Close() - if err != nil { - cleanup() - return err - } - - return nil -} - -func persistStoredFieldValues(fieldID int, - storedFieldValues [][]byte, stf []byte, spf [][]uint64, - curr int, metaEncode varintEncoder, data []byte) ( - int, []byte, error) { - for i := 0; i < len(storedFieldValues); i++ { - // encode field - _, err := metaEncode(uint64(fieldID)) - if err != nil { - return 0, nil, err - } - // encode type - _, err = metaEncode(uint64(stf[i])) - if err != nil { - return 0, nil, err - } - // encode start offset - _, err = metaEncode(uint64(curr)) - if err != nil { - return 0, nil, err - } - // end len - _, err = metaEncode(uint64(len(storedFieldValues[i]))) - if err != nil { - return 0, nil, err - } - // encode number of array pos - _, err = metaEncode(uint64(len(spf[i]))) - if err != nil { - return 0, nil, err - } - // encode all array 
positions - for _, pos := range spf[i] { - _, err = metaEncode(pos) - if err != nil { - return 0, nil, err - } - } - - data = append(data, storedFieldValues[i]...) - curr += len(storedFieldValues[i]) - } - - return curr, data, nil -} - -func InitSegmentBase(mem []byte, memCRC uint32, chunkMode uint32, - fieldsMap map[string]uint16, fieldsInv []string, numDocs uint64, - storedIndexOffset uint64, fieldsIndexOffset uint64, docValueOffset uint64, - dictLocs []uint64) (*SegmentBase, error) { - sb := &SegmentBase{ - mem: mem, - memCRC: memCRC, - chunkMode: chunkMode, - fieldsMap: fieldsMap, - fieldsInv: fieldsInv, - numDocs: numDocs, - storedIndexOffset: storedIndexOffset, - fieldsIndexOffset: fieldsIndexOffset, - docValueOffset: docValueOffset, - dictLocs: dictLocs, - fieldDvReaders: make(map[uint16]*docValueReader), - fieldFSTs: make(map[uint16]*vellum.FST), - } - sb.updateSize() - - err := sb.loadDvReaders() - if err != nil { - return nil, err - } - - return sb, nil -} diff --git a/vendor/github.com/blevesearch/zapx/v15/chunk.go b/vendor/github.com/blevesearch/zapx/v15/chunk.go deleted file mode 100644 index 4307d0ed2..000000000 --- a/vendor/github.com/blevesearch/zapx/v15/chunk.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright (c) 2019 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package zap - -import ( - "fmt" -) - -// LegacyChunkMode was the original chunk mode (always chunk size 1024) -// this mode is still used for chunking doc values. 
-var LegacyChunkMode uint32 = 1024 - -// DefaultChunkMode is the most recent improvement to chunking and should -// be used by default. -var DefaultChunkMode uint32 = 1026 - -func getChunkSize(chunkMode uint32, cardinality uint64, maxDocs uint64) (uint64, error) { - switch { - // any chunkMode <= 1024 will always chunk with chunkSize=chunkMode - case chunkMode <= 1024: - // legacy chunk size - return uint64(chunkMode), nil - - case chunkMode == 1025: - // attempt at simple improvement - // theory - the point of chunking is to put a bound on the maximum number of - // calls to Next() needed to find a random document. ie, you should be able - // to do one jump to the correct chunk, and then walk through at most - // chunk-size items - // previously 1024 was chosen as the chunk size, but this is particularly - // wasteful for low cardinality terms. the observation is that if there - // are less than 1024 items, why not put them all in one chunk, - // this way you'll still achieve the same goal of visiting at most - // chunk-size items. - // no attempt is made to tweak any other case - if cardinality <= 1024 { - return maxDocs, nil - } - return 1024, nil - - case chunkMode == 1026: - // improve upon the ideas tested in chunkMode 1025 - // the observation that the fewest number of dense chunks is the most - // desirable layout, given the built-in assumptions of chunking - // (that we want to put an upper-bound on the number of items you must - // walk over without skipping, currently tuned to 1024) - // - // 1. compute the number of chunks needed (max 1024/chunk) - // 2. 
convert to chunkSize, dividing into maxDocs - numChunks := (cardinality / 1024) + 1 - chunkSize := maxDocs / numChunks - return chunkSize, nil - } - return 0, fmt.Errorf("unknown chunk mode %d", chunkMode) -} diff --git a/vendor/github.com/blevesearch/zapx/v15/contentcoder.go b/vendor/github.com/blevesearch/zapx/v15/contentcoder.go deleted file mode 100644 index c145b5a11..000000000 --- a/vendor/github.com/blevesearch/zapx/v15/contentcoder.go +++ /dev/null @@ -1,243 +0,0 @@ -// Copyright (c) 2017 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package zap - -import ( - "bytes" - "encoding/binary" - "io" - "reflect" - - "github.com/golang/snappy" -) - -var reflectStaticSizeMetaData int - -func init() { - var md MetaData - reflectStaticSizeMetaData = int(reflect.TypeOf(md).Size()) -} - -var termSeparator byte = 0xff -var termSeparatorSplitSlice = []byte{termSeparator} - -type chunkedContentCoder struct { - final []byte - chunkSize uint64 - currChunk uint64 - chunkLens []uint64 - - w io.Writer - progressiveWrite bool - - chunkMetaBuf bytes.Buffer - chunkBuf bytes.Buffer - - chunkMeta []MetaData - - compressed []byte // temp buf for snappy compression -} - -// MetaData represents the data information inside a -// chunk. 
-type MetaData struct { - DocNum uint64 // docNum of the data inside the chunk - DocDvOffset uint64 // offset of data inside the chunk for the given docid -} - -// newChunkedContentCoder returns a new chunk content coder which -// packs data into chunks based on the provided chunkSize -func newChunkedContentCoder(chunkSize uint64, maxDocNum uint64, - w io.Writer, progressiveWrite bool) *chunkedContentCoder { - total := maxDocNum/chunkSize + 1 - rv := &chunkedContentCoder{ - chunkSize: chunkSize, - chunkLens: make([]uint64, total), - chunkMeta: make([]MetaData, 0, total), - w: w, - progressiveWrite: progressiveWrite, - } - - return rv -} - -// Reset lets you reuse this chunked content coder. Buffers are reset -// and re used. You cannot change the chunk size. -func (c *chunkedContentCoder) Reset() { - c.currChunk = 0 - c.final = c.final[:0] - c.chunkBuf.Reset() - c.chunkMetaBuf.Reset() - for i := range c.chunkLens { - c.chunkLens[i] = 0 - } - c.chunkMeta = c.chunkMeta[:0] -} - -func (c *chunkedContentCoder) SetChunkSize(chunkSize uint64, maxDocNum uint64) { - total := int(maxDocNum/chunkSize + 1) - c.chunkSize = chunkSize - if cap(c.chunkLens) < total { - c.chunkLens = make([]uint64, total) - } else { - c.chunkLens = c.chunkLens[:total] - } - if cap(c.chunkMeta) < total { - c.chunkMeta = make([]MetaData, 0, total) - } -} - -// Close indicates you are done calling Add() this allows -// the final chunk to be encoded. 
-func (c *chunkedContentCoder) Close() error { - return c.flushContents() -} - -func (c *chunkedContentCoder) flushContents() error { - // flush the contents, with meta information at first - buf := make([]byte, binary.MaxVarintLen64) - n := binary.PutUvarint(buf, uint64(len(c.chunkMeta))) - _, err := c.chunkMetaBuf.Write(buf[:n]) - if err != nil { - return err - } - - // write out the metaData slice - for _, meta := range c.chunkMeta { - _, err := writeUvarints(&c.chunkMetaBuf, meta.DocNum, meta.DocDvOffset) - if err != nil { - return err - } - } - - // write the metadata to final data - metaData := c.chunkMetaBuf.Bytes() - c.final = append(c.final, c.chunkMetaBuf.Bytes()...) - // write the compressed data to the final data - c.compressed = snappy.Encode(c.compressed[:cap(c.compressed)], c.chunkBuf.Bytes()) - c.final = append(c.final, c.compressed...) - - c.chunkLens[c.currChunk] = uint64(len(c.compressed) + len(metaData)) - - if c.progressiveWrite { - _, err := c.w.Write(c.final) - if err != nil { - return err - } - c.final = c.final[:0] - } - - return nil -} - -// Add encodes the provided byte slice into the correct chunk for the provided -// doc num. You MUST call Add() with increasing docNums. -func (c *chunkedContentCoder) Add(docNum uint64, vals []byte) error { - chunk := docNum / c.chunkSize - if chunk != c.currChunk { - // flush out the previous chunk details - err := c.flushContents() - if err != nil { - return err - } - // clearing the chunk specific meta for next chunk - c.chunkBuf.Reset() - c.chunkMetaBuf.Reset() - c.chunkMeta = c.chunkMeta[:0] - c.currChunk = chunk - } - - // get the starting offset for this doc - dvOffset := c.chunkBuf.Len() - dvSize, err := c.chunkBuf.Write(vals) - if err != nil { - return err - } - - c.chunkMeta = append(c.chunkMeta, MetaData{ - DocNum: docNum, - DocDvOffset: uint64(dvOffset + dvSize), - }) - return nil -} - -// Write commits all the encoded chunked contents to the provided writer. -// -// | ..... data ..... 
| chunk offsets (varints) -// | position of chunk offsets (uint64) | number of offsets (uint64) | -// -func (c *chunkedContentCoder) Write() (int, error) { - var tw int - - if c.final != nil { - // write out the data section first - nw, err := c.w.Write(c.final) - tw += nw - if err != nil { - return tw, err - } - } - - chunkOffsetsStart := uint64(tw) - - if cap(c.final) < binary.MaxVarintLen64 { - c.final = make([]byte, binary.MaxVarintLen64) - } else { - c.final = c.final[0:binary.MaxVarintLen64] - } - chunkOffsets := modifyLengthsToEndOffsets(c.chunkLens) - // write out the chunk offsets - for _, chunkOffset := range chunkOffsets { - n := binary.PutUvarint(c.final, chunkOffset) - nw, err := c.w.Write(c.final[:n]) - tw += nw - if err != nil { - return tw, err - } - } - - chunkOffsetsLen := uint64(tw) - chunkOffsetsStart - - c.final = c.final[0:8] - // write out the length of chunk offsets - binary.BigEndian.PutUint64(c.final, chunkOffsetsLen) - nw, err := c.w.Write(c.final) - tw += nw - if err != nil { - return tw, err - } - - // write out the number of chunks - binary.BigEndian.PutUint64(c.final, uint64(len(c.chunkLens))) - nw, err = c.w.Write(c.final) - tw += nw - if err != nil { - return tw, err - } - - c.final = c.final[:0] - - return tw, nil -} - -// ReadDocValueBoundary elicits the start, end offsets from a -// metaData header slice -func ReadDocValueBoundary(chunk int, metaHeaders []MetaData) (uint64, uint64) { - var start uint64 - if chunk > 0 { - start = metaHeaders[chunk-1].DocDvOffset - } - return start, metaHeaders[chunk].DocDvOffset -} diff --git a/vendor/github.com/blevesearch/zapx/v15/count.go b/vendor/github.com/blevesearch/zapx/v15/count.go deleted file mode 100644 index b6135359f..000000000 --- a/vendor/github.com/blevesearch/zapx/v15/count.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright (c) 2017 Couchbase, Inc. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package zap - -import ( - "hash/crc32" - "io" - - segment "github.com/blevesearch/scorch_segment_api/v2" -) - -// CountHashWriter is a wrapper around a Writer which counts the number of -// bytes which have been written and computes a crc32 hash -type CountHashWriter struct { - w io.Writer - crc uint32 - n int - s segment.StatsReporter -} - -// NewCountHashWriter returns a CountHashWriter which wraps the provided Writer -func NewCountHashWriter(w io.Writer) *CountHashWriter { - return &CountHashWriter{w: w} -} - -func NewCountHashWriterWithStatsReporter(w io.Writer, s segment.StatsReporter) *CountHashWriter { - return &CountHashWriter{w: w, s: s} -} - -// Write writes the provided bytes to the wrapped writer and counts the bytes -func (c *CountHashWriter) Write(b []byte) (int, error) { - n, err := c.w.Write(b) - c.crc = crc32.Update(c.crc, crc32.IEEETable, b[:n]) - c.n += n - if c.s != nil { - c.s.ReportBytesWritten(uint64(n)) - } - return n, err -} - -// Count returns the number of bytes written -func (c *CountHashWriter) Count() int { - return c.n -} - -// Sum32 returns the CRC-32 hash of the content written to this writer -func (c *CountHashWriter) Sum32() uint32 { - return c.crc -} diff --git a/vendor/github.com/blevesearch/zapx/v15/dict.go b/vendor/github.com/blevesearch/zapx/v15/dict.go deleted file mode 100644 index e30bf2420..000000000 --- a/vendor/github.com/blevesearch/zapx/v15/dict.go +++ 
/dev/null @@ -1,158 +0,0 @@ -// Copyright (c) 2017 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package zap - -import ( - "fmt" - - "github.com/RoaringBitmap/roaring" - index "github.com/blevesearch/bleve_index_api" - segment "github.com/blevesearch/scorch_segment_api/v2" - "github.com/blevesearch/vellum" -) - -// Dictionary is the zap representation of the term dictionary -type Dictionary struct { - sb *SegmentBase - field string - fieldID uint16 - fst *vellum.FST - fstReader *vellum.Reader -} - -// represents an immutable, empty dictionary -var emptyDictionary = &Dictionary{} - -// PostingsList returns the postings list for the specified term -func (d *Dictionary) PostingsList(term []byte, except *roaring.Bitmap, - prealloc segment.PostingsList) (segment.PostingsList, error) { - var preallocPL *PostingsList - pl, ok := prealloc.(*PostingsList) - if ok && pl != nil { - preallocPL = pl - } - return d.postingsList(term, except, preallocPL) -} - -func (d *Dictionary) postingsList(term []byte, except *roaring.Bitmap, rv *PostingsList) (*PostingsList, error) { - if d.fstReader == nil { - if rv == nil || rv == emptyPostingsList { - return emptyPostingsList, nil - } - return d.postingsListInit(rv, except), nil - } - - postingsOffset, exists, err := d.fstReader.Get(term) - if err != nil { - return nil, fmt.Errorf("vellum err: %v", err) - } - if !exists { - if rv == nil || rv == emptyPostingsList { - return emptyPostingsList, nil - } - return 
d.postingsListInit(rv, except), nil - } - - return d.postingsListFromOffset(postingsOffset, except, rv) -} - -func (d *Dictionary) postingsListFromOffset(postingsOffset uint64, except *roaring.Bitmap, rv *PostingsList) (*PostingsList, error) { - rv = d.postingsListInit(rv, except) - - err := rv.read(postingsOffset, d) - if err != nil { - return nil, err - } - - return rv, nil -} - -func (d *Dictionary) postingsListInit(rv *PostingsList, except *roaring.Bitmap) *PostingsList { - if rv == nil || rv == emptyPostingsList { - rv = &PostingsList{} - } else { - postings := rv.postings - if postings != nil { - postings.Clear() - } - - *rv = PostingsList{} // clear the struct - - rv.postings = postings - } - rv.sb = d.sb - rv.except = except - return rv -} - -func (d *Dictionary) Contains(key []byte) (bool, error) { - if d.fst != nil { - return d.fst.Contains(key) - } - return false, nil -} - -// AutomatonIterator returns an iterator which only visits terms -// having the the vellum automaton and start/end key range -func (d *Dictionary) AutomatonIterator(a segment.Automaton, - startKeyInclusive, endKeyExclusive []byte) segment.DictionaryIterator { - if d.fst != nil { - rv := &DictionaryIterator{ - d: d, - } - - itr, err := d.fst.Search(a, startKeyInclusive, endKeyExclusive) - if err == nil { - rv.itr = itr - } else if err != vellum.ErrIteratorDone { - rv.err = err - } - - return rv - } - return emptyDictionaryIterator -} - -// DictionaryIterator is an iterator for term dictionary -type DictionaryIterator struct { - d *Dictionary - itr vellum.Iterator - err error - tmp PostingsList - entry index.DictEntry - omitCount bool -} - -var emptyDictionaryIterator = &DictionaryIterator{} - -// Next returns the next entry in the dictionary -func (i *DictionaryIterator) Next() (*index.DictEntry, error) { - if i.err != nil && i.err != vellum.ErrIteratorDone { - return nil, i.err - } else if i.itr == nil || i.err == vellum.ErrIteratorDone { - return nil, nil - } - term, postingsOffset 
:= i.itr.Current() - i.entry.Term = string(term) - if !i.omitCount { - i.err = i.tmp.read(postingsOffset, i.d) - if i.err != nil { - return nil, i.err - } - i.entry.Count = i.tmp.Count() - } - i.err = i.itr.Next() - return &i.entry, nil -} diff --git a/vendor/github.com/blevesearch/zapx/v15/docvalues.go b/vendor/github.com/blevesearch/zapx/v15/docvalues.go deleted file mode 100644 index a530aa5ad..000000000 --- a/vendor/github.com/blevesearch/zapx/v15/docvalues.go +++ /dev/null @@ -1,311 +0,0 @@ -// Copyright (c) 2017 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package zap - -import ( - "bytes" - "encoding/binary" - "fmt" - "math" - "reflect" - "sort" - - index "github.com/blevesearch/bleve_index_api" - segment "github.com/blevesearch/scorch_segment_api/v2" - "github.com/golang/snappy" -) - -var reflectStaticSizedocValueReader int - -func init() { - var dvi docValueReader - reflectStaticSizedocValueReader = int(reflect.TypeOf(dvi).Size()) -} - -type docNumTermsVisitor func(docNum uint64, terms []byte) error - -type docVisitState struct { - dvrs map[uint16]*docValueReader - segment *SegmentBase -} - -type docValueReader struct { - field string - curChunkNum uint64 - chunkOffsets []uint64 - dvDataLoc uint64 - curChunkHeader []MetaData - curChunkData []byte // compressed data cache - uncompressed []byte // temp buf for snappy decompression -} - -func (di *docValueReader) size() int { - return reflectStaticSizedocValueReader + SizeOfPtr + - len(di.field) + - len(di.chunkOffsets)*SizeOfUint64 + - len(di.curChunkHeader)*reflectStaticSizeMetaData + - len(di.curChunkData) -} - -func (di *docValueReader) cloneInto(rv *docValueReader) *docValueReader { - if rv == nil { - rv = &docValueReader{} - } - - rv.field = di.field - rv.curChunkNum = math.MaxUint64 - rv.chunkOffsets = di.chunkOffsets // immutable, so it's sharable - rv.dvDataLoc = di.dvDataLoc - rv.curChunkHeader = rv.curChunkHeader[:0] - rv.curChunkData = nil - rv.uncompressed = rv.uncompressed[:0] - - return rv -} - -func (di *docValueReader) curChunkNumber() uint64 { - return di.curChunkNum -} - -func (s *SegmentBase) loadFieldDocValueReader(field string, - fieldDvLocStart, fieldDvLocEnd uint64) (*docValueReader, error) { - // get the docValue offset for the given fields - if fieldDvLocStart == fieldNotUninverted { - // no docValues found, nothing to do - return nil, nil - } - - // read the number of chunks, and chunk offsets position - var numChunks, chunkOffsetsPosition uint64 - - if fieldDvLocEnd-fieldDvLocStart > 16 { - numChunks = 
binary.BigEndian.Uint64(s.mem[fieldDvLocEnd-8 : fieldDvLocEnd]) - // read the length of chunk offsets - chunkOffsetsLen := binary.BigEndian.Uint64(s.mem[fieldDvLocEnd-16 : fieldDvLocEnd-8]) - // acquire position of chunk offsets - chunkOffsetsPosition = (fieldDvLocEnd - 16) - chunkOffsetsLen - } else { - return nil, fmt.Errorf("loadFieldDocValueReader: fieldDvLoc too small: %d-%d", fieldDvLocEnd, fieldDvLocStart) - } - - fdvIter := &docValueReader{ - curChunkNum: math.MaxUint64, - field: field, - chunkOffsets: make([]uint64, int(numChunks)), - } - - // read the chunk offsets - var offset uint64 - for i := 0; i < int(numChunks); i++ { - loc, read := binary.Uvarint(s.mem[chunkOffsetsPosition+offset : chunkOffsetsPosition+offset+binary.MaxVarintLen64]) - if read <= 0 { - return nil, fmt.Errorf("corrupted chunk offset during segment load") - } - fdvIter.chunkOffsets[i] = loc - offset += uint64(read) - } - - // set the data offset - fdvIter.dvDataLoc = fieldDvLocStart - - return fdvIter, nil -} - -func (di *docValueReader) loadDvChunk(chunkNumber uint64, s *SegmentBase) error { - // advance to the chunk where the docValues - // reside for the given docNum - destChunkDataLoc, curChunkEnd := di.dvDataLoc, di.dvDataLoc - start, end := readChunkBoundary(int(chunkNumber), di.chunkOffsets) - if start >= end { - di.curChunkHeader = di.curChunkHeader[:0] - di.curChunkData = nil - di.curChunkNum = chunkNumber - di.uncompressed = di.uncompressed[:0] - return nil - } - - destChunkDataLoc += start - curChunkEnd += end - - // read the number of docs reside in the chunk - numDocs, read := binary.Uvarint(s.mem[destChunkDataLoc : destChunkDataLoc+binary.MaxVarintLen64]) - if read <= 0 { - return fmt.Errorf("failed to read the chunk") - } - chunkMetaLoc := destChunkDataLoc + uint64(read) - - offset := uint64(0) - if cap(di.curChunkHeader) < int(numDocs) { - di.curChunkHeader = make([]MetaData, int(numDocs)) - } else { - di.curChunkHeader = di.curChunkHeader[:int(numDocs)] - } - for i := 
0; i < int(numDocs); i++ { - di.curChunkHeader[i].DocNum, read = binary.Uvarint(s.mem[chunkMetaLoc+offset : chunkMetaLoc+offset+binary.MaxVarintLen64]) - offset += uint64(read) - di.curChunkHeader[i].DocDvOffset, read = binary.Uvarint(s.mem[chunkMetaLoc+offset : chunkMetaLoc+offset+binary.MaxVarintLen64]) - offset += uint64(read) - } - - compressedDataLoc := chunkMetaLoc + offset - dataLength := curChunkEnd - compressedDataLoc - di.curChunkData = s.mem[compressedDataLoc : compressedDataLoc+dataLength] - di.curChunkNum = chunkNumber - di.uncompressed = di.uncompressed[:0] - return nil -} - -func (di *docValueReader) iterateAllDocValues(s *SegmentBase, visitor docNumTermsVisitor) error { - for i := 0; i < len(di.chunkOffsets); i++ { - err := di.loadDvChunk(uint64(i), s) - if err != nil { - return err - } - if di.curChunkData == nil || len(di.curChunkHeader) == 0 { - continue - } - - // uncompress the already loaded data - uncompressed, err := snappy.Decode(di.uncompressed[:cap(di.uncompressed)], di.curChunkData) - if err != nil { - return err - } - di.uncompressed = uncompressed - - start := uint64(0) - for _, entry := range di.curChunkHeader { - err = visitor(entry.DocNum, uncompressed[start:entry.DocDvOffset]) - if err != nil { - return err - } - - start = entry.DocDvOffset - } - } - - return nil -} - -func (di *docValueReader) visitDocValues(docNum uint64, - visitor index.DocValueVisitor) error { - // binary search the term locations for the docNum - start, end := di.getDocValueLocs(docNum) - if start == math.MaxUint64 || end == math.MaxUint64 || start == end { - return nil - } - - var uncompressed []byte - var err error - // use the uncompressed copy if available - if len(di.uncompressed) > 0 { - uncompressed = di.uncompressed - } else { - // uncompress the already loaded data - uncompressed, err = snappy.Decode(di.uncompressed[:cap(di.uncompressed)], di.curChunkData) - if err != nil { - return err - } - di.uncompressed = uncompressed - } - - // pick the terms 
for the given docNum - uncompressed = uncompressed[start:end] - for { - i := bytes.Index(uncompressed, termSeparatorSplitSlice) - if i < 0 { - break - } - - visitor(di.field, uncompressed[0:i]) - uncompressed = uncompressed[i+1:] - } - - return nil -} - -func (di *docValueReader) getDocValueLocs(docNum uint64) (uint64, uint64) { - i := sort.Search(len(di.curChunkHeader), func(i int) bool { - return di.curChunkHeader[i].DocNum >= docNum - }) - if i < len(di.curChunkHeader) && di.curChunkHeader[i].DocNum == docNum { - return ReadDocValueBoundary(i, di.curChunkHeader) - } - return math.MaxUint64, math.MaxUint64 -} - -// VisitDocValues is an implementation of the -// DocValueVisitable interface -func (s *SegmentBase) VisitDocValues(localDocNum uint64, fields []string, - visitor index.DocValueVisitor, dvsIn segment.DocVisitState) ( - segment.DocVisitState, error) { - dvs, ok := dvsIn.(*docVisitState) - if !ok || dvs == nil { - dvs = &docVisitState{} - } else { - if dvs.segment != s { - dvs.segment = s - dvs.dvrs = nil - } - } - - var fieldIDPlus1 uint16 - if dvs.dvrs == nil { - dvs.dvrs = make(map[uint16]*docValueReader, len(fields)) - for _, field := range fields { - if fieldIDPlus1, ok = s.fieldsMap[field]; !ok { - continue - } - fieldID := fieldIDPlus1 - 1 - if dvIter, exists := s.fieldDvReaders[fieldID]; exists && - dvIter != nil { - dvs.dvrs[fieldID] = dvIter.cloneInto(dvs.dvrs[fieldID]) - } - } - } - - // find the chunkNumber where the docValues are stored - // NOTE: doc values continue to use legacy chunk mode - chunkFactor, err := getChunkSize(LegacyChunkMode, 0, 0) - if err != nil { - return nil, err - } - docInChunk := localDocNum / chunkFactor - var dvr *docValueReader - for _, field := range fields { - if fieldIDPlus1, ok = s.fieldsMap[field]; !ok { - continue - } - fieldID := fieldIDPlus1 - 1 - if dvr, ok = dvs.dvrs[fieldID]; ok && dvr != nil { - // check if the chunk is already loaded - if docInChunk != dvr.curChunkNumber() { - err := 
dvr.loadDvChunk(docInChunk, s) - if err != nil { - return dvs, err - } - } - - _ = dvr.visitDocValues(localDocNum, visitor) - } - } - return dvs, nil -} - -// VisitableDocValueFields returns the list of fields with -// persisted doc value terms ready to be visitable using the -// VisitDocumentFieldTerms method. -func (s *SegmentBase) VisitableDocValueFields() ([]string, error) { - return s.fieldDvNames, nil -} diff --git a/vendor/github.com/blevesearch/zapx/v15/enumerator.go b/vendor/github.com/blevesearch/zapx/v15/enumerator.go deleted file mode 100644 index 972a22416..000000000 --- a/vendor/github.com/blevesearch/zapx/v15/enumerator.go +++ /dev/null @@ -1,138 +0,0 @@ -// Copyright (c) 2018 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package zap - -import ( - "bytes" - - "github.com/blevesearch/vellum" -) - -// enumerator provides an ordered traversal of multiple vellum -// iterators. Like JOIN of iterators, the enumerator produces a -// sequence of (key, iteratorIndex, value) tuples, sorted by key ASC, -// then iteratorIndex ASC, where the same key might be seen or -// repeated across multiple child iterators. 
-type enumerator struct { - itrs []vellum.Iterator - currKs [][]byte - currVs []uint64 - - lowK []byte - lowIdxs []int - lowCurr int -} - -// newEnumerator returns a new enumerator over the vellum Iterators -func newEnumerator(itrs []vellum.Iterator) (*enumerator, error) { - rv := &enumerator{ - itrs: itrs, - currKs: make([][]byte, len(itrs)), - currVs: make([]uint64, len(itrs)), - lowIdxs: make([]int, 0, len(itrs)), - } - for i, itr := range rv.itrs { - rv.currKs[i], rv.currVs[i] = itr.Current() - } - rv.updateMatches(false) - if rv.lowK == nil && len(rv.lowIdxs) == 0 { - return rv, vellum.ErrIteratorDone - } - return rv, nil -} - -// updateMatches maintains the low key matches based on the currKs -func (m *enumerator) updateMatches(skipEmptyKey bool) { - m.lowK = nil - m.lowIdxs = m.lowIdxs[:0] - m.lowCurr = 0 - - for i, key := range m.currKs { - if (key == nil && m.currVs[i] == 0) || // in case of empty iterator - (len(key) == 0 && skipEmptyKey) { // skip empty keys - continue - } - - cmp := bytes.Compare(key, m.lowK) - if cmp < 0 || len(m.lowIdxs) == 0 { - // reached a new low - m.lowK = key - m.lowIdxs = m.lowIdxs[:0] - m.lowIdxs = append(m.lowIdxs, i) - } else if cmp == 0 { - m.lowIdxs = append(m.lowIdxs, i) - } - } -} - -// Current returns the enumerator's current key, iterator-index, and -// value. If the enumerator is not pointing at a valid value (because -// Next returned an error previously), Current will return nil,0,0. -func (m *enumerator) Current() ([]byte, int, uint64) { - var i int - var v uint64 - if m.lowCurr < len(m.lowIdxs) { - i = m.lowIdxs[m.lowCurr] - v = m.currVs[i] - } - return m.lowK, i, v -} - -// GetLowIdxsAndValues will return all of the iterator indices -// which point to the current key, and their corresponding -// values. This can be used by advanced caller which may need -// to peek into these other sets of data before processing. 
-func (m *enumerator) GetLowIdxsAndValues() ([]int, []uint64) { - values := make([]uint64, 0, len(m.lowIdxs)) - for _, idx := range m.lowIdxs { - values = append(values, m.currVs[idx]) - } - return m.lowIdxs, values -} - -// Next advances the enumerator to the next key/iterator/value result, -// else vellum.ErrIteratorDone is returned. -func (m *enumerator) Next() error { - m.lowCurr += 1 - if m.lowCurr >= len(m.lowIdxs) { - // move all the current low iterators forwards - for _, vi := range m.lowIdxs { - err := m.itrs[vi].Next() - if err != nil && err != vellum.ErrIteratorDone { - return err - } - m.currKs[vi], m.currVs[vi] = m.itrs[vi].Current() - } - // can skip any empty keys encountered at this point - m.updateMatches(true) - } - if m.lowK == nil && len(m.lowIdxs) == 0 { - return vellum.ErrIteratorDone - } - return nil -} - -// Close all the underlying Iterators. The first error, if any, will -// be returned. -func (m *enumerator) Close() error { - var rv error - for _, itr := range m.itrs { - err := itr.Close() - if rv == nil { - rv = err - } - } - return rv -} diff --git a/vendor/github.com/blevesearch/zapx/v15/go.mod b/vendor/github.com/blevesearch/zapx/v15/go.mod deleted file mode 100644 index 5f098983b..000000000 --- a/vendor/github.com/blevesearch/zapx/v15/go.mod +++ /dev/null @@ -1,13 +0,0 @@ -module github.com/blevesearch/zapx/v15 - -go 1.12 - -require ( - github.com/RoaringBitmap/roaring v0.4.23 - github.com/blevesearch/bleve_index_api v1.0.0 - github.com/blevesearch/mmap-go v1.0.2 - github.com/blevesearch/scorch_segment_api/v2 v2.0.1 - github.com/blevesearch/vellum v1.0.3 - github.com/golang/snappy v0.0.1 - github.com/spf13/cobra v0.0.5 -) diff --git a/vendor/github.com/blevesearch/zapx/v15/go.sum b/vendor/github.com/blevesearch/zapx/v15/go.sum deleted file mode 100644 index 68e45348c..000000000 --- a/vendor/github.com/blevesearch/zapx/v15/go.sum +++ /dev/null @@ -1,73 +0,0 @@ -github.com/BurntSushi/toml v0.3.1/go.mod 
h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/RoaringBitmap/roaring v0.4.23 h1:gpyfd12QohbqhFO4NVDUdoPOCXsyahYRQhINmlHxKeo= -github.com/RoaringBitmap/roaring v0.4.23/go.mod h1:D0gp8kJQgE1A4LQ5wFLggQEyvDi06Mq5mKs52e1TwOo= -github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= -github.com/blevesearch/bleve_index_api v1.0.0 h1:Ds3XeuTxjXCkG6pgIwWDRyooJKNIuOKemnN0N0IkhTU= -github.com/blevesearch/bleve_index_api v1.0.0/go.mod h1:fiwKS0xLEm+gBRgv5mumf0dhgFr2mDgZah1pqv1c1M4= -github.com/blevesearch/mmap-go v1.0.2 h1:JtMHb+FgQCTTYIhtMvimw15dJwu1Y5lrZDMOFXVWPk0= -github.com/blevesearch/mmap-go v1.0.2/go.mod h1:ol2qBqYaOUsGdm7aRMRrYGgPvnwLe6Y+7LMvAB5IbSA= -github.com/blevesearch/scorch_segment_api/v2 v2.0.1 h1:fd+hPtZ8GsbqPK1HslGp7Vhoik4arZteA/IsCEgOisw= -github.com/blevesearch/scorch_segment_api/v2 v2.0.1/go.mod h1:lq7yK2jQy1yQjtjTfU931aVqz7pYxEudHaDwOt1tXfU= -github.com/blevesearch/vellum v1.0.3 h1:U86G41A7CtXNzzpIJHM8lSTUqz1Mp8U870TkcdCzZc8= -github.com/blevesearch/vellum v1.0.3/go.mod h1:2u5ax02KeDuNWu4/C+hVQMD6uLN4txH1JbtpaDNLJRo= -github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= -github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/glycerine/go-unsnap-stream v0.0.0-20181221182339-f9677308dec2 h1:Ujru1hufTHVb++eG6OuNDKMxZnGIvF6o/u8q/8h2+I4= 
-github.com/glycerine/go-unsnap-stream v0.0.0-20181221182339-f9677308dec2/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE= -github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31 h1:gclg6gY70GLy3PbkQ1AERPfmLMMagS60DKF78eWwLn8= -github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24= -github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= -github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/gopherjs/gopherjs v0.0.0-20190910122728-9d188e94fb99 h1:twflg0XRTjwKpxb/jFExr4HGq6on2dEOmnL6FV+fgPw= -github.com/gopherjs/gopherjs v0.0.0-20190910122728-9d188e94fb99/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= -github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= -github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg= -github.com/mschoch/smat v0.2.0 h1:8imxQsjDm8yFEAVBe7azKmKSgzSkZXDuKkSq9374khM= -github.com/mschoch/smat v0.2.0/go.mod h1:kc9mz7DoBKqDyiRL7VZN8KvXQMWeTaVnttLRXOlotKw= -github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/philhofer/fwd v1.0.0 h1:UbZqGr5Y38ApvM/V/jEljVxwocdweyH+vmYvRPBnbqQ= 
-github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= -github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s= -github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= -github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= -github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= -github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/tinylib/msgp v1.1.0 h1:9fQd+ICuRIu/ue4vxJZu6/LzxN0HwMds2nq/0cFvxHU= -github.com/tinylib/msgp v1.1.0/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= -github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= -github.com/willf/bitset v1.1.10 h1:NotGKqX0KwQ72NUzqrjZq5ipPNDQex9lo3WpaS8L2sc= -github.com/willf/bitset v1.1.10/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= -github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= 
-golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181221143128-b4a75ba826a6/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a h1:aYOabOQFp6Vj6W1F80affTUvO9UxmJRx8K0gsfABByQ= -golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/blevesearch/zapx/v15/memuvarint.go b/vendor/github.com/blevesearch/zapx/v15/memuvarint.go deleted file mode 100644 index 0c10c83a4..000000000 --- a/vendor/github.com/blevesearch/zapx/v15/memuvarint.go +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright (c) 2020 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package zap - -import ( - "fmt" -) - -type memUvarintReader struct { - C int // index of next byte to read from S - S []byte -} - -func newMemUvarintReader(s []byte) *memUvarintReader { - return &memUvarintReader{S: s} -} - -// Len returns the number of unread bytes. -func (r *memUvarintReader) Len() int { - n := len(r.S) - r.C - if n < 0 { - return 0 - } - return n -} - -// ReadUvarint reads an encoded uint64. The original code this was -// based on is at encoding/binary/ReadUvarint(). -func (r *memUvarintReader) ReadUvarint() (uint64, error) { - var x uint64 - var s uint - var C = r.C - var S = r.S - - for { - b := S[C] - C++ - - if b < 0x80 { - r.C = C - - // why 63? The original code had an 'i += 1' loop var and - // checked for i > 9 || i == 9 ...; but, we no longer - // check for the i var, but instead check here for s, - // which is incremented by 7. So, 7*9 == 63. - // - // why the "extra" >= check? The normal case is that s < - // 63, so we check this single >= guard first so that we - // hit the normal, nil-error return pathway sooner. 
- if s >= 63 && (s > 63 || s == 63 && b > 1) { - return 0, fmt.Errorf("memUvarintReader overflow") - } - - return x | uint64(b)< 0 { - storedIndexOffset, newDocNums, err = mergeStoredAndRemap(segments, drops, - fieldsMap, fieldsInv, fieldsSame, numDocs, cr, closeCh) - if err != nil { - return nil, 0, 0, 0, 0, nil, nil, nil, err - } - - dictLocs, docValueOffset, err = persistMergedRest(segments, drops, - fieldsInv, fieldsMap, fieldsSame, - newDocNums, numDocs, chunkMode, cr, closeCh) - if err != nil { - return nil, 0, 0, 0, 0, nil, nil, nil, err - } - } else { - dictLocs = make([]uint64, len(fieldsInv)) - } - - fieldsIndexOffset, err = persistFields(fieldsInv, cr, dictLocs) - if err != nil { - return nil, 0, 0, 0, 0, nil, nil, nil, err - } - - return newDocNums, numDocs, storedIndexOffset, fieldsIndexOffset, docValueOffset, dictLocs, fieldsInv, fieldsMap, nil -} - -// mapFields takes the fieldsInv list and returns a map of fieldName -// to fieldID+1 -func mapFields(fields []string) map[string]uint16 { - rv := make(map[string]uint16, len(fields)) - for i, fieldName := range fields { - rv[fieldName] = uint16(i) + 1 - } - return rv -} - -// computeNewDocCount determines how many documents will be in the newly -// merged segment when obsoleted docs are dropped -func computeNewDocCount(segments []*SegmentBase, drops []*roaring.Bitmap) uint64 { - var newDocCount uint64 - for segI, segment := range segments { - newDocCount += segment.numDocs - if drops[segI] != nil { - newDocCount -= drops[segI].GetCardinality() - } - } - return newDocCount -} - -func persistMergedRest(segments []*SegmentBase, dropsIn []*roaring.Bitmap, - fieldsInv []string, fieldsMap map[string]uint16, fieldsSame bool, - newDocNumsIn [][]uint64, newSegDocCount uint64, chunkMode uint32, - w *CountHashWriter, closeCh chan struct{}) ([]uint64, uint64, error) { - var bufMaxVarintLen64 []byte = make([]byte, binary.MaxVarintLen64) - var bufLoc []uint64 - - var postings *PostingsList - var postItr 
*PostingsIterator - - rv := make([]uint64, len(fieldsInv)) - fieldDvLocsStart := make([]uint64, len(fieldsInv)) - fieldDvLocsEnd := make([]uint64, len(fieldsInv)) - - // these int coders are initialized with chunk size 1024 - // however this will be reset to the correct chunk size - // while processing each individual field-term section - tfEncoder := newChunkedIntCoder(1024, newSegDocCount-1) - locEncoder := newChunkedIntCoder(1024, newSegDocCount-1) - - var vellumBuf bytes.Buffer - newVellum, err := vellum.New(&vellumBuf, nil) - if err != nil { - return nil, 0, err - } - - newRoaring := roaring.NewBitmap() - - // for each field - for fieldID, fieldName := range fieldsInv { - // collect FST iterators from all active segments for this field - var newDocNums [][]uint64 - var drops []*roaring.Bitmap - var dicts []*Dictionary - var itrs []vellum.Iterator - - var segmentsInFocus []*SegmentBase - - for segmentI, segment := range segments { - // check for the closure in meantime - if isClosed(closeCh) { - return nil, 0, seg.ErrClosed - } - - dict, err2 := segment.dictionary(fieldName) - if err2 != nil { - return nil, 0, err2 - } - if dict != nil && dict.fst != nil { - itr, err2 := dict.fst.Iterator(nil, nil) - if err2 != nil && err2 != vellum.ErrIteratorDone { - return nil, 0, err2 - } - if itr != nil { - newDocNums = append(newDocNums, newDocNumsIn[segmentI]) - if dropsIn[segmentI] != nil && !dropsIn[segmentI].IsEmpty() { - drops = append(drops, dropsIn[segmentI]) - } else { - drops = append(drops, nil) - } - dicts = append(dicts, dict) - itrs = append(itrs, itr) - segmentsInFocus = append(segmentsInFocus, segment) - } - } - } - - var prevTerm []byte - - newRoaring.Clear() - - var lastDocNum, lastFreq, lastNorm uint64 - - // determines whether to use "1-hit" encoding optimization - // when a term appears in only 1 doc, with no loc info, - // has freq of 1, and the docNum fits into 31-bits - use1HitEncoding := func(termCardinality uint64) (bool, uint64, uint64) { - if 
termCardinality == uint64(1) && locEncoder.FinalSize() <= 0 { - docNum := uint64(newRoaring.Minimum()) - if under32Bits(docNum) && docNum == lastDocNum && lastFreq == 1 { - return true, docNum, lastNorm - } - } - return false, 0, 0 - } - - finishTerm := func(term []byte) error { - tfEncoder.Close() - locEncoder.Close() - - postingsOffset, err := writePostings(newRoaring, - tfEncoder, locEncoder, use1HitEncoding, w, bufMaxVarintLen64) - if err != nil { - return err - } - - if postingsOffset > 0 { - err = newVellum.Insert(term, postingsOffset) - if err != nil { - return err - } - } - - newRoaring.Clear() - - tfEncoder.Reset() - locEncoder.Reset() - - lastDocNum = 0 - lastFreq = 0 - lastNorm = 0 - - return nil - } - - enumerator, err := newEnumerator(itrs) - - for err == nil { - term, itrI, postingsOffset := enumerator.Current() - - if !bytes.Equal(prevTerm, term) { - // check for the closure in meantime - if isClosed(closeCh) { - return nil, 0, seg.ErrClosed - } - - // if the term changed, write out the info collected - // for the previous term - err = finishTerm(prevTerm) - if err != nil { - return nil, 0, err - } - } - if !bytes.Equal(prevTerm, term) || prevTerm == nil { - // compute cardinality of field-term in new seg - var newCard uint64 - lowItrIdxs, lowItrVals := enumerator.GetLowIdxsAndValues() - for i, idx := range lowItrIdxs { - pl, err := dicts[idx].postingsListFromOffset(lowItrVals[i], drops[idx], nil) - if err != nil { - return nil, 0, err - } - newCard += pl.Count() - } - // compute correct chunk size with this - chunkSize, err := getChunkSize(chunkMode, newCard, newSegDocCount) - if err != nil { - return nil, 0, err - } - // update encoders chunk - tfEncoder.SetChunkSize(chunkSize, newSegDocCount-1) - locEncoder.SetChunkSize(chunkSize, newSegDocCount-1) - } - - postings, err = dicts[itrI].postingsListFromOffset( - postingsOffset, drops[itrI], postings) - if err != nil { - return nil, 0, err - } - - postItr = postings.iterator(true, true, true, postItr) 
- - if fieldsSame { - // can optimize by copying freq/norm/loc bytes directly - lastDocNum, lastFreq, lastNorm, err = mergeTermFreqNormLocsByCopying( - term, postItr, newDocNums[itrI], newRoaring, - tfEncoder, locEncoder) - } else { - lastDocNum, lastFreq, lastNorm, bufLoc, err = mergeTermFreqNormLocs( - fieldsMap, term, postItr, newDocNums[itrI], newRoaring, - tfEncoder, locEncoder, bufLoc) - } - if err != nil { - return nil, 0, err - } - - prevTerm = prevTerm[:0] // copy to prevTerm in case Next() reuses term mem - prevTerm = append(prevTerm, term...) - - err = enumerator.Next() - } - if err != vellum.ErrIteratorDone { - return nil, 0, err - } - - err = finishTerm(prevTerm) - if err != nil { - return nil, 0, err - } - - dictOffset := uint64(w.Count()) - - err = newVellum.Close() - if err != nil { - return nil, 0, err - } - vellumData := vellumBuf.Bytes() - - // write out the length of the vellum data - n := binary.PutUvarint(bufMaxVarintLen64, uint64(len(vellumData))) - _, err = w.Write(bufMaxVarintLen64[:n]) - if err != nil { - return nil, 0, err - } - - // write this vellum to disk - _, err = w.Write(vellumData) - if err != nil { - return nil, 0, err - } - - rv[fieldID] = dictOffset - - // get the field doc value offset (start) - fieldDvLocsStart[fieldID] = uint64(w.Count()) - - // update the field doc values - // NOTE: doc values continue to use legacy chunk mode - chunkSize, err := getChunkSize(LegacyChunkMode, 0, 0) - if err != nil { - return nil, 0, err - } - fdvEncoder := newChunkedContentCoder(chunkSize, newSegDocCount-1, w, true) - - fdvReadersAvailable := false - var dvIterClone *docValueReader - for segmentI, segment := range segmentsInFocus { - // check for the closure in meantime - if isClosed(closeCh) { - return nil, 0, seg.ErrClosed - } - - fieldIDPlus1 := uint16(segment.fieldsMap[fieldName]) - if dvIter, exists := segment.fieldDvReaders[fieldIDPlus1-1]; exists && - dvIter != nil { - fdvReadersAvailable = true - dvIterClone = 
dvIter.cloneInto(dvIterClone) - err = dvIterClone.iterateAllDocValues(segment, func(docNum uint64, terms []byte) error { - if newDocNums[segmentI][docNum] == docDropped { - return nil - } - err := fdvEncoder.Add(newDocNums[segmentI][docNum], terms) - if err != nil { - return err - } - return nil - }) - if err != nil { - return nil, 0, err - } - } - } - - if fdvReadersAvailable { - err = fdvEncoder.Close() - if err != nil { - return nil, 0, err - } - - // persist the doc value details for this field - _, err = fdvEncoder.Write() - if err != nil { - return nil, 0, err - } - - // get the field doc value offset (end) - fieldDvLocsEnd[fieldID] = uint64(w.Count()) - } else { - fieldDvLocsStart[fieldID] = fieldNotUninverted - fieldDvLocsEnd[fieldID] = fieldNotUninverted - } - - // reset vellum buffer and vellum builder - vellumBuf.Reset() - err = newVellum.Reset(&vellumBuf) - if err != nil { - return nil, 0, err - } - } - - fieldDvLocsOffset := uint64(w.Count()) - - buf := bufMaxVarintLen64 - for i := 0; i < len(fieldDvLocsStart); i++ { - n := binary.PutUvarint(buf, fieldDvLocsStart[i]) - _, err := w.Write(buf[:n]) - if err != nil { - return nil, 0, err - } - n = binary.PutUvarint(buf, fieldDvLocsEnd[i]) - _, err = w.Write(buf[:n]) - if err != nil { - return nil, 0, err - } - } - - return rv, fieldDvLocsOffset, nil -} - -func mergeTermFreqNormLocsByCopying(term []byte, postItr *PostingsIterator, - newDocNums []uint64, newRoaring *roaring.Bitmap, - tfEncoder *chunkedIntCoder, locEncoder *chunkedIntCoder) ( - lastDocNum uint64, lastFreq uint64, lastNorm uint64, err error) { - nextDocNum, nextFreq, nextNorm, nextFreqNormBytes, nextLocBytes, err := - postItr.nextBytes() - for err == nil && len(nextFreqNormBytes) > 0 { - hitNewDocNum := newDocNums[nextDocNum] - if hitNewDocNum == docDropped { - return 0, 0, 0, fmt.Errorf("see hit with dropped doc num") - } - - newRoaring.Add(uint32(hitNewDocNum)) - - err = tfEncoder.AddBytes(hitNewDocNum, nextFreqNormBytes) - if err != nil { - 
return 0, 0, 0, err - } - - if len(nextLocBytes) > 0 { - err = locEncoder.AddBytes(hitNewDocNum, nextLocBytes) - if err != nil { - return 0, 0, 0, err - } - } - - lastDocNum = hitNewDocNum - lastFreq = nextFreq - lastNorm = nextNorm - - nextDocNum, nextFreq, nextNorm, nextFreqNormBytes, nextLocBytes, err = - postItr.nextBytes() - } - - return lastDocNum, lastFreq, lastNorm, err -} - -func mergeTermFreqNormLocs(fieldsMap map[string]uint16, term []byte, postItr *PostingsIterator, - newDocNums []uint64, newRoaring *roaring.Bitmap, - tfEncoder *chunkedIntCoder, locEncoder *chunkedIntCoder, bufLoc []uint64) ( - lastDocNum uint64, lastFreq uint64, lastNorm uint64, bufLocOut []uint64, err error) { - next, err := postItr.Next() - for next != nil && err == nil { - hitNewDocNum := newDocNums[next.Number()] - if hitNewDocNum == docDropped { - return 0, 0, 0, nil, fmt.Errorf("see hit with dropped docNum") - } - - newRoaring.Add(uint32(hitNewDocNum)) - - nextFreq := next.Frequency() - var nextNorm uint64 - if pi, ok := next.(*Posting); ok { - nextNorm = pi.NormUint64() - } else { - return 0, 0, 0, nil, fmt.Errorf("unexpected posting type %T", next) - } - - locs := next.Locations() - - if nextFreq > 0 { - err = tfEncoder.Add(hitNewDocNum, - encodeFreqHasLocs(nextFreq, len(locs) > 0), nextNorm) - } else { - err = tfEncoder.Add(hitNewDocNum, - encodeFreqHasLocs(nextFreq, len(locs) > 0)) - } - if err != nil { - return 0, 0, 0, nil, err - } - - if len(locs) > 0 { - numBytesLocs := 0 - for _, loc := range locs { - ap := loc.ArrayPositions() - numBytesLocs += totalUvarintBytes(uint64(fieldsMap[loc.Field()]-1), - loc.Pos(), loc.Start(), loc.End(), uint64(len(ap)), ap) - } - - err = locEncoder.Add(hitNewDocNum, uint64(numBytesLocs)) - if err != nil { - return 0, 0, 0, nil, err - } - - for _, loc := range locs { - ap := loc.ArrayPositions() - if cap(bufLoc) < 5+len(ap) { - bufLoc = make([]uint64, 0, 5+len(ap)) - } - args := bufLoc[0:5] - args[0] = uint64(fieldsMap[loc.Field()] - 1) - 
args[1] = loc.Pos() - args[2] = loc.Start() - args[3] = loc.End() - args[4] = uint64(len(ap)) - args = append(args, ap...) - err = locEncoder.Add(hitNewDocNum, args...) - if err != nil { - return 0, 0, 0, nil, err - } - } - } - - lastDocNum = hitNewDocNum - lastFreq = nextFreq - lastNorm = nextNorm - - next, err = postItr.Next() - } - - return lastDocNum, lastFreq, lastNorm, bufLoc, err -} - -func writePostings(postings *roaring.Bitmap, tfEncoder, locEncoder *chunkedIntCoder, - use1HitEncoding func(uint64) (bool, uint64, uint64), - w *CountHashWriter, bufMaxVarintLen64 []byte) ( - offset uint64, err error) { - termCardinality := postings.GetCardinality() - if termCardinality <= 0 { - return 0, nil - } - - if use1HitEncoding != nil { - encodeAs1Hit, docNum1Hit, normBits1Hit := use1HitEncoding(termCardinality) - if encodeAs1Hit { - return FSTValEncode1Hit(docNum1Hit, normBits1Hit), nil - } - } - - var tfOffset uint64 - tfOffset, _, err = tfEncoder.writeAt(w) - if err != nil { - return 0, err - } - - var locOffset uint64 - locOffset, _, err = locEncoder.writeAt(w) - if err != nil { - return 0, err - } - - postingsOffset := uint64(w.Count()) - - n := binary.PutUvarint(bufMaxVarintLen64, tfOffset) - _, err = w.Write(bufMaxVarintLen64[:n]) - if err != nil { - return 0, err - } - - n = binary.PutUvarint(bufMaxVarintLen64, locOffset) - _, err = w.Write(bufMaxVarintLen64[:n]) - if err != nil { - return 0, err - } - - _, err = writeRoaringWithLen(postings, w, bufMaxVarintLen64) - if err != nil { - return 0, err - } - - return postingsOffset, nil -} - -type varintEncoder func(uint64) (int, error) - -func mergeStoredAndRemap(segments []*SegmentBase, drops []*roaring.Bitmap, - fieldsMap map[string]uint16, fieldsInv []string, fieldsSame bool, newSegDocCount uint64, - w *CountHashWriter, closeCh chan struct{}) (uint64, [][]uint64, error) { - var rv [][]uint64 // The remapped or newDocNums for each segment. 
- - var newDocNum uint64 - - var curr int - var data, compressed []byte - var metaBuf bytes.Buffer - varBuf := make([]byte, binary.MaxVarintLen64) - metaEncode := func(val uint64) (int, error) { - wb := binary.PutUvarint(varBuf, val) - return metaBuf.Write(varBuf[:wb]) - } - - vals := make([][][]byte, len(fieldsInv)) - typs := make([][]byte, len(fieldsInv)) - poss := make([][][]uint64, len(fieldsInv)) - - var posBuf []uint64 - - docNumOffsets := make([]uint64, newSegDocCount) - - vdc := visitDocumentCtxPool.Get().(*visitDocumentCtx) - defer visitDocumentCtxPool.Put(vdc) - - // for each segment - for segI, segment := range segments { - // check for the closure in meantime - if isClosed(closeCh) { - return 0, nil, seg.ErrClosed - } - - segNewDocNums := make([]uint64, segment.numDocs) - - dropsI := drops[segI] - - // optimize when the field mapping is the same across all - // segments and there are no deletions, via byte-copying - // of stored docs bytes directly to the writer - if fieldsSame && (dropsI == nil || dropsI.GetCardinality() == 0) { - err := segment.copyStoredDocs(newDocNum, docNumOffsets, w) - if err != nil { - return 0, nil, err - } - - for i := uint64(0); i < segment.numDocs; i++ { - segNewDocNums[i] = newDocNum - newDocNum++ - } - rv = append(rv, segNewDocNums) - - continue - } - - // for each doc num - for docNum := uint64(0); docNum < segment.numDocs; docNum++ { - // TODO: roaring's API limits docNums to 32-bits? 
- if dropsI != nil && dropsI.Contains(uint32(docNum)) { - segNewDocNums[docNum] = docDropped - continue - } - - segNewDocNums[docNum] = newDocNum - - curr = 0 - metaBuf.Reset() - data = data[:0] - - posTemp := posBuf - - // collect all the data - for i := 0; i < len(fieldsInv); i++ { - vals[i] = vals[i][:0] - typs[i] = typs[i][:0] - poss[i] = poss[i][:0] - } - err := segment.visitStoredFields(vdc, docNum, func(field string, typ byte, value []byte, pos []uint64) bool { - fieldID := int(fieldsMap[field]) - 1 - vals[fieldID] = append(vals[fieldID], value) - typs[fieldID] = append(typs[fieldID], typ) - - // copy array positions to preserve them beyond the scope of this callback - var curPos []uint64 - if len(pos) > 0 { - if cap(posTemp) < len(pos) { - posBuf = make([]uint64, len(pos)*len(fieldsInv)) - posTemp = posBuf - } - curPos = posTemp[0:len(pos)] - copy(curPos, pos) - posTemp = posTemp[len(pos):] - } - poss[fieldID] = append(poss[fieldID], curPos) - - return true - }) - if err != nil { - return 0, nil, err - } - - // _id field special case optimizes ExternalID() lookups - idFieldVal := vals[uint16(0)][0] - _, err = metaEncode(uint64(len(idFieldVal))) - if err != nil { - return 0, nil, err - } - - // now walk the non-"_id" fields in order - for fieldID := 1; fieldID < len(fieldsInv); fieldID++ { - storedFieldValues := vals[fieldID] - - stf := typs[fieldID] - spf := poss[fieldID] - - var err2 error - curr, data, err2 = persistStoredFieldValues(fieldID, - storedFieldValues, stf, spf, curr, metaEncode, data) - if err2 != nil { - return 0, nil, err2 - } - } - - metaBytes := metaBuf.Bytes() - - compressed = snappy.Encode(compressed[:cap(compressed)], data) - - // record where we're about to start writing - docNumOffsets[newDocNum] = uint64(w.Count()) - - // write out the meta len and compressed data len - _, err = writeUvarints(w, - uint64(len(metaBytes)), - uint64(len(idFieldVal)+len(compressed))) - if err != nil { - return 0, nil, err - } - // now write the meta - _, 
err = w.Write(metaBytes) - if err != nil { - return 0, nil, err - } - // now write the _id field val (counted as part of the 'compressed' data) - _, err = w.Write(idFieldVal) - if err != nil { - return 0, nil, err - } - // now write the compressed data - _, err = w.Write(compressed) - if err != nil { - return 0, nil, err - } - - newDocNum++ - } - - rv = append(rv, segNewDocNums) - } - - // return value is the start of the stored index - storedIndexOffset := uint64(w.Count()) - - // now write out the stored doc index - for _, docNumOffset := range docNumOffsets { - err := binary.Write(w, binary.BigEndian, docNumOffset) - if err != nil { - return 0, nil, err - } - } - - return storedIndexOffset, rv, nil -} - -// copyStoredDocs writes out a segment's stored doc info, optimized by -// using a single Write() call for the entire set of bytes. The -// newDocNumOffsets is filled with the new offsets for each doc. -func (s *SegmentBase) copyStoredDocs(newDocNum uint64, newDocNumOffsets []uint64, - w *CountHashWriter) error { - if s.numDocs <= 0 { - return nil - } - - indexOffset0, storedOffset0, _, _, _ := - s.getDocStoredOffsets(0) // the segment's first doc - - indexOffsetN, storedOffsetN, readN, metaLenN, dataLenN := - s.getDocStoredOffsets(s.numDocs - 1) // the segment's last doc - - storedOffset0New := uint64(w.Count()) - - storedBytes := s.mem[storedOffset0 : storedOffsetN+readN+metaLenN+dataLenN] - _, err := w.Write(storedBytes) - if err != nil { - return err - } - - // remap the storedOffset's for the docs into new offsets relative - // to storedOffset0New, filling the given docNumOffsetsOut array - for indexOffset := indexOffset0; indexOffset <= indexOffsetN; indexOffset += 8 { - storedOffset := binary.BigEndian.Uint64(s.mem[indexOffset : indexOffset+8]) - storedOffsetNew := storedOffset - storedOffset0 + storedOffset0New - newDocNumOffsets[newDocNum] = storedOffsetNew - newDocNum += 1 - } - - return nil -} - -// mergeFields builds a unified list of fields used 
across all the -// input segments, and computes whether the fields are the same across -// segments (which depends on fields to be sorted in the same way -// across segments) -func mergeFields(segments []*SegmentBase) (bool, []string) { - fieldsSame := true - - var segment0Fields []string - if len(segments) > 0 { - segment0Fields = segments[0].Fields() - } - - fieldsExist := map[string]struct{}{} - for _, segment := range segments { - fields := segment.Fields() - for fieldi, field := range fields { - fieldsExist[field] = struct{}{} - if len(segment0Fields) != len(fields) || segment0Fields[fieldi] != field { - fieldsSame = false - } - } - } - - rv := make([]string, 0, len(fieldsExist)) - // ensure _id stays first - rv = append(rv, "_id") - for k := range fieldsExist { - if k != "_id" { - rv = append(rv, k) - } - } - - sort.Strings(rv[1:]) // leave _id as first - - return fieldsSame, rv -} - -func isClosed(closeCh chan struct{}) bool { - select { - case <-closeCh: - return true - default: - return false - } -} diff --git a/vendor/github.com/blevesearch/zapx/v15/new.go b/vendor/github.com/blevesearch/zapx/v15/new.go deleted file mode 100644 index 362715d44..000000000 --- a/vendor/github.com/blevesearch/zapx/v15/new.go +++ /dev/null @@ -1,837 +0,0 @@ -// Copyright (c) 2018 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package zap - -import ( - "bytes" - "encoding/binary" - "math" - "sort" - "sync" - - "github.com/RoaringBitmap/roaring" - index "github.com/blevesearch/bleve_index_api" - segment "github.com/blevesearch/scorch_segment_api/v2" - "github.com/blevesearch/vellum" - "github.com/golang/snappy" -) - -var NewSegmentBufferNumResultsBump int = 100 -var NewSegmentBufferNumResultsFactor float64 = 1.0 -var NewSegmentBufferAvgBytesPerDocFactor float64 = 1.0 - -// ValidateDocFields can be set by applications to perform additional checks -// on fields in a document being added to a new segment, by default it does -// nothing. -// This API is experimental and may be removed at any time. -var ValidateDocFields = func(field index.Field) error { - return nil -} - -// New creates an in-memory zap-encoded SegmentBase from a set of Documents -func (z *ZapPlugin) New(results []index.Document) ( - segment.Segment, uint64, error) { - return z.newWithChunkMode(results, DefaultChunkMode) -} - -func (*ZapPlugin) newWithChunkMode(results []index.Document, - chunkMode uint32) (segment.Segment, uint64, error) { - s := interimPool.Get().(*interim) - - var br bytes.Buffer - if s.lastNumDocs > 0 { - // use previous results to initialize the buf with an estimate - // size, but note that the interim instance comes from a - // global interimPool, so multiple scorch instances indexing - // different docs can lead to low quality estimates - estimateAvgBytesPerDoc := int(float64(s.lastOutSize/s.lastNumDocs) * - NewSegmentBufferNumResultsFactor) - estimateNumResults := int(float64(len(results)+NewSegmentBufferNumResultsBump) * - NewSegmentBufferAvgBytesPerDocFactor) - br.Grow(estimateAvgBytesPerDoc * estimateNumResults) - } - - s.results = results - s.chunkMode = chunkMode - s.w = NewCountHashWriter(&br) - - storedIndexOffset, fieldsIndexOffset, fdvIndexOffset, dictOffsets, - err := s.convert() - if err != nil { - return nil, uint64(0), err - } - - sb, err := InitSegmentBase(br.Bytes(), s.w.Sum32(), 
chunkMode, - s.FieldsMap, s.FieldsInv, uint64(len(results)), - storedIndexOffset, fieldsIndexOffset, fdvIndexOffset, dictOffsets) - - if err == nil && s.reset() == nil { - s.lastNumDocs = len(results) - s.lastOutSize = len(br.Bytes()) - interimPool.Put(s) - } - - return sb, uint64(len(br.Bytes())), err -} - -var interimPool = sync.Pool{New: func() interface{} { return &interim{} }} - -// interim holds temporary working data used while converting from -// analysis results to a zap-encoded segment -type interim struct { - results []index.Document - - chunkMode uint32 - - w *CountHashWriter - - // FieldsMap adds 1 to field id to avoid zero value issues - // name -> field id + 1 - FieldsMap map[string]uint16 - - // FieldsInv is the inverse of FieldsMap - // field id -> name - FieldsInv []string - - // Term dictionaries for each field - // field id -> term -> postings list id + 1 - Dicts []map[string]uint64 - - // Terms for each field, where terms are sorted ascending - // field id -> []term - DictKeys [][]string - - // Fields whose IncludeDocValues is true - // field id -> bool - IncludeDocValues []bool - - // postings id -> bitmap of docNums - Postings []*roaring.Bitmap - - // postings id -> freq/norm's, one for each docNum in postings - FreqNorms [][]interimFreqNorm - freqNormsBacking []interimFreqNorm - - // postings id -> locs, one for each freq - Locs [][]interimLoc - locsBacking []interimLoc - - numTermsPerPostingsList []int // key is postings list id - numLocsPerPostingsList []int // key is postings list id - - builder *vellum.Builder - builderBuf bytes.Buffer - - metaBuf bytes.Buffer - - tmp0 []byte - tmp1 []byte - - lastNumDocs int - lastOutSize int -} - -func (s *interim) reset() (err error) { - s.results = nil - s.chunkMode = 0 - s.w = nil - s.FieldsMap = nil - s.FieldsInv = nil - for i := range s.Dicts { - s.Dicts[i] = nil - } - s.Dicts = s.Dicts[:0] - for i := range s.DictKeys { - s.DictKeys[i] = s.DictKeys[i][:0] - } - s.DictKeys = s.DictKeys[:0] - for i 
:= range s.IncludeDocValues { - s.IncludeDocValues[i] = false - } - s.IncludeDocValues = s.IncludeDocValues[:0] - for _, idn := range s.Postings { - idn.Clear() - } - s.Postings = s.Postings[:0] - s.FreqNorms = s.FreqNorms[:0] - for i := range s.freqNormsBacking { - s.freqNormsBacking[i] = interimFreqNorm{} - } - s.freqNormsBacking = s.freqNormsBacking[:0] - s.Locs = s.Locs[:0] - for i := range s.locsBacking { - s.locsBacking[i] = interimLoc{} - } - s.locsBacking = s.locsBacking[:0] - s.numTermsPerPostingsList = s.numTermsPerPostingsList[:0] - s.numLocsPerPostingsList = s.numLocsPerPostingsList[:0] - s.builderBuf.Reset() - if s.builder != nil { - err = s.builder.Reset(&s.builderBuf) - } - s.metaBuf.Reset() - s.tmp0 = s.tmp0[:0] - s.tmp1 = s.tmp1[:0] - s.lastNumDocs = 0 - s.lastOutSize = 0 - - return err -} - -func (s *interim) grabBuf(size int) []byte { - buf := s.tmp0 - if cap(buf) < size { - buf = make([]byte, size) - s.tmp0 = buf - } - return buf[0:size] -} - -type interimStoredField struct { - vals [][]byte - typs []byte - arrayposs [][]uint64 // array positions -} - -type interimFreqNorm struct { - freq uint64 - norm float32 - numLocs int -} - -type interimLoc struct { - fieldID uint16 - pos uint64 - start uint64 - end uint64 - arrayposs []uint64 -} - -func (s *interim) convert() (uint64, uint64, uint64, []uint64, error) { - s.FieldsMap = map[string]uint16{} - - s.getOrDefineField("_id") // _id field is fieldID 0 - - for _, result := range s.results { - result.VisitComposite(func(field index.CompositeField) { - s.getOrDefineField(field.Name()) - }) - result.VisitFields(func(field index.Field) { - s.getOrDefineField(field.Name()) - }) - } - - sort.Strings(s.FieldsInv[1:]) // keep _id as first field - - for fieldID, fieldName := range s.FieldsInv { - s.FieldsMap[fieldName] = uint16(fieldID + 1) - } - - if cap(s.IncludeDocValues) >= len(s.FieldsInv) { - s.IncludeDocValues = s.IncludeDocValues[:len(s.FieldsInv)] - } else { - s.IncludeDocValues = make([]bool, 
len(s.FieldsInv)) - } - - s.prepareDicts() - - for _, dict := range s.DictKeys { - sort.Strings(dict) - } - - s.processDocuments() - - storedIndexOffset, err := s.writeStoredFields() - if err != nil { - return 0, 0, 0, nil, err - } - - var fdvIndexOffset uint64 - var dictOffsets []uint64 - - if len(s.results) > 0 { - fdvIndexOffset, dictOffsets, err = s.writeDicts() - if err != nil { - return 0, 0, 0, nil, err - } - } else { - dictOffsets = make([]uint64, len(s.FieldsInv)) - } - - fieldsIndexOffset, err := persistFields(s.FieldsInv, s.w, dictOffsets) - if err != nil { - return 0, 0, 0, nil, err - } - - return storedIndexOffset, fieldsIndexOffset, fdvIndexOffset, dictOffsets, nil -} - -func (s *interim) getOrDefineField(fieldName string) int { - fieldIDPlus1, exists := s.FieldsMap[fieldName] - if !exists { - fieldIDPlus1 = uint16(len(s.FieldsInv) + 1) - s.FieldsMap[fieldName] = fieldIDPlus1 - s.FieldsInv = append(s.FieldsInv, fieldName) - - s.Dicts = append(s.Dicts, make(map[string]uint64)) - - n := len(s.DictKeys) - if n < cap(s.DictKeys) { - s.DictKeys = s.DictKeys[:n+1] - s.DictKeys[n] = s.DictKeys[n][:0] - } else { - s.DictKeys = append(s.DictKeys, []string(nil)) - } - } - - return int(fieldIDPlus1 - 1) -} - -// fill Dicts and DictKeys from analysis results -func (s *interim) prepareDicts() { - var pidNext int - - var totTFs int - var totLocs int - - visitField := func(field index.Field) { - fieldID := uint16(s.getOrDefineField(field.Name())) - - dict := s.Dicts[fieldID] - dictKeys := s.DictKeys[fieldID] - - tfs := field.AnalyzedTokenFrequencies() - for term, tf := range tfs { - pidPlus1, exists := dict[term] - if !exists { - pidNext++ - pidPlus1 = uint64(pidNext) - - dict[term] = pidPlus1 - dictKeys = append(dictKeys, term) - - s.numTermsPerPostingsList = append(s.numTermsPerPostingsList, 0) - s.numLocsPerPostingsList = append(s.numLocsPerPostingsList, 0) - } - - pid := pidPlus1 - 1 - - s.numTermsPerPostingsList[pid] += 1 - s.numLocsPerPostingsList[pid] += 
len(tf.Locations) - - totLocs += len(tf.Locations) - } - - totTFs += len(tfs) - - s.DictKeys[fieldID] = dictKeys - } - - for _, result := range s.results { - // walk each composite field - result.VisitComposite(func(field index.CompositeField) { - visitField(field) - }) - - // walk each field - result.VisitFields(visitField) - } - - numPostingsLists := pidNext - - if cap(s.Postings) >= numPostingsLists { - s.Postings = s.Postings[:numPostingsLists] - } else { - postings := make([]*roaring.Bitmap, numPostingsLists) - copy(postings, s.Postings[:cap(s.Postings)]) - for i := 0; i < numPostingsLists; i++ { - if postings[i] == nil { - postings[i] = roaring.New() - } - } - s.Postings = postings - } - - if cap(s.FreqNorms) >= numPostingsLists { - s.FreqNorms = s.FreqNorms[:numPostingsLists] - } else { - s.FreqNorms = make([][]interimFreqNorm, numPostingsLists) - } - - if cap(s.freqNormsBacking) >= totTFs { - s.freqNormsBacking = s.freqNormsBacking[:totTFs] - } else { - s.freqNormsBacking = make([]interimFreqNorm, totTFs) - } - - freqNormsBacking := s.freqNormsBacking - for pid, numTerms := range s.numTermsPerPostingsList { - s.FreqNorms[pid] = freqNormsBacking[0:0] - freqNormsBacking = freqNormsBacking[numTerms:] - } - - if cap(s.Locs) >= numPostingsLists { - s.Locs = s.Locs[:numPostingsLists] - } else { - s.Locs = make([][]interimLoc, numPostingsLists) - } - - if cap(s.locsBacking) >= totLocs { - s.locsBacking = s.locsBacking[:totLocs] - } else { - s.locsBacking = make([]interimLoc, totLocs) - } - - locsBacking := s.locsBacking - for pid, numLocs := range s.numLocsPerPostingsList { - s.Locs[pid] = locsBacking[0:0] - locsBacking = locsBacking[numLocs:] - } -} - -func (s *interim) processDocuments() { - numFields := len(s.FieldsInv) - reuseFieldLens := make([]int, numFields) - reuseFieldTFs := make([]index.TokenFrequencies, numFields) - - for docNum, result := range s.results { - for i := 0; i < numFields; i++ { // clear these for reuse - reuseFieldLens[i] = 0 - 
reuseFieldTFs[i] = nil - } - - s.processDocument(uint64(docNum), result, - reuseFieldLens, reuseFieldTFs) - } -} - -func (s *interim) processDocument(docNum uint64, - result index.Document, - fieldLens []int, fieldTFs []index.TokenFrequencies) { - visitField := func(field index.Field) { - fieldID := uint16(s.getOrDefineField(field.Name())) - fieldLens[fieldID] += field.AnalyzedLength() - - existingFreqs := fieldTFs[fieldID] - if existingFreqs != nil { - existingFreqs.MergeAll(field.Name(), field.AnalyzedTokenFrequencies()) - } else { - fieldTFs[fieldID] = field.AnalyzedTokenFrequencies() - } - } - - // walk each composite field - result.VisitComposite(func(field index.CompositeField) { - visitField(field) - }) - - // walk each field - result.VisitFields(visitField) - - // now that it's been rolled up into fieldTFs, walk that - for fieldID, tfs := range fieldTFs { - dict := s.Dicts[fieldID] - norm := math.Float32frombits(uint32(fieldLens[fieldID])) - - for term, tf := range tfs { - pid := dict[term] - 1 - bs := s.Postings[pid] - bs.Add(uint32(docNum)) - - s.FreqNorms[pid] = append(s.FreqNorms[pid], - interimFreqNorm{ - freq: uint64(tf.Frequency()), - norm: norm, - numLocs: len(tf.Locations), - }) - - if len(tf.Locations) > 0 { - locs := s.Locs[pid] - - for _, loc := range tf.Locations { - var locf = uint16(fieldID) - if loc.Field != "" { - locf = uint16(s.getOrDefineField(loc.Field)) - } - var arrayposs []uint64 - if len(loc.ArrayPositions) > 0 { - arrayposs = loc.ArrayPositions - } - locs = append(locs, interimLoc{ - fieldID: locf, - pos: uint64(loc.Position), - start: uint64(loc.Start), - end: uint64(loc.End), - arrayposs: arrayposs, - }) - } - - s.Locs[pid] = locs - } - } - } -} - -func (s *interim) writeStoredFields() ( - storedIndexOffset uint64, err error) { - varBuf := make([]byte, binary.MaxVarintLen64) - metaEncode := func(val uint64) (int, error) { - wb := binary.PutUvarint(varBuf, val) - return s.metaBuf.Write(varBuf[:wb]) - } - - data, compressed := 
s.tmp0[:0], s.tmp1[:0] - defer func() { s.tmp0, s.tmp1 = data, compressed }() - - // keyed by docNum - docStoredOffsets := make([]uint64, len(s.results)) - - // keyed by fieldID, for the current doc in the loop - docStoredFields := map[uint16]interimStoredField{} - - for docNum, result := range s.results { - for fieldID := range docStoredFields { // reset for next doc - delete(docStoredFields, fieldID) - } - - var validationErr error - result.VisitFields(func(field index.Field) { - fieldID := uint16(s.getOrDefineField(field.Name())) - - if field.Options().IsStored() { - isf := docStoredFields[fieldID] - isf.vals = append(isf.vals, field.Value()) - isf.typs = append(isf.typs, field.EncodedFieldType()) - isf.arrayposs = append(isf.arrayposs, field.ArrayPositions()) - docStoredFields[fieldID] = isf - } - - if field.Options().IncludeDocValues() { - s.IncludeDocValues[fieldID] = true - } - - err := ValidateDocFields(field) - if err != nil && validationErr == nil { - validationErr = err - } - }) - if validationErr != nil { - return 0, validationErr - } - - var curr int - - s.metaBuf.Reset() - data = data[:0] - - // _id field special case optimizes ExternalID() lookups - idFieldVal := docStoredFields[uint16(0)].vals[0] - _, err = metaEncode(uint64(len(idFieldVal))) - if err != nil { - return 0, err - } - - // handle non-"_id" fields - for fieldID := 1; fieldID < len(s.FieldsInv); fieldID++ { - isf, exists := docStoredFields[uint16(fieldID)] - if exists { - curr, data, err = persistStoredFieldValues( - fieldID, isf.vals, isf.typs, isf.arrayposs, - curr, metaEncode, data) - if err != nil { - return 0, err - } - } - } - - metaBytes := s.metaBuf.Bytes() - - compressed = snappy.Encode(compressed[:cap(compressed)], data) - - docStoredOffsets[docNum] = uint64(s.w.Count()) - - _, err := writeUvarints(s.w, - uint64(len(metaBytes)), - uint64(len(idFieldVal)+len(compressed))) - if err != nil { - return 0, err - } - - _, err = s.w.Write(metaBytes) - if err != nil { - return 0, err - 
} - - _, err = s.w.Write(idFieldVal) - if err != nil { - return 0, err - } - - _, err = s.w.Write(compressed) - if err != nil { - return 0, err - } - } - - storedIndexOffset = uint64(s.w.Count()) - - for _, docStoredOffset := range docStoredOffsets { - err = binary.Write(s.w, binary.BigEndian, docStoredOffset) - if err != nil { - return 0, err - } - } - - return storedIndexOffset, nil -} - -func (s *interim) writeDicts() (fdvIndexOffset uint64, dictOffsets []uint64, err error) { - dictOffsets = make([]uint64, len(s.FieldsInv)) - - fdvOffsetsStart := make([]uint64, len(s.FieldsInv)) - fdvOffsetsEnd := make([]uint64, len(s.FieldsInv)) - - buf := s.grabBuf(binary.MaxVarintLen64) - - // these int coders are initialized with chunk size 1024 - // however this will be reset to the correct chunk size - // while processing each individual field-term section - tfEncoder := newChunkedIntCoder(1024, uint64(len(s.results)-1)) - locEncoder := newChunkedIntCoder(1024, uint64(len(s.results)-1)) - - var docTermMap [][]byte - - if s.builder == nil { - s.builder, err = vellum.New(&s.builderBuf, nil) - if err != nil { - return 0, nil, err - } - } - - for fieldID, terms := range s.DictKeys { - if cap(docTermMap) < len(s.results) { - docTermMap = make([][]byte, len(s.results)) - } else { - docTermMap = docTermMap[0:len(s.results)] - for docNum := range docTermMap { // reset the docTermMap - docTermMap[docNum] = docTermMap[docNum][:0] - } - } - - dict := s.Dicts[fieldID] - - for _, term := range terms { // terms are already sorted - pid := dict[term] - 1 - - postingsBS := s.Postings[pid] - - freqNorms := s.FreqNorms[pid] - freqNormOffset := 0 - - locs := s.Locs[pid] - locOffset := 0 - - chunkSize, err := getChunkSize(s.chunkMode, postingsBS.GetCardinality(), uint64(len(s.results))) - if err != nil { - return 0, nil, err - } - tfEncoder.SetChunkSize(chunkSize, uint64(len(s.results)-1)) - locEncoder.SetChunkSize(chunkSize, uint64(len(s.results)-1)) - - postingsItr := postingsBS.Iterator() 
- for postingsItr.HasNext() { - docNum := uint64(postingsItr.Next()) - - freqNorm := freqNorms[freqNormOffset] - - // check if freq/norm is enabled - if freqNorm.freq > 0 { - err = tfEncoder.Add(docNum, - encodeFreqHasLocs(freqNorm.freq, freqNorm.numLocs > 0), - uint64(math.Float32bits(freqNorm.norm))) - } else { - // if disabled, then skip the norm part - err = tfEncoder.Add(docNum, - encodeFreqHasLocs(freqNorm.freq, freqNorm.numLocs > 0)) - } - if err != nil { - return 0, nil, err - } - - if freqNorm.numLocs > 0 { - numBytesLocs := 0 - for _, loc := range locs[locOffset : locOffset+freqNorm.numLocs] { - numBytesLocs += totalUvarintBytes( - uint64(loc.fieldID), loc.pos, loc.start, loc.end, - uint64(len(loc.arrayposs)), loc.arrayposs) - } - - err = locEncoder.Add(docNum, uint64(numBytesLocs)) - if err != nil { - return 0, nil, err - } - - for _, loc := range locs[locOffset : locOffset+freqNorm.numLocs] { - err = locEncoder.Add(docNum, - uint64(loc.fieldID), loc.pos, loc.start, loc.end, - uint64(len(loc.arrayposs))) - if err != nil { - return 0, nil, err - } - - err = locEncoder.Add(docNum, loc.arrayposs...) 
- if err != nil { - return 0, nil, err - } - } - - locOffset += freqNorm.numLocs - } - - freqNormOffset++ - - docTermMap[docNum] = append( - append(docTermMap[docNum], term...), - termSeparator) - } - - tfEncoder.Close() - locEncoder.Close() - - postingsOffset, err := - writePostings(postingsBS, tfEncoder, locEncoder, nil, s.w, buf) - if err != nil { - return 0, nil, err - } - - if postingsOffset > uint64(0) { - err = s.builder.Insert([]byte(term), postingsOffset) - if err != nil { - return 0, nil, err - } - } - - tfEncoder.Reset() - locEncoder.Reset() - } - - err = s.builder.Close() - if err != nil { - return 0, nil, err - } - - // record where this dictionary starts - dictOffsets[fieldID] = uint64(s.w.Count()) - - vellumData := s.builderBuf.Bytes() - - // write out the length of the vellum data - n := binary.PutUvarint(buf, uint64(len(vellumData))) - _, err = s.w.Write(buf[:n]) - if err != nil { - return 0, nil, err - } - - // write this vellum to disk - _, err = s.w.Write(vellumData) - if err != nil { - return 0, nil, err - } - - // reset vellum for reuse - s.builderBuf.Reset() - - err = s.builder.Reset(&s.builderBuf) - if err != nil { - return 0, nil, err - } - - // write the field doc values - // NOTE: doc values continue to use legacy chunk mode - chunkSize, err := getChunkSize(LegacyChunkMode, 0, 0) - if err != nil { - return 0, nil, err - } - fdvEncoder := newChunkedContentCoder(chunkSize, uint64(len(s.results)-1), s.w, false) - if s.IncludeDocValues[fieldID] { - for docNum, docTerms := range docTermMap { - if len(docTerms) > 0 { - err = fdvEncoder.Add(uint64(docNum), docTerms) - if err != nil { - return 0, nil, err - } - } - } - err = fdvEncoder.Close() - if err != nil { - return 0, nil, err - } - - fdvOffsetsStart[fieldID] = uint64(s.w.Count()) - - _, err = fdvEncoder.Write() - if err != nil { - return 0, nil, err - } - - fdvOffsetsEnd[fieldID] = uint64(s.w.Count()) - - fdvEncoder.Reset() - } else { - fdvOffsetsStart[fieldID] = fieldNotUninverted - 
fdvOffsetsEnd[fieldID] = fieldNotUninverted - } - } - - fdvIndexOffset = uint64(s.w.Count()) - - for i := 0; i < len(fdvOffsetsStart); i++ { - n := binary.PutUvarint(buf, fdvOffsetsStart[i]) - _, err := s.w.Write(buf[:n]) - if err != nil { - return 0, nil, err - } - n = binary.PutUvarint(buf, fdvOffsetsEnd[i]) - _, err = s.w.Write(buf[:n]) - if err != nil { - return 0, nil, err - } - } - - return fdvIndexOffset, dictOffsets, nil -} - -// returns the total # of bytes needed to encode the given uint64's -// into binary.PutUVarint() encoding -func totalUvarintBytes(a, b, c, d, e uint64, more []uint64) (n int) { - n = numUvarintBytes(a) - n += numUvarintBytes(b) - n += numUvarintBytes(c) - n += numUvarintBytes(d) - n += numUvarintBytes(e) - for _, v := range more { - n += numUvarintBytes(v) - } - return n -} - -// returns # of bytes needed to encode x in binary.PutUvarint() encoding -func numUvarintBytes(x uint64) (n int) { - for x >= 0x80 { - x >>= 7 - n++ - } - return n + 1 -} diff --git a/vendor/github.com/blevesearch/zapx/v15/plugin.go b/vendor/github.com/blevesearch/zapx/v15/plugin.go deleted file mode 100644 index f67297ec2..000000000 --- a/vendor/github.com/blevesearch/zapx/v15/plugin.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (c) 2020 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package zap - -// ZapPlugin implements the Plugin interface of -// the blevesearch/scorch_segment_api pkg -type ZapPlugin struct{} - -func (*ZapPlugin) Type() string { - return Type -} - -func (*ZapPlugin) Version() uint32 { - return Version -} diff --git a/vendor/github.com/blevesearch/zapx/v15/posting.go b/vendor/github.com/blevesearch/zapx/v15/posting.go deleted file mode 100644 index b1d19e532..000000000 --- a/vendor/github.com/blevesearch/zapx/v15/posting.go +++ /dev/null @@ -1,869 +0,0 @@ -// Copyright (c) 2017 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package zap - -import ( - "encoding/binary" - "fmt" - "math" - "reflect" - - "github.com/RoaringBitmap/roaring" - segment "github.com/blevesearch/scorch_segment_api/v2" -) - -var reflectStaticSizePostingsList int -var reflectStaticSizePostingsIterator int -var reflectStaticSizePosting int -var reflectStaticSizeLocation int - -func init() { - var pl PostingsList - reflectStaticSizePostingsList = int(reflect.TypeOf(pl).Size()) - var pi PostingsIterator - reflectStaticSizePostingsIterator = int(reflect.TypeOf(pi).Size()) - var p Posting - reflectStaticSizePosting = int(reflect.TypeOf(p).Size()) - var l Location - reflectStaticSizeLocation = int(reflect.TypeOf(l).Size()) -} - -// FST or vellum value (uint64) encoding is determined by the top two -// highest-order or most significant bits... 
-// -// encoding : MSB -// name : 63 62 61...to...bit #0 (LSB) -// ----------+---+---+--------------------------------------------------- -// general : 0 | 0 | 62-bits of postingsOffset. -// ~ : 0 | 1 | reserved for future. -// 1-hit : 1 | 0 | 31-bits of positive float31 norm | 31-bits docNum. -// ~ : 1 | 1 | reserved for future. -// -// Encoding "general" is able to handle all cases, where the -// postingsOffset points to more information about the postings for -// the term. -// -// Encoding "1-hit" is used to optimize a commonly seen case when a -// term has only a single hit. For example, a term in the _id field -// will have only 1 hit. The "1-hit" encoding is used for a term -// in a field when... -// -// - term vector info is disabled for that field; -// - and, the term appears in only a single doc for that field; -// - and, the term's freq is exactly 1 in that single doc for that field; -// - and, the docNum must fit into 31-bits; -// -// Otherwise, the "general" encoding is used instead. -// -// In the "1-hit" encoding, the field in that single doc may have -// other terms, which is supported in the "1-hit" encoding by the -// positive float31 norm. 
- -const FSTValEncodingMask = uint64(0xc000000000000000) -const FSTValEncodingGeneral = uint64(0x0000000000000000) -const FSTValEncoding1Hit = uint64(0x8000000000000000) - -func FSTValEncode1Hit(docNum uint64, normBits uint64) uint64 { - return FSTValEncoding1Hit | ((mask31Bits & normBits) << 31) | (mask31Bits & docNum) -} - -func FSTValDecode1Hit(v uint64) (docNum uint64, normBits uint64) { - return (mask31Bits & v), (mask31Bits & (v >> 31)) -} - -const mask31Bits = uint64(0x000000007fffffff) - -func under32Bits(x uint64) bool { - return x <= mask31Bits -} - -const DocNum1HitFinished = math.MaxUint64 - -var NormBits1Hit = uint64(1) - -// PostingsList is an in-memory representation of a postings list -type PostingsList struct { - sb *SegmentBase - postingsOffset uint64 - freqOffset uint64 - locOffset uint64 - postings *roaring.Bitmap - except *roaring.Bitmap - - // when normBits1Hit != 0, then this postings list came from a - // 1-hit encoding, and only the docNum1Hit & normBits1Hit apply - docNum1Hit uint64 - normBits1Hit uint64 - - chunkSize uint64 -} - -// represents an immutable, empty postings list -var emptyPostingsList = &PostingsList{} - -func (p *PostingsList) Size() int { - sizeInBytes := reflectStaticSizePostingsList + SizeOfPtr - - if p.except != nil { - sizeInBytes += int(p.except.GetSizeInBytes()) - } - - return sizeInBytes -} - -func (p *PostingsList) OrInto(receiver *roaring.Bitmap) { - if p.normBits1Hit != 0 { - receiver.Add(uint32(p.docNum1Hit)) - return - } - - if p.postings != nil { - receiver.Or(p.postings) - } -} - -// Iterator returns an iterator for this postings list -func (p *PostingsList) Iterator(includeFreq, includeNorm, includeLocs bool, - prealloc segment.PostingsIterator) segment.PostingsIterator { - if p.normBits1Hit == 0 && p.postings == nil { - return emptyPostingsIterator - } - - var preallocPI *PostingsIterator - pi, ok := prealloc.(*PostingsIterator) - if ok && pi != nil { - preallocPI = pi - } - if preallocPI == 
emptyPostingsIterator { - preallocPI = nil - } - - return p.iterator(includeFreq, includeNorm, includeLocs, preallocPI) -} - -func (p *PostingsList) iterator(includeFreq, includeNorm, includeLocs bool, - rv *PostingsIterator) *PostingsIterator { - if rv == nil { - rv = &PostingsIterator{} - } else { - freqNormReader := rv.freqNormReader - if freqNormReader != nil { - freqNormReader.reset() - } - - locReader := rv.locReader - if locReader != nil { - locReader.reset() - } - - nextLocs := rv.nextLocs[:0] - nextSegmentLocs := rv.nextSegmentLocs[:0] - - buf := rv.buf - - *rv = PostingsIterator{} // clear the struct - - rv.freqNormReader = freqNormReader - rv.locReader = locReader - - rv.nextLocs = nextLocs - rv.nextSegmentLocs = nextSegmentLocs - - rv.buf = buf - } - - rv.postings = p - rv.includeFreqNorm = includeFreq || includeNorm || includeLocs - rv.includeLocs = includeLocs - - if p.normBits1Hit != 0 { - // "1-hit" encoding - rv.docNum1Hit = p.docNum1Hit - rv.normBits1Hit = p.normBits1Hit - - if p.except != nil && p.except.Contains(uint32(rv.docNum1Hit)) { - rv.docNum1Hit = DocNum1HitFinished - } - - return rv - } - - // "general" encoding, check if empty - if p.postings == nil { - return rv - } - - // initialize freq chunk reader - if rv.includeFreqNorm { - rv.freqNormReader = newChunkedIntDecoder(p.sb.mem, p.freqOffset, rv.freqNormReader) - } - - // initialize the loc chunk reader - if rv.includeLocs { - rv.locReader = newChunkedIntDecoder(p.sb.mem, p.locOffset, rv.locReader) - } - - rv.all = p.postings.Iterator() - if p.except != nil { - rv.ActualBM = roaring.AndNot(p.postings, p.except) - rv.Actual = rv.ActualBM.Iterator() - } else { - rv.ActualBM = p.postings - rv.Actual = rv.all // Optimize to use same iterator for all & Actual. 
- } - - return rv -} - -// Count returns the number of items on this postings list -func (p *PostingsList) Count() uint64 { - var n, e uint64 - if p.normBits1Hit != 0 { - n = 1 - if p.except != nil && p.except.Contains(uint32(p.docNum1Hit)) { - e = 1 - } - } else if p.postings != nil { - n = p.postings.GetCardinality() - if p.except != nil { - e = p.postings.AndCardinality(p.except) - } - } - return n - e -} - -func (rv *PostingsList) read(postingsOffset uint64, d *Dictionary) error { - rv.postingsOffset = postingsOffset - - // handle "1-hit" encoding special case - if rv.postingsOffset&FSTValEncodingMask == FSTValEncoding1Hit { - return rv.init1Hit(postingsOffset) - } - - // read the location of the freq/norm details - var n uint64 - var read int - - rv.freqOffset, read = binary.Uvarint(d.sb.mem[postingsOffset+n : postingsOffset+binary.MaxVarintLen64]) - n += uint64(read) - - rv.locOffset, read = binary.Uvarint(d.sb.mem[postingsOffset+n : postingsOffset+n+binary.MaxVarintLen64]) - n += uint64(read) - - var postingsLen uint64 - postingsLen, read = binary.Uvarint(d.sb.mem[postingsOffset+n : postingsOffset+n+binary.MaxVarintLen64]) - n += uint64(read) - - roaringBytes := d.sb.mem[postingsOffset+n : postingsOffset+n+postingsLen] - - if rv.postings == nil { - rv.postings = roaring.NewBitmap() - } - _, err := rv.postings.FromBuffer(roaringBytes) - if err != nil { - return fmt.Errorf("error loading roaring bitmap: %v", err) - } - - rv.chunkSize, err = getChunkSize(d.sb.chunkMode, - rv.postings.GetCardinality(), d.sb.numDocs) - if err != nil { - return err - } - - return nil -} - -func (rv *PostingsList) init1Hit(fstVal uint64) error { - docNum, normBits := FSTValDecode1Hit(fstVal) - - rv.docNum1Hit = docNum - rv.normBits1Hit = normBits - - return nil -} - -// PostingsIterator provides a way to iterate through the postings list -type PostingsIterator struct { - postings *PostingsList - all roaring.IntPeekable - Actual roaring.IntPeekable - ActualBM *roaring.Bitmap - - 
currChunk uint32 - freqNormReader *chunkedIntDecoder - locReader *chunkedIntDecoder - - next Posting // reused across Next() calls - nextLocs []Location // reused across Next() calls - nextSegmentLocs []segment.Location // reused across Next() calls - - docNum1Hit uint64 - normBits1Hit uint64 - - buf []byte - - includeFreqNorm bool - includeLocs bool -} - -var emptyPostingsIterator = &PostingsIterator{} - -func (i *PostingsIterator) Size() int { - sizeInBytes := reflectStaticSizePostingsIterator + SizeOfPtr + - i.next.Size() - // account for freqNormReader, locReader if we start using this. - for _, entry := range i.nextLocs { - sizeInBytes += entry.Size() - } - - return sizeInBytes -} - -func (i *PostingsIterator) loadChunk(chunk int) error { - if i.includeFreqNorm { - err := i.freqNormReader.loadChunk(chunk) - if err != nil { - return err - } - } - - if i.includeLocs { - err := i.locReader.loadChunk(chunk) - if err != nil { - return err - } - } - - i.currChunk = uint32(chunk) - return nil -} - -func (i *PostingsIterator) readFreqNormHasLocs() (uint64, uint64, bool, error) { - if i.normBits1Hit != 0 { - return 1, i.normBits1Hit, false, nil - } - - freqHasLocs, err := i.freqNormReader.readUvarint() - if err != nil { - return 0, 0, false, fmt.Errorf("error reading frequency: %v", err) - } - - freq, hasLocs := decodeFreqHasLocs(freqHasLocs) - if freq == 0 { - return freq, 0, hasLocs, nil - } - - normBits, err := i.freqNormReader.readUvarint() - if err != nil { - return 0, 0, false, fmt.Errorf("error reading norm: %v", err) - } - - return freq, normBits, hasLocs, nil -} - -func (i *PostingsIterator) skipFreqNormReadHasLocs() (bool, error) { - if i.normBits1Hit != 0 { - return false, nil - } - - freqHasLocs, err := i.freqNormReader.readUvarint() - if err != nil { - return false, fmt.Errorf("error reading freqHasLocs: %v", err) - } - - freq, hasLocs := decodeFreqHasLocs(freqHasLocs) - if freq == 0 { - return hasLocs, nil - } - - i.freqNormReader.SkipUvarint() // Skip 
normBits. - - return hasLocs, nil // See decodeFreqHasLocs() / hasLocs. -} - -func encodeFreqHasLocs(freq uint64, hasLocs bool) uint64 { - rv := freq << 1 - if hasLocs { - rv = rv | 0x01 // 0'th LSB encodes whether there are locations - } - return rv -} - -func decodeFreqHasLocs(freqHasLocs uint64) (uint64, bool) { - freq := freqHasLocs >> 1 - hasLocs := freqHasLocs&0x01 != 0 - return freq, hasLocs -} - -// readLocation processes all the integers on the stream representing a single -// location. -func (i *PostingsIterator) readLocation(l *Location) error { - // read off field - fieldID, err := i.locReader.readUvarint() - if err != nil { - return fmt.Errorf("error reading location field: %v", err) - } - // read off pos - pos, err := i.locReader.readUvarint() - if err != nil { - return fmt.Errorf("error reading location pos: %v", err) - } - // read off start - start, err := i.locReader.readUvarint() - if err != nil { - return fmt.Errorf("error reading location start: %v", err) - } - // read off end - end, err := i.locReader.readUvarint() - if err != nil { - return fmt.Errorf("error reading location end: %v", err) - } - // read off num array pos - numArrayPos, err := i.locReader.readUvarint() - if err != nil { - return fmt.Errorf("error reading location num array pos: %v", err) - } - - l.field = i.postings.sb.fieldsInv[fieldID] - l.pos = pos - l.start = start - l.end = end - - if cap(l.ap) < int(numArrayPos) { - l.ap = make([]uint64, int(numArrayPos)) - } else { - l.ap = l.ap[:int(numArrayPos)] - } - - // read off array positions - for k := 0; k < int(numArrayPos); k++ { - ap, err := i.locReader.readUvarint() - if err != nil { - return fmt.Errorf("error reading array position: %v", err) - } - - l.ap[k] = ap - } - - return nil -} - -// Next returns the next posting on the postings list, or nil at the end -func (i *PostingsIterator) Next() (segment.Posting, error) { - return i.nextAtOrAfter(0) -} - -// Advance returns the posting at the specified docNum or it is not 
present -// the next posting, or if the end is reached, nil -func (i *PostingsIterator) Advance(docNum uint64) (segment.Posting, error) { - return i.nextAtOrAfter(docNum) -} - -// Next returns the next posting on the postings list, or nil at the end -func (i *PostingsIterator) nextAtOrAfter(atOrAfter uint64) (segment.Posting, error) { - docNum, exists, err := i.nextDocNumAtOrAfter(atOrAfter) - if err != nil || !exists { - return nil, err - } - - i.next = Posting{} // clear the struct - rv := &i.next - rv.docNum = docNum - - if !i.includeFreqNorm { - return rv, nil - } - - var normBits uint64 - var hasLocs bool - - rv.freq, normBits, hasLocs, err = i.readFreqNormHasLocs() - if err != nil { - return nil, err - } - - rv.norm = math.Float32frombits(uint32(normBits)) - - if i.includeLocs && hasLocs { - // prepare locations into reused slices, where we assume - // rv.freq >= "number of locs", since in a composite field, - // some component fields might have their IncludeTermVector - // flags disabled while other component fields are enabled - if rv.freq > 0 { - if cap(i.nextLocs) >= int(rv.freq) { - i.nextLocs = i.nextLocs[0:rv.freq] - } else { - i.nextLocs = make([]Location, rv.freq, rv.freq*2) - } - if cap(i.nextSegmentLocs) < int(rv.freq) { - i.nextSegmentLocs = make([]segment.Location, rv.freq, rv.freq*2) - } - rv.locs = i.nextSegmentLocs[:0] - } - - numLocsBytes, err := i.locReader.readUvarint() - if err != nil { - return nil, fmt.Errorf("error reading location numLocsBytes: %v", err) - } - - j := 0 - var nextLoc *Location - startBytesRemaining := i.locReader.Len() // # bytes remaining in the locReader - for startBytesRemaining-i.locReader.Len() < int(numLocsBytes) { - if len(i.nextLocs) > j { - nextLoc = &i.nextLocs[j] - } else { - nextLoc = &Location{} - } - - err := i.readLocation(nextLoc) - if err != nil { - return nil, err - } - - rv.locs = append(rv.locs, nextLoc) - j++ - } - } - - return rv, nil -} - -// nextDocNum returns the next docNum on the postings 
list, and also -// sets up the currChunk / loc related fields of the iterator. -func (i *PostingsIterator) nextDocNumAtOrAfter(atOrAfter uint64) (uint64, bool, error) { - if i.normBits1Hit != 0 { - if i.docNum1Hit == DocNum1HitFinished { - return 0, false, nil - } - if i.docNum1Hit < atOrAfter { - // advanced past our 1-hit - i.docNum1Hit = DocNum1HitFinished // consume our 1-hit docNum - return 0, false, nil - } - docNum := i.docNum1Hit - i.docNum1Hit = DocNum1HitFinished // consume our 1-hit docNum - return docNum, true, nil - } - - if i.Actual == nil || !i.Actual.HasNext() { - return 0, false, nil - } - - if i.postings == nil || i.postings.postings == i.ActualBM { - return i.nextDocNumAtOrAfterClean(atOrAfter) - } - - i.Actual.AdvanceIfNeeded(uint32(atOrAfter)) - - if !i.Actual.HasNext() { - // couldn't find anything - return 0, false, nil - } - - n := i.Actual.Next() - allN := i.all.Next() - nChunk := n / uint32(i.postings.chunkSize) - - // when allN becomes >= to here, then allN is in the same chunk as nChunk. 
- allNReachesNChunk := nChunk * uint32(i.postings.chunkSize) - - // n is the next actual hit (excluding some postings), and - // allN is the next hit in the full postings, and - // if they don't match, move 'all' forwards until they do - for allN != n { - // we've reached same chunk, so move the freq/norm/loc decoders forward - if i.includeFreqNorm && allN >= allNReachesNChunk { - err := i.currChunkNext(nChunk) - if err != nil { - return 0, false, err - } - } - - allN = i.all.Next() - } - - if i.includeFreqNorm && (i.currChunk != nChunk || i.freqNormReader.isNil()) { - err := i.loadChunk(int(nChunk)) - if err != nil { - return 0, false, fmt.Errorf("error loading chunk: %v", err) - } - } - - return uint64(n), true, nil -} - -var freqHasLocs1Hit = encodeFreqHasLocs(1, false) - -// nextBytes returns the docNum and the encoded freq & loc bytes for -// the next posting -func (i *PostingsIterator) nextBytes() ( - docNumOut uint64, freq uint64, normBits uint64, - bytesFreqNorm []byte, bytesLoc []byte, err error) { - docNum, exists, err := i.nextDocNumAtOrAfter(0) - if err != nil || !exists { - return 0, 0, 0, nil, nil, err - } - - if i.normBits1Hit != 0 { - if i.buf == nil { - i.buf = make([]byte, binary.MaxVarintLen64*2) - } - n := binary.PutUvarint(i.buf, freqHasLocs1Hit) - n += binary.PutUvarint(i.buf[n:], i.normBits1Hit) - return docNum, uint64(1), i.normBits1Hit, i.buf[:n], nil, nil - } - - startFreqNorm := i.freqNormReader.remainingLen() - - var hasLocs bool - - freq, normBits, hasLocs, err = i.readFreqNormHasLocs() - if err != nil { - return 0, 0, 0, nil, nil, err - } - - endFreqNorm := i.freqNormReader.remainingLen() - bytesFreqNorm = i.freqNormReader.readBytes(startFreqNorm, endFreqNorm) - - if hasLocs { - startLoc := i.locReader.remainingLen() - - numLocsBytes, err := i.locReader.readUvarint() - if err != nil { - return 0, 0, 0, nil, nil, - fmt.Errorf("error reading location nextBytes numLocs: %v", err) - } - - // skip over all the location bytes - 
i.locReader.SkipBytes(int(numLocsBytes)) - - endLoc := i.locReader.remainingLen() - bytesLoc = i.locReader.readBytes(startLoc, endLoc) - } - - return docNum, freq, normBits, bytesFreqNorm, bytesLoc, nil -} - -// optimization when the postings list is "clean" (e.g., no updates & -// no deletions) where the all bitmap is the same as the actual bitmap -func (i *PostingsIterator) nextDocNumAtOrAfterClean( - atOrAfter uint64) (uint64, bool, error) { - if !i.includeFreqNorm { - i.Actual.AdvanceIfNeeded(uint32(atOrAfter)) - - if !i.Actual.HasNext() { - return 0, false, nil // couldn't find anything - } - - return uint64(i.Actual.Next()), true, nil - } - - // freq-norm's needed, so maintain freq-norm chunk reader - sameChunkNexts := 0 // # of times we called Next() in the same chunk - n := i.Actual.Next() - nChunk := n / uint32(i.postings.chunkSize) - - for uint64(n) < atOrAfter && i.Actual.HasNext() { - n = i.Actual.Next() - - nChunkPrev := nChunk - nChunk = n / uint32(i.postings.chunkSize) - - if nChunk != nChunkPrev { - sameChunkNexts = 0 - } else { - sameChunkNexts += 1 - } - } - - if uint64(n) < atOrAfter { - // couldn't find anything - return 0, false, nil - } - - for j := 0; j < sameChunkNexts; j++ { - err := i.currChunkNext(nChunk) - if err != nil { - return 0, false, fmt.Errorf("error optimized currChunkNext: %v", err) - } - } - - if i.currChunk != nChunk || i.freqNormReader.isNil() { - err := i.loadChunk(int(nChunk)) - if err != nil { - return 0, false, fmt.Errorf("error loading chunk: %v", err) - } - } - - return uint64(n), true, nil -} - -func (i *PostingsIterator) currChunkNext(nChunk uint32) error { - if i.currChunk != nChunk || i.freqNormReader.isNil() { - err := i.loadChunk(int(nChunk)) - if err != nil { - return fmt.Errorf("error loading chunk: %v", err) - } - } - - // read off freq/offsets even though we don't care about them - hasLocs, err := i.skipFreqNormReadHasLocs() - if err != nil { - return err - } - - if i.includeLocs && hasLocs { - numLocsBytes, 
err := i.locReader.readUvarint() - if err != nil { - return fmt.Errorf("error reading location numLocsBytes: %v", err) - } - - // skip over all the location bytes - i.locReader.SkipBytes(int(numLocsBytes)) - } - - return nil -} - -// DocNum1Hit returns the docNum and true if this is "1-hit" optimized -// and the docNum is available. -func (p *PostingsIterator) DocNum1Hit() (uint64, bool) { - if p.normBits1Hit != 0 && p.docNum1Hit != DocNum1HitFinished { - return p.docNum1Hit, true - } - return 0, false -} - -// ActualBitmap returns the underlying actual bitmap -// which can be used up the stack for optimizations -func (p *PostingsIterator) ActualBitmap() *roaring.Bitmap { - return p.ActualBM -} - -// ReplaceActual replaces the ActualBM with the provided -// bitmap -func (p *PostingsIterator) ReplaceActual(abm *roaring.Bitmap) { - p.ActualBM = abm - p.Actual = abm.Iterator() -} - -// PostingsIteratorFromBitmap constructs a PostingsIterator given an -// "actual" bitmap. -func PostingsIteratorFromBitmap(bm *roaring.Bitmap, - includeFreqNorm, includeLocs bool) (segment.PostingsIterator, error) { - return &PostingsIterator{ - ActualBM: bm, - Actual: bm.Iterator(), - includeFreqNorm: includeFreqNorm, - includeLocs: includeLocs, - }, nil -} - -// PostingsIteratorFrom1Hit constructs a PostingsIterator given a -// 1-hit docNum. 
-func PostingsIteratorFrom1Hit(docNum1Hit uint64, - includeFreqNorm, includeLocs bool) (segment.PostingsIterator, error) { - return &PostingsIterator{ - docNum1Hit: docNum1Hit, - normBits1Hit: NormBits1Hit, - includeFreqNorm: includeFreqNorm, - includeLocs: includeLocs, - }, nil -} - -// Posting is a single entry in a postings list -type Posting struct { - docNum uint64 - freq uint64 - norm float32 - locs []segment.Location -} - -func (p *Posting) Size() int { - sizeInBytes := reflectStaticSizePosting - - for _, entry := range p.locs { - sizeInBytes += entry.Size() - } - - return sizeInBytes -} - -// Number returns the document number of this posting in this segment -func (p *Posting) Number() uint64 { - return p.docNum -} - -// Frequency returns the frequencies of occurrence of this term in this doc/field -func (p *Posting) Frequency() uint64 { - return p.freq -} - -// Norm returns the normalization factor for this posting -func (p *Posting) Norm() float64 { - return float64(float32(1.0 / math.Sqrt(float64(math.Float32bits(p.norm))))) -} - -// Locations returns the location information for each occurrence -func (p *Posting) Locations() []segment.Location { - return p.locs -} - -// NormUint64 returns the norm value as uint64 -func (p *Posting) NormUint64() uint64 { - return uint64(math.Float32bits(p.norm)) -} - -// Location represents the location of a single occurrence -type Location struct { - field string - pos uint64 - start uint64 - end uint64 - ap []uint64 -} - -func (l *Location) Size() int { - return reflectStaticSizeLocation + - len(l.field) + - len(l.ap)*SizeOfUint64 -} - -// Field returns the name of the field (useful in composite fields to know -// which original field the value came from) -func (l *Location) Field() string { - return l.field -} - -// Start returns the start byte offset of this occurrence -func (l *Location) Start() uint64 { - return l.start -} - -// End returns the end byte offset of this occurrence -func (l *Location) End() uint64 { - 
return l.end -} - -// Pos returns the 1-based phrase position of this occurrence -func (l *Location) Pos() uint64 { - return l.pos -} - -// ArrayPositions returns the array position vector associated with this occurrence -func (l *Location) ArrayPositions() []uint64 { - return l.ap -} diff --git a/vendor/github.com/blevesearch/zapx/v15/read.go b/vendor/github.com/blevesearch/zapx/v15/read.go deleted file mode 100644 index e47d4c6ab..000000000 --- a/vendor/github.com/blevesearch/zapx/v15/read.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright (c) 2017 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package zap - -import "encoding/binary" - -func (s *SegmentBase) getDocStoredMetaAndCompressed(docNum uint64) ([]byte, []byte) { - _, storedOffset, n, metaLen, dataLen := s.getDocStoredOffsets(docNum) - - meta := s.mem[storedOffset+n : storedOffset+n+metaLen] - data := s.mem[storedOffset+n+metaLen : storedOffset+n+metaLen+dataLen] - - return meta, data -} - -func (s *SegmentBase) getDocStoredOffsets(docNum uint64) ( - uint64, uint64, uint64, uint64, uint64) { - indexOffset := s.storedIndexOffset + (8 * docNum) - - storedOffset := binary.BigEndian.Uint64(s.mem[indexOffset : indexOffset+8]) - - var n uint64 - - metaLen, read := binary.Uvarint(s.mem[storedOffset : storedOffset+binary.MaxVarintLen64]) - n += uint64(read) - - dataLen, read := binary.Uvarint(s.mem[storedOffset+n : storedOffset+n+binary.MaxVarintLen64]) - n += uint64(read) - - return indexOffset, storedOffset, n, metaLen, dataLen -} diff --git a/vendor/github.com/blevesearch/zapx/v15/segment.go b/vendor/github.com/blevesearch/zapx/v15/segment.go deleted file mode 100644 index bc29f3f4a..000000000 --- a/vendor/github.com/blevesearch/zapx/v15/segment.go +++ /dev/null @@ -1,570 +0,0 @@ -// Copyright (c) 2017 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package zap - -import ( - "bytes" - "encoding/binary" - "fmt" - "io" - "os" - "sync" - "unsafe" - - "github.com/RoaringBitmap/roaring" - mmap "github.com/blevesearch/mmap-go" - segment "github.com/blevesearch/scorch_segment_api/v2" - "github.com/blevesearch/vellum" - "github.com/golang/snappy" -) - -var reflectStaticSizeSegmentBase int - -func init() { - var sb SegmentBase - reflectStaticSizeSegmentBase = int(unsafe.Sizeof(sb)) -} - -// Open returns a zap impl of a segment -func (*ZapPlugin) Open(path string) (segment.Segment, error) { - f, err := os.Open(path) - if err != nil { - return nil, err - } - mm, err := mmap.Map(f, mmap.RDONLY, 0) - if err != nil { - // mmap failed, try to close the file - _ = f.Close() - return nil, err - } - - rv := &Segment{ - SegmentBase: SegmentBase{ - mem: mm[0 : len(mm)-FooterSize], - fieldsMap: make(map[string]uint16), - fieldDvReaders: make(map[uint16]*docValueReader), - fieldFSTs: make(map[uint16]*vellum.FST), - }, - f: f, - mm: mm, - path: path, - refs: 1, - } - rv.SegmentBase.updateSize() - - err = rv.loadConfig() - if err != nil { - _ = rv.Close() - return nil, err - } - - err = rv.loadFields() - if err != nil { - _ = rv.Close() - return nil, err - } - - err = rv.loadDvReaders() - if err != nil { - _ = rv.Close() - return nil, err - } - - return rv, nil -} - -// SegmentBase is a memory only, read-only implementation of the -// segment.Segment interface, using zap's data representation. 
-type SegmentBase struct { - mem []byte - memCRC uint32 - chunkMode uint32 - fieldsMap map[string]uint16 // fieldName -> fieldID+1 - fieldsInv []string // fieldID -> fieldName - numDocs uint64 - storedIndexOffset uint64 - fieldsIndexOffset uint64 - docValueOffset uint64 - dictLocs []uint64 - fieldDvReaders map[uint16]*docValueReader // naive chunk cache per field - fieldDvNames []string // field names cached in fieldDvReaders - size uint64 - - m sync.Mutex - fieldFSTs map[uint16]*vellum.FST -} - -func (sb *SegmentBase) Size() int { - return int(sb.size) -} - -func (sb *SegmentBase) updateSize() { - sizeInBytes := reflectStaticSizeSegmentBase + - cap(sb.mem) - - // fieldsMap - for k := range sb.fieldsMap { - sizeInBytes += (len(k) + SizeOfString) + SizeOfUint16 - } - - // fieldsInv, dictLocs - for _, entry := range sb.fieldsInv { - sizeInBytes += len(entry) + SizeOfString - } - sizeInBytes += len(sb.dictLocs) * SizeOfUint64 - - // fieldDvReaders - for _, v := range sb.fieldDvReaders { - sizeInBytes += SizeOfUint16 + SizeOfPtr - if v != nil { - sizeInBytes += v.size() - } - } - - sb.size = uint64(sizeInBytes) -} - -func (sb *SegmentBase) AddRef() {} -func (sb *SegmentBase) DecRef() (err error) { return nil } -func (sb *SegmentBase) Close() (err error) { return nil } - -// Segment implements a persisted segment.Segment interface, by -// embedding an mmap()'ed SegmentBase. -type Segment struct { - SegmentBase - - f *os.File - mm mmap.MMap - path string - version uint32 - crc uint32 - - m sync.Mutex // Protects the fields that follow. 
- refs int64 -} - -func (s *Segment) Size() int { - // 8 /* size of file pointer */ - // 4 /* size of version -> uint32 */ - // 4 /* size of crc -> uint32 */ - sizeOfUints := 16 - - sizeInBytes := (len(s.path) + SizeOfString) + sizeOfUints - - // mutex, refs -> int64 - sizeInBytes += 16 - - // do not include the mmap'ed part - return sizeInBytes + s.SegmentBase.Size() - cap(s.mem) -} - -func (s *Segment) AddRef() { - s.m.Lock() - s.refs++ - s.m.Unlock() -} - -func (s *Segment) DecRef() (err error) { - s.m.Lock() - s.refs-- - if s.refs == 0 { - err = s.closeActual() - } - s.m.Unlock() - return err -} - -func (s *Segment) loadConfig() error { - crcOffset := len(s.mm) - 4 - s.crc = binary.BigEndian.Uint32(s.mm[crcOffset : crcOffset+4]) - - verOffset := crcOffset - 4 - s.version = binary.BigEndian.Uint32(s.mm[verOffset : verOffset+4]) - if s.version != Version { - return fmt.Errorf("unsupported version %d != %d", s.version, Version) - } - - chunkOffset := verOffset - 4 - s.chunkMode = binary.BigEndian.Uint32(s.mm[chunkOffset : chunkOffset+4]) - - docValueOffset := chunkOffset - 8 - s.docValueOffset = binary.BigEndian.Uint64(s.mm[docValueOffset : docValueOffset+8]) - - fieldsIndexOffset := docValueOffset - 8 - s.fieldsIndexOffset = binary.BigEndian.Uint64(s.mm[fieldsIndexOffset : fieldsIndexOffset+8]) - - storedIndexOffset := fieldsIndexOffset - 8 - s.storedIndexOffset = binary.BigEndian.Uint64(s.mm[storedIndexOffset : storedIndexOffset+8]) - - numDocsOffset := storedIndexOffset - 8 - s.numDocs = binary.BigEndian.Uint64(s.mm[numDocsOffset : numDocsOffset+8]) - return nil -} - -func (s *SegmentBase) loadFields() error { - // NOTE for now we assume the fields index immediately precedes - // the footer, and if this changes, need to adjust accordingly (or - // store explicit length), where s.mem was sliced from s.mm in Open(). 
- fieldsIndexEnd := uint64(len(s.mem)) - - // iterate through fields index - var fieldID uint64 - for s.fieldsIndexOffset+(8*fieldID) < fieldsIndexEnd { - addr := binary.BigEndian.Uint64(s.mem[s.fieldsIndexOffset+(8*fieldID) : s.fieldsIndexOffset+(8*fieldID)+8]) - - dictLoc, read := binary.Uvarint(s.mem[addr:fieldsIndexEnd]) - n := uint64(read) - s.dictLocs = append(s.dictLocs, dictLoc) - - var nameLen uint64 - nameLen, read = binary.Uvarint(s.mem[addr+n : fieldsIndexEnd]) - n += uint64(read) - - name := string(s.mem[addr+n : addr+n+nameLen]) - s.fieldsInv = append(s.fieldsInv, name) - s.fieldsMap[name] = uint16(fieldID + 1) - - fieldID++ - } - return nil -} - -// Dictionary returns the term dictionary for the specified field -func (s *SegmentBase) Dictionary(field string) (segment.TermDictionary, error) { - dict, err := s.dictionary(field) - if err == nil && dict == nil { - return emptyDictionary, nil - } - return dict, err -} - -func (sb *SegmentBase) dictionary(field string) (rv *Dictionary, err error) { - fieldIDPlus1 := sb.fieldsMap[field] - if fieldIDPlus1 > 0 { - rv = &Dictionary{ - sb: sb, - field: field, - fieldID: fieldIDPlus1 - 1, - } - - dictStart := sb.dictLocs[rv.fieldID] - if dictStart > 0 { - var ok bool - sb.m.Lock() - if rv.fst, ok = sb.fieldFSTs[rv.fieldID]; !ok { - // read the length of the vellum data - vellumLen, read := binary.Uvarint(sb.mem[dictStart : dictStart+binary.MaxVarintLen64]) - fstBytes := sb.mem[dictStart+uint64(read) : dictStart+uint64(read)+vellumLen] - rv.fst, err = vellum.Load(fstBytes) - if err != nil { - sb.m.Unlock() - return nil, fmt.Errorf("dictionary field %s vellum err: %v", field, err) - } - - sb.fieldFSTs[rv.fieldID] = rv.fst - } - - sb.m.Unlock() - rv.fstReader, err = rv.fst.Reader() - if err != nil { - return nil, fmt.Errorf("dictionary field %s vellum reader err: %v", field, err) - } - } - } - - return rv, nil -} - -// visitDocumentCtx holds data structures that are reusable across -// multiple VisitDocument() 
calls to avoid memory allocations -type visitDocumentCtx struct { - buf []byte - reader bytes.Reader - arrayPos []uint64 -} - -var visitDocumentCtxPool = sync.Pool{ - New: func() interface{} { - reuse := &visitDocumentCtx{} - return reuse - }, -} - -// VisitStoredFields invokes the StoredFieldValueVisitor for each stored field -// for the specified doc number -func (s *SegmentBase) VisitStoredFields(num uint64, visitor segment.StoredFieldValueVisitor) error { - vdc := visitDocumentCtxPool.Get().(*visitDocumentCtx) - defer visitDocumentCtxPool.Put(vdc) - return s.visitStoredFields(vdc, num, visitor) -} - -func (s *SegmentBase) visitStoredFields(vdc *visitDocumentCtx, num uint64, - visitor segment.StoredFieldValueVisitor) error { - // first make sure this is a valid number in this segment - if num < s.numDocs { - meta, compressed := s.getDocStoredMetaAndCompressed(num) - - vdc.reader.Reset(meta) - - // handle _id field special case - idFieldValLen, err := binary.ReadUvarint(&vdc.reader) - if err != nil { - return err - } - idFieldVal := compressed[:idFieldValLen] - - keepGoing := visitor("_id", byte('t'), idFieldVal, nil) - if !keepGoing { - visitDocumentCtxPool.Put(vdc) - return nil - } - - // handle non-"_id" fields - compressed = compressed[idFieldValLen:] - - uncompressed, err := snappy.Decode(vdc.buf[:cap(vdc.buf)], compressed) - if err != nil { - return err - } - - for keepGoing { - field, err := binary.ReadUvarint(&vdc.reader) - if err == io.EOF { - break - } - if err != nil { - return err - } - typ, err := binary.ReadUvarint(&vdc.reader) - if err != nil { - return err - } - offset, err := binary.ReadUvarint(&vdc.reader) - if err != nil { - return err - } - l, err := binary.ReadUvarint(&vdc.reader) - if err != nil { - return err - } - numap, err := binary.ReadUvarint(&vdc.reader) - if err != nil { - return err - } - var arrayPos []uint64 - if numap > 0 { - if cap(vdc.arrayPos) < int(numap) { - vdc.arrayPos = make([]uint64, numap) - } - arrayPos = 
vdc.arrayPos[:numap] - for i := 0; i < int(numap); i++ { - ap, err := binary.ReadUvarint(&vdc.reader) - if err != nil { - return err - } - arrayPos[i] = ap - } - } - - value := uncompressed[offset : offset+l] - keepGoing = visitor(s.fieldsInv[field], byte(typ), value, arrayPos) - } - - vdc.buf = uncompressed - } - return nil -} - -// DocID returns the value of the _id field for the given docNum -func (s *SegmentBase) DocID(num uint64) ([]byte, error) { - if num >= s.numDocs { - return nil, nil - } - - vdc := visitDocumentCtxPool.Get().(*visitDocumentCtx) - - meta, compressed := s.getDocStoredMetaAndCompressed(num) - - vdc.reader.Reset(meta) - - // handle _id field special case - idFieldValLen, err := binary.ReadUvarint(&vdc.reader) - if err != nil { - return nil, err - } - idFieldVal := compressed[:idFieldValLen] - - visitDocumentCtxPool.Put(vdc) - - return idFieldVal, nil -} - -// Count returns the number of documents in this segment. -func (s *SegmentBase) Count() uint64 { - return s.numDocs -} - -// DocNumbers returns a bitset corresponding to the doc numbers of all the -// provided _id strings -func (s *SegmentBase) DocNumbers(ids []string) (*roaring.Bitmap, error) { - rv := roaring.New() - - if len(s.fieldsMap) > 0 { - idDict, err := s.dictionary("_id") - if err != nil { - return nil, err - } - - postingsList := emptyPostingsList - - sMax, err := idDict.fst.GetMaxKey() - if err != nil { - return nil, err - } - sMaxStr := string(sMax) - filteredIds := make([]string, 0, len(ids)) - for _, id := range ids { - if id <= sMaxStr { - filteredIds = append(filteredIds, id) - } - } - - for _, id := range filteredIds { - postingsList, err = idDict.postingsList([]byte(id), nil, postingsList) - if err != nil { - return nil, err - } - postingsList.OrInto(rv) - } - } - - return rv, nil -} - -// Fields returns the field names used in this segment -func (s *SegmentBase) Fields() []string { - return s.fieldsInv -} - -// Path returns the path of this segment on disk -func (s 
*Segment) Path() string { - return s.path -} - -// Close releases all resources associated with this segment -func (s *Segment) Close() (err error) { - return s.DecRef() -} - -func (s *Segment) closeActual() (err error) { - if s.mm != nil { - err = s.mm.Unmap() - } - // try to close file even if unmap failed - if s.f != nil { - err2 := s.f.Close() - if err == nil { - // try to return first error - err = err2 - } - } - return -} - -// some helpers i started adding for the command-line utility - -// Data returns the underlying mmaped data slice -func (s *Segment) Data() []byte { - return s.mm -} - -// CRC returns the CRC value stored in the file footer -func (s *Segment) CRC() uint32 { - return s.crc -} - -// Version returns the file version in the file footer -func (s *Segment) Version() uint32 { - return s.version -} - -// ChunkFactor returns the chunk factor in the file footer -func (s *Segment) ChunkMode() uint32 { - return s.chunkMode -} - -// FieldsIndexOffset returns the fields index offset in the file footer -func (s *Segment) FieldsIndexOffset() uint64 { - return s.fieldsIndexOffset -} - -// StoredIndexOffset returns the stored value index offset in the file footer -func (s *Segment) StoredIndexOffset() uint64 { - return s.storedIndexOffset -} - -// DocValueOffset returns the docValue offset in the file footer -func (s *Segment) DocValueOffset() uint64 { - return s.docValueOffset -} - -// NumDocs returns the number of documents in the file footer -func (s *Segment) NumDocs() uint64 { - return s.numDocs -} - -// DictAddr is a helper function to compute the file offset where the -// dictionary is stored for the specified field. 
-func (s *Segment) DictAddr(field string) (uint64, error) { - fieldIDPlus1, ok := s.fieldsMap[field] - if !ok { - return 0, fmt.Errorf("no such field '%s'", field) - } - - return s.dictLocs[fieldIDPlus1-1], nil -} - -func (s *SegmentBase) loadDvReaders() error { - if s.docValueOffset == fieldNotUninverted || s.numDocs == 0 { - return nil - } - - var read uint64 - for fieldID, field := range s.fieldsInv { - var fieldLocStart, fieldLocEnd uint64 - var n int - fieldLocStart, n = binary.Uvarint(s.mem[s.docValueOffset+read : s.docValueOffset+read+binary.MaxVarintLen64]) - if n <= 0 { - return fmt.Errorf("loadDvReaders: failed to read the docvalue offset start for field %d", fieldID) - } - read += uint64(n) - fieldLocEnd, n = binary.Uvarint(s.mem[s.docValueOffset+read : s.docValueOffset+read+binary.MaxVarintLen64]) - if n <= 0 { - return fmt.Errorf("loadDvReaders: failed to read the docvalue offset end for field %d", fieldID) - } - read += uint64(n) - - fieldDvReader, err := s.loadFieldDocValueReader(field, fieldLocStart, fieldLocEnd) - if err != nil { - return err - } - if fieldDvReader != nil { - s.fieldDvReaders[uint16(fieldID)] = fieldDvReader - s.fieldDvNames = append(s.fieldDvNames, field) - } - } - - return nil -} diff --git a/vendor/github.com/blevesearch/zapx/v15/sizes.go b/vendor/github.com/blevesearch/zapx/v15/sizes.go deleted file mode 100644 index 34166ea33..000000000 --- a/vendor/github.com/blevesearch/zapx/v15/sizes.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright (c) 2020 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package zap - -import ( - "reflect" -) - -func init() { - var b bool - SizeOfBool = int(reflect.TypeOf(b).Size()) - var f32 float32 - SizeOfFloat32 = int(reflect.TypeOf(f32).Size()) - var f64 float64 - SizeOfFloat64 = int(reflect.TypeOf(f64).Size()) - var i int - SizeOfInt = int(reflect.TypeOf(i).Size()) - var m map[int]int - SizeOfMap = int(reflect.TypeOf(m).Size()) - var ptr *int - SizeOfPtr = int(reflect.TypeOf(ptr).Size()) - var slice []int - SizeOfSlice = int(reflect.TypeOf(slice).Size()) - var str string - SizeOfString = int(reflect.TypeOf(str).Size()) - var u8 uint8 - SizeOfUint8 = int(reflect.TypeOf(u8).Size()) - var u16 uint16 - SizeOfUint16 = int(reflect.TypeOf(u16).Size()) - var u32 uint32 - SizeOfUint32 = int(reflect.TypeOf(u32).Size()) - var u64 uint64 - SizeOfUint64 = int(reflect.TypeOf(u64).Size()) -} - -var SizeOfBool int -var SizeOfFloat32 int -var SizeOfFloat64 int -var SizeOfInt int -var SizeOfMap int -var SizeOfPtr int -var SizeOfSlice int -var SizeOfString int -var SizeOfUint8 int -var SizeOfUint16 int -var SizeOfUint32 int -var SizeOfUint64 int diff --git a/vendor/github.com/blevesearch/zapx/v15/write.go b/vendor/github.com/blevesearch/zapx/v15/write.go deleted file mode 100644 index 77aefdbfc..000000000 --- a/vendor/github.com/blevesearch/zapx/v15/write.go +++ /dev/null @@ -1,145 +0,0 @@ -// Copyright (c) 2017 Couchbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package zap - -import ( - "encoding/binary" - "io" - - "github.com/RoaringBitmap/roaring" -) - -// writes out the length of the roaring bitmap in bytes as varint -// then writes out the roaring bitmap itself -func writeRoaringWithLen(r *roaring.Bitmap, w io.Writer, - reuseBufVarint []byte) (int, error) { - buf, err := r.ToBytes() - if err != nil { - return 0, err - } - - var tw int - - // write out the length - n := binary.PutUvarint(reuseBufVarint, uint64(len(buf))) - nw, err := w.Write(reuseBufVarint[:n]) - tw += nw - if err != nil { - return tw, err - } - - // write out the roaring bytes - nw, err = w.Write(buf) - tw += nw - if err != nil { - return tw, err - } - - return tw, nil -} - -func persistFields(fieldsInv []string, w *CountHashWriter, dictLocs []uint64) (uint64, error) { - var rv uint64 - var fieldsOffsets []uint64 - - for fieldID, fieldName := range fieldsInv { - // record start of this field - fieldsOffsets = append(fieldsOffsets, uint64(w.Count())) - - // write out the dict location and field name length - _, err := writeUvarints(w, dictLocs[fieldID], uint64(len(fieldName))) - if err != nil { - return 0, err - } - - // write out the field name - _, err = w.Write([]byte(fieldName)) - if err != nil { - return 0, err - } - } - - // now write out the fields index - rv = uint64(w.Count()) - for fieldID := range fieldsInv { - err := binary.Write(w, binary.BigEndian, fieldsOffsets[fieldID]) - if err != nil { - return 0, err - } - } - - return rv, nil -} - -// FooterSize is the size of the footer record in bytes -// crc + ver + chunk + field offset + stored offset + num docs + docValueOffset -const FooterSize = 4 + 4 + 4 + 8 + 8 + 8 + 8 - -func persistFooter(numDocs, storedIndexOffset, fieldsIndexOffset, docValueOffset uint64, - chunkMode uint32, crcBeforeFooter uint32, writerIn io.Writer) error { - w := NewCountHashWriter(writerIn) - w.crc = 
crcBeforeFooter - - // write out the number of docs - err := binary.Write(w, binary.BigEndian, numDocs) - if err != nil { - return err - } - // write out the stored field index location: - err = binary.Write(w, binary.BigEndian, storedIndexOffset) - if err != nil { - return err - } - // write out the field index location - err = binary.Write(w, binary.BigEndian, fieldsIndexOffset) - if err != nil { - return err - } - // write out the fieldDocValue location - err = binary.Write(w, binary.BigEndian, docValueOffset) - if err != nil { - return err - } - // write out 32-bit chunk factor - err = binary.Write(w, binary.BigEndian, chunkMode) - if err != nil { - return err - } - // write out 32-bit version - err = binary.Write(w, binary.BigEndian, Version) - if err != nil { - return err - } - // write out CRC-32 of everything upto but not including this CRC - err = binary.Write(w, binary.BigEndian, w.crc) - if err != nil { - return err - } - return nil -} - -func writeUvarints(w io.Writer, vals ...uint64) (tw int, err error) { - buf := make([]byte, binary.MaxVarintLen64) - for _, val := range vals { - n := binary.PutUvarint(buf, val) - var nw int - nw, err = w.Write(buf[:n]) - tw += nw - if err != nil { - return tw, err - } - } - return tw, err -} diff --git a/vendor/github.com/blevesearch/zapx/v15/zap.md b/vendor/github.com/blevesearch/zapx/v15/zap.md deleted file mode 100644 index d74dc548b..000000000 --- a/vendor/github.com/blevesearch/zapx/v15/zap.md +++ /dev/null @@ -1,177 +0,0 @@ -# ZAP File Format - -## Legend - -### Sections - - |========| - | | section - |========| - -### Fixed-size fields - - |--------| |----| |--| |-| - | | uint64 | | uint32 | | uint16 | | uint8 - |--------| |----| |--| |-| - -### Varints - - |~~~~~~~~| - | | varint(up to uint64) - |~~~~~~~~| - -### Arbitrary-length fields - - |--------...---| - | | arbitrary-length field (string, vellum, roaring bitmap) - |--------...---| - -### Chunked data - - [--------] - [ ] - [--------] - -## Overview - 
-Footer section describes the configuration of particular ZAP file. The format of footer is version-dependent, so it is necessary to check `V` field before the parsing. - - |==================================================| - | Stored Fields | - |==================================================| - |-----> | Stored Fields Index | - | |==================================================| - | | Dictionaries + Postings + DocValues | - | |==================================================| - | |---> | DocValues Index | - | | |==================================================| - | | | Fields | - | | |==================================================| - | | |-> | Fields Index | - | | | |========|========|========|========|====|====|====| - | | | | D# | SF | F | FDV | CF | V | CC | (Footer) - | | | |========|====|===|====|===|====|===|====|====|====| - | | | | | | - |-+-+-----------------| | | - | |--------------------------| | - |-------------------------------------| - - D#. Number of Docs. - SF. Stored Fields Index Offset. - F. Field Index Offset. - FDV. Field DocValue Offset. - CF. Chunk Factor. - V. Version. - CC. CRC32. - -## Stored Fields - -Stored Fields Index is `D#` consecutive 64-bit unsigned integers - offsets, where relevant Stored Fields Data records are located. - - 0 [SF] [SF + D# * 8] - | Stored Fields | Stored Fields Index | - |================================|==================================| - | | | - | |--------------------| ||--------|--------|. . .|--------|| - | |-> | Stored Fields Data | || 0 | 1 | | D# - 1 || - | | |--------------------| ||--------|----|---|. . .|--------|| - | | | | | - |===|============================|==============|===================| - | | - |-------------------------------------------| - -Stored Fields Data is an arbitrary size record, which consists of metadata and [Snappy](https://github.com/golang/snappy)-compressed data. 
- - Stored Fields Data - |~~~~~~~~|~~~~~~~~|~~~~~~~~...~~~~~~~~|~~~~~~~~...~~~~~~~~| - | MDS | CDS | MD | CD | - |~~~~~~~~|~~~~~~~~|~~~~~~~~...~~~~~~~~|~~~~~~~~...~~~~~~~~| - - MDS. Metadata size. - CDS. Compressed data size. - MD. Metadata. - CD. Snappy-compressed data. - -## Fields - -Fields Index section located between addresses `F` and `len(file) - len(footer)` and consist of `uint64` values (`F1`, `F2`, ...) which are offsets to records in Fields section. We have `F# = (len(file) - len(footer) - F) / sizeof(uint64)` fields. - - - (...) [F] [F + F#] - | Fields | Fields Index. | - |================================|================================| - | | | - | |~~~~~~~~|~~~~~~~~|---...---|||--------|--------|...|--------|| - ||->| Dict | Length | Name ||| 0 | 1 | | F# - 1 || - || |~~~~~~~~|~~~~~~~~|---...---|||--------|----|---|...|--------|| - || | | | - ||===============================|==============|=================| - | | - |----------------------------------------------| - - -## Dictionaries + Postings - -Each of fields has its own dictionary, encoded in [Vellum](https://github.com/couchbase/vellum) format. Dictionary consists of pairs `(term, offset)`, where `offset` indicates the position of postings (list of documents) for this particular term. - - |================================================================|- Dictionaries + - | | Postings + - | | DocValues - | Freq/Norm (chunked) | - | [~~~~~~|~~~~~~~~~~~~~~~~~~~~~~~~~~~~~] | - | |->[ Freq | Norm (float32 under varint) ] | - | | [~~~~~~|~~~~~~~~~~~~~~~~~~~~~~~~~~~~~] | - | | | - | |------------------------------------------------------------| | - | Location Details (chunked) | | - | [~~~~~~|~~~~~|~~~~~~~|~~~~~|~~~~~~|~~~~~~~~|~~~~~] | | - | |->[ Size | Pos | Start | End | Arr# | ArrPos | ... 
] | | - | | [~~~~~~|~~~~~|~~~~~~~|~~~~~|~~~~~~|~~~~~~~~|~~~~~] | | - | | | | - | |----------------------| | | - | Postings List | | | - | |~~~~~~~~|~~~~~|~~|~~~~~~~~|-----------...--| | | - | |->| F/N | LD | Length | ROARING BITMAP | | | - | | |~~~~~|~~|~~~~~~~~|~~~~~~~~|-----------...--| | | - | | |----------------------------------------------| | - | |--------------------------------------| | - | Dictionary | | - | |~~~~~~~~|--------------------------|-...-| | - | |->| Length | VELLUM DATA : (TERM -> OFFSET) | | - | | |~~~~~~~~|----------------------------...-| | - | | | - |======|=========================================================|- DocValues Index - | | | - |======|=========================================================|- Fields - | | | - | |~~~~|~~~|~~~~~~~~|---...---| | - | | Dict | Length | Name | | - | |~~~~~~~~|~~~~~~~~|---...---| | - | | - |================================================================| - -## DocValues - -DocValues Index is `F#` pairs of varints, one pair per field. Each pair of varints indicates start and end point of DocValues slice. - - |================================================================| - | |------...--| | - | |->| DocValues |<-| | - | | |------...--| | | - |==|=================|===========================================|- DocValues Index - ||~|~~~~~~~~~|~~~~~~~|~~| |~~~~~~~~~~~~~~|~~~~~~~~~~~~|| - || DV1 START | DV1 STOP | . . . . . | DV(F#) START | DV(F#) END || - ||~~~~~~~~~~~|~~~~~~~~~~| |~~~~~~~~~~~~~~|~~~~~~~~~~~~|| - |================================================================| - -DocValues is chunked Snappy-compressed values for each document and field. - - [~~~~~~~~~~~~~~~|~~~~~~|~~~~~~~~~|-...-|~~~~~~|~~~~~~~~~|--------------------...-] - [ Doc# in Chunk | Doc1 | Offset1 | ... | DocN | OffsetN | SNAPPY COMPRESSED DATA ] - [~~~~~~~~~~~~~~~|~~~~~~|~~~~~~~~~|-...-|~~~~~~|~~~~~~~~~|--------------------...-] - -Last 16 bytes are description of chunks. 
- - |~~~~~~~~~~~~...~|----------------|----------------| - | Chunk Sizes | Chunk Size Arr | Chunk# | - |~~~~~~~~~~~~...~|----------------|----------------| diff --git a/vendor/github.com/blevesearch/bleve/v2/.gitignore b/vendor/github.com/blugelabs/bluge/.gitignore similarity index 93% rename from vendor/github.com/blevesearch/bleve/v2/.gitignore rename to vendor/github.com/blugelabs/bluge/.gitignore index ab7a1e21a..3bd3ef8b5 100644 --- a/vendor/github.com/blevesearch/bleve/v2/.gitignore +++ b/vendor/github.com/blugelabs/bluge/.gitignore @@ -10,7 +10,7 @@ query_string.y.go.tmp /analysis/token_filters/cld2/cld2-read-only /analysis/token_filters/cld2/libcld2_full.a -/cmd/bleve/bleve +/cmd/bluge/bluge vendor/** !vendor/manifest /y.output diff --git a/vendor/github.com/blugelabs/bluge/.golangci.yml b/vendor/github.com/blugelabs/bluge/.golangci.yml new file mode 100644 index 000000000..19c0b6196 --- /dev/null +++ b/vendor/github.com/blugelabs/bluge/.golangci.yml @@ -0,0 +1,248 @@ +linters-settings: + depguard: + list-type: blacklist + packages: + # logging is allowed only by logutils.Log, logrus + # is allowed to use only in logutils package + - github.com/sirupsen/logrus + packages-with-error-message: + - github.com/sirupsen/logrus: "logging is allowed only by logutils.Log" + dupl: + threshold: 100 + funlen: + lines: 100 + statements: 50 + goconst: + min-len: 2 + min-occurrences: 2 + gocritic: + enabled-tags: + - diagnostic + - experimental + - opinionated + - performance + - style + disabled-checks: + - dupImport # https://github.com/go-critic/go-critic/issues/845 + - ifElseChain + - octalLiteral + - whyNoLint + - wrapperFunc + gocyclo: + min-complexity: 20 # increased from 15 to get us going, but not make things worse + goimports: + local-prefixes: github.com/golangci/golangci-lint + golint: + min-confidence: 0 + gomnd: + settings: + mnd: + # don't include the "operation" and "assign" + checks: argument,case,condition,return + govet: + check-shadowing: true + 
settings: + printf: + funcs: + - (github.com/golangci/golangci-lint/pkg/logutils.Log).Infof + - (github.com/golangci/golangci-lint/pkg/logutils.Log).Warnf + - (github.com/golangci/golangci-lint/pkg/logutils.Log).Errorf + - (github.com/golangci/golangci-lint/pkg/logutils.Log).Fatalf + lll: + line-length: 140 + maligned: + suggest-new: true + misspell: + locale: US + nolintlint: + allow-leading-space: true # don't require machine-readable nolint directives (i.e. with no leading space) + allow-unused: false # report any unused nolint directives + require-explanation: false # don't require an explanation for nolint directives + require-specific: false # don't require nolint directives to be specific about which linter is being skipped + +linters: + # please, do not use `enable-all`: it's deprecated and will be removed soon. + # inverted configuration with `enable-all` and `disable` is not scalable during updates of golangci-lint + disable-all: true + enable: + - bodyclose + - deadcode + - depguard + - dogsled + - dupl + - errcheck + - funlen + - gochecknoinits + - goconst + - gocritic + - gocyclo + - gofmt + - goimports + - golint + - gomnd + - goprintffuncname + - gosec + - gosimple + - govet + - ineffassign + - interfacer + - lll + - misspell + - nakedret + - nolintlint + - rowserrcheck + - scopelint + - staticcheck + - structcheck + - stylecheck + - typecheck + - unconvert + - unparam + - unused + - varcheck + - whitespace + + # don't enable: + # - asciicheck + # - gochecknoglobals + # - gocognit + # - godot + # - godox + # - goerr113 + # - maligned + # - nestif + # - prealloc + # - testpackage + # - wsl + +issues: + # Excluding configuration per-path, per-linter, per-text and per-source + exclude-rules: + - path: _test\.go + linters: + - gomnd + - lll + - funlen + - goconst + - gocyclo + - dupl + - gochecknoinits + - path: cmd/bluge/cmd + linters: + - gochecknoinits + - path: sizes.go + linters: + - gochecknoinits + + # we don't need secure random in test + - path: 
_test\.go + linters: + - gosec + text: G404 + + # allow lock test to invoke itself + - path: index/lock/lock_test.go + linters: + - gosec + text: G204 + + # cannot safely refactor until optimization tests are in place again + - path: index/optimize.go + linters: + - funlen + + # FIXME refactor persister + - path: index/persister.go + linters: + - funlen + text: persisterLoop + + # many analysis tests have non-english words + # lots of false alarms on duplicate code + - path: ^analysis/.*_test\.go$ + linters: + - misspell + - dupl + + # used in some analyzers + - path: analysis/lang/ + linters: + - stylecheck + text: ST1018 + + # many language specific analyzers have + # non-english words or non-word substrings + # lots of false alrams on duplicate code + - path: ^analysis/lang/.*\.go$ + linters: + - misspell + - dupl + - gomnd +# - stylecheck + - gocyclo + - funlen + + # allow init here + - path: analysis/lang/in/scripts.go + linters: + - gochecknoinits + + # and allow init here + - path: analysis/freq.go + linters: + - gochecknoinits + + - path: search/searcher/search_fuzzy.go + linters: + - gochecknoinits + + # has long regexp + - path: analysis/tokenizer/web.go + linters: + - lll + + # needs rewrite to avoid fallthrough + # skipping funlen,gocyclo since ported + - path: analysis/char/asciifolding.go + linters: + - gocritic + - funlen + - gocyclo + + # many hard-coded values + - path: numeric/geo/sloppy.go + linters: + - gomnd + - gochecknoinits + - path: numeric/prefix_coded.go + linters: + - gomnd + + # allow init methods for our size calculations + - path: size.go + linters: + - gochecknoinits + + # https://github.com/go-critic/go-critic/issues/926 + - linters: + - gocritic + text: "unnecessaryDefer:" + + # until we make final decision on config + - linters: + - gocritic + text: "config is heavy" + +run: + skip-dirs: + - test/testdata_etc + - internal/cache + - internal/renameio + - internal/robustio + +# golangci.com configuration +# 
https://github.com/golangci/golangci/wiki/Configuration +service: + golangci-lint-version: 1.23.x # use the fixed version to not introduce new linters unexpectedly + prepare: + - echo "here I can run custom commands, but no preparation needed for this repo" diff --git a/vendor/github.com/blugelabs/bluge/AUTHORS b/vendor/github.com/blugelabs/bluge/AUTHORS new file mode 100644 index 000000000..930e1db60 --- /dev/null +++ b/vendor/github.com/blugelabs/bluge/AUTHORS @@ -0,0 +1,13 @@ +# This is the official list of Bluge authors for copyright purposes. +# +# Names should be added to this file as one of +# Organization's name +# Individual's name +# Individual's name +# +# Please keep the list sorted. + +Marty Schoch +Michael Schuett +Akshay Shekher +Sergio Rubio diff --git a/vendor/github.com/blugelabs/bluge/CONTRIBUTING.md b/vendor/github.com/blugelabs/bluge/CONTRIBUTING.md new file mode 100644 index 000000000..b95ed0a5a --- /dev/null +++ b/vendor/github.com/blugelabs/bluge/CONTRIBUTING.md @@ -0,0 +1,11 @@ +# Contributing to Bluge + +Bluge is an open source project. + +Thank you for your contribution, we appreciate your help! + +## Contributing code + +Portions of existing code are copyright Couchbase, Inc. + +All new contributions should be copyright The Bluge Authors. New contributors should add an appropriate entry to the AUTHORS file at the root of the repository. All contributions must be distributed under the Apache License found in the LICENSE file. 
diff --git a/vendor/github.com/blevesearch/bleve/v2/LICENSE b/vendor/github.com/blugelabs/bluge/LICENSE similarity index 100% rename from vendor/github.com/blevesearch/bleve/v2/LICENSE rename to vendor/github.com/blugelabs/bluge/LICENSE diff --git a/vendor/github.com/blugelabs/bluge/README.md b/vendor/github.com/blugelabs/bluge/README.md new file mode 100644 index 000000000..e94e35fd0 --- /dev/null +++ b/vendor/github.com/blugelabs/bluge/README.md @@ -0,0 +1,85 @@ +# ![Bluge](docs/bluge.png) Bluge + +[![PkgGoDev](https://pkg.go.dev/badge/github.com/blugelabs/bluge)](https://pkg.go.dev/github.com/blugelabs/bluge) +[![Tests](https://github.com/blugelabs/bluge/workflows/Tests/badge.svg?branch=master&event=push)](https://github.com/blugelabs/bluge/actions?query=workflow%3ATests+event%3Apush+branch%3Amaster) +[![Lint](https://github.com/blugelabs/bluge/workflows/Lint/badge.svg?branch=master&event=push)](https://github.com/blugelabs/bluge/actions?query=workflow%3ALint+event%3Apush+branch%3Amaster) + +modern text indexing in go - [blugelabs.com](https://www.blugelabs.com/) + +## Features + +* Supported field types: + * Text, Numeric, Date, Geo Point +* Supported query types: + * Term, Phrase, Match, Match Phrase, Prefix + * Conjunction, Disjunction, Boolean + * Numeric Range, Date Range +* BM25 Similarity/Scoring with pluggable interfaces +* Search result match highlighting +* Extendable Aggregations: + * Bucketing + * Terms + * Numeric Range + * Date Range + * Metrics + * Min/Max/Count/Sum + * Avg/Weighted Avg + * Cardinality Estimation ([HyperLogLog++](https://github.com/axiomhq/hyperloglog)) + * Quantile Approximation ([T-Digest](https://github.com/caio/go-tdigest)) + +## Indexing + +```go + config := bluge.DefaultConfig(path) + writer, err := bluge.OpenWriter(config) + if err != nil { + log.Fatalf("error opening writer: %v", err) + } + defer writer.Close() + + doc := bluge.NewDocument("example"). 
+ AddField(bluge.NewTextField("name", "bluge")) + + err = writer.Update(doc.ID(), doc) + if err != nil { + log.Fatalf("error updating document: %v", err) + } +``` + +## Querying + +```go + reader, err := writer.Reader() + if err != nil { + log.Fatalf("error getting index reader: %v", err) + } + defer reader.Close() + + query := bluge.NewMatchQuery("bluge").SetField("name") + request := bluge.NewTopNSearch(10, query). + WithStandardAggregations() + documentMatchIterator, err := reader.Search(context.Background(), request) + if err != nil { + log.Fatalf("error executing search: %v", err) + } + match, err := documentMatchIterator.Next() + for err == nil && match != nil { + err = match.VisitStoredFields(func(field string, value []byte) bool { + if field == "_id" { + fmt.Printf("match: %s\n", string(value)) + } + return true + }) + if err != nil { + log.Fatalf("error loading stored fields: %v", err) + } + match, err = documentMatchIterator.Next() + } + if err != nil { + log.Fatalf("error iterator document matches: %v", err) + } +``` + +## License + +Apache License Version 2.0 diff --git a/vendor/github.com/blevesearch/zapx/v14/plugin.go b/vendor/github.com/blugelabs/bluge/analysis/analyzer/keyword.go similarity index 71% rename from vendor/github.com/blevesearch/zapx/v14/plugin.go rename to vendor/github.com/blugelabs/bluge/analysis/analyzer/keyword.go index f67297ec2..5fc13e3d9 100644 --- a/vendor/github.com/blevesearch/zapx/v14/plugin.go +++ b/vendor/github.com/blugelabs/bluge/analysis/analyzer/keyword.go @@ -12,16 +12,15 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package zap +package analyzer -// ZapPlugin implements the Plugin interface of -// the blevesearch/scorch_segment_api pkg -type ZapPlugin struct{} +import ( + "github.com/blugelabs/bluge/analysis" + "github.com/blugelabs/bluge/analysis/tokenizer" +) -func (*ZapPlugin) Type() string { - return Type -} - -func (*ZapPlugin) Version() uint32 { - return Version +func NewKeywordAnalyzer() *analysis.Analyzer { + return &analysis.Analyzer{ + Tokenizer: tokenizer.NewSingleTokenTokenizer(), + } } diff --git a/vendor/github.com/blevesearch/zapx/v11/plugin.go b/vendor/github.com/blugelabs/bluge/analysis/analyzer/simple.go similarity index 62% rename from vendor/github.com/blevesearch/zapx/v11/plugin.go rename to vendor/github.com/blugelabs/bluge/analysis/analyzer/simple.go index f67297ec2..97bc6ccdf 100644 --- a/vendor/github.com/blevesearch/zapx/v11/plugin.go +++ b/vendor/github.com/blugelabs/bluge/analysis/analyzer/simple.go @@ -12,16 +12,19 @@ // See the License for the specific language governing permissions and // limitations under the License. -package zap +package analyzer -// ZapPlugin implements the Plugin interface of -// the blevesearch/scorch_segment_api pkg -type ZapPlugin struct{} +import ( + "github.com/blugelabs/bluge/analysis" + "github.com/blugelabs/bluge/analysis/token" + "github.com/blugelabs/bluge/analysis/tokenizer" +) -func (*ZapPlugin) Type() string { - return Type -} - -func (*ZapPlugin) Version() uint32 { - return Version +func NewSimpleAnalyzer() *analysis.Analyzer { + return &analysis.Analyzer{ + Tokenizer: tokenizer.NewLetterTokenizer(), + TokenFilters: []analysis.TokenFilter{ + token.NewLowerCaseFilter(), + }, + } } diff --git a/vendor/github.com/blugelabs/bluge/analysis/analyzer/standard.go b/vendor/github.com/blugelabs/bluge/analysis/analyzer/standard.go new file mode 100644 index 000000000..41e3bbd39 --- /dev/null +++ b/vendor/github.com/blugelabs/bluge/analysis/analyzer/standard.go @@ -0,0 +1,30 @@ +// Copyright (c) 2020 Couchbase, Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package analyzer + +import ( + "github.com/blugelabs/bluge/analysis" + "github.com/blugelabs/bluge/analysis/token" + "github.com/blugelabs/bluge/analysis/tokenizer" +) + +func NewStandardAnalyzer() *analysis.Analyzer { + return &analysis.Analyzer{ + Tokenizer: tokenizer.NewUnicodeTokenizer(), + TokenFilters: []analysis.TokenFilter{ + token.NewLowerCaseFilter(), + }, + } +} diff --git a/vendor/github.com/blugelabs/bluge/analysis/analyzer/web.go b/vendor/github.com/blugelabs/bluge/analysis/analyzer/web.go new file mode 100644 index 000000000..3b78846c1 --- /dev/null +++ b/vendor/github.com/blugelabs/bluge/analysis/analyzer/web.go @@ -0,0 +1,32 @@ +// Copyright (c) 2020 Couchbase, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package analyzer + +import ( + "github.com/blugelabs/bluge/analysis" + "github.com/blugelabs/bluge/analysis/lang/en" + "github.com/blugelabs/bluge/analysis/token" + "github.com/blugelabs/bluge/analysis/tokenizer" +) + +func NewWebAnalyzer() *analysis.Analyzer { + return &analysis.Analyzer{ + Tokenizer: tokenizer.NewWebTokenizer(), + TokenFilters: []analysis.TokenFilter{ + token.NewLowerCaseFilter(), + en.StopWordsFilter(), + }, + } +} diff --git a/vendor/github.com/blugelabs/bluge/analysis/freq.go b/vendor/github.com/blugelabs/bluge/analysis/freq.go new file mode 100644 index 000000000..85f037dae --- /dev/null +++ b/vendor/github.com/blugelabs/bluge/analysis/freq.go @@ -0,0 +1,200 @@ +// Copyright (c) 2020 Couchbase, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package analysis + +import ( + "reflect" + + segment "github.com/blugelabs/bluge_segment_api" +) + +var reflectStaticSizeTokenLocation int +var reflectStaticSizeTokenFreq int + +func init() { + var tl TokenLocation + reflectStaticSizeTokenLocation = int(reflect.TypeOf(tl).Size()) + var tf TokenFreq + reflectStaticSizeTokenFreq = int(reflect.TypeOf(tf).Size()) +} + +// TokenLocation represents one occurrence of a term at a particular location in +// a field. Start, End and Position have the same meaning as in analysis.Token. +// Field and ArrayPositions identify the field value in the source document. +// See document.Field for details. 
+type TokenLocation struct { + FieldVal string + StartVal int + EndVal int + PositionVal int +} + +func (tl *TokenLocation) Field() string { + return tl.FieldVal +} + +func (tl *TokenLocation) Pos() int { + return tl.PositionVal +} + +func (tl *TokenLocation) Start() int { + return tl.StartVal +} + +func (tl *TokenLocation) End() int { + return tl.EndVal +} + +func (tl *TokenLocation) Size() int { + return reflectStaticSizeTokenLocation +} + +// TokenFreq represents all the occurrences of a term in all fields of a +// document. +type TokenFreq struct { + TermVal []byte + Locations []*TokenLocation + frequency int +} + +func (tf *TokenFreq) Size() int { + rv := reflectStaticSizeTokenFreq + rv += len(tf.TermVal) + for _, loc := range tf.Locations { + rv += loc.Size() + } + return rv +} + +func (tf *TokenFreq) Term() []byte { + return tf.TermVal +} + +func (tf *TokenFreq) Frequency() int { + return tf.frequency +} + +func (tf *TokenFreq) EachLocation(location segment.VisitLocation) { + for _, tl := range tf.Locations { + location(tl) + } +} + +// TokenFrequencies maps document terms to their combined frequencies from all +// fields. +type TokenFrequencies map[string]*TokenFreq + +func (tfs TokenFrequencies) Size() int { + rv := sizeOfMap + rv += len(tfs) * (sizeOfString + sizeOfPtr) + for k, v := range tfs { + rv += len(k) + rv += v.Size() + } + return rv +} + +func (tfs TokenFrequencies) MergeAll(remoteField string, other TokenFrequencies) { + // walk the new token frequencies + for tfk, tf := range other { + tfs.mergeOne(remoteField, tfk, tf) + } +} + +func (tfs TokenFrequencies) mergeOne(remoteField, tfk string, tf *TokenFreq) { + // set the remoteField value in incoming token freqs + for _, l := range tf.Locations { + l.FieldVal = remoteField + } + existingTf, exists := tfs[tfk] + if exists { + existingTf.Locations = append(existingTf.Locations, tf.Locations...) 
+ existingTf.frequency += tf.frequency + } else { + tfs[tfk] = &TokenFreq{ + TermVal: tf.TermVal, + frequency: tf.frequency, + Locations: make([]*TokenLocation, len(tf.Locations)), + } + copy(tfs[tfk].Locations, tf.Locations) + } +} + +func (tfs TokenFrequencies) MergeOneBytes(remoteField string, tfk []byte, tf *TokenFreq) { + // set the remoteField value in incoming token freqs + for _, l := range tf.Locations { + l.FieldVal = remoteField + } + existingTf, exists := tfs[string(tfk)] + if exists { + existingTf.Locations = append(existingTf.Locations, tf.Locations...) + existingTf.frequency += tf.frequency + } else { + tfs[string(tfk)] = &TokenFreq{ + TermVal: tf.TermVal, + frequency: tf.frequency, + Locations: make([]*TokenLocation, len(tf.Locations)), + } + copy(tfs[string(tfk)].Locations, tf.Locations) + } +} + +func TokenFrequency(tokens TokenStream, includeTermVectors bool, startOffset int) ( + tokenFreqs TokenFrequencies, position int) { + tokenFreqs = make(map[string]*TokenFreq, len(tokens)) + + if includeTermVectors { + tls := make([]TokenLocation, len(tokens)) + tlNext := 0 + + position = startOffset + for _, token := range tokens { + position += token.PositionIncr + tls[tlNext] = TokenLocation{ + StartVal: token.Start, + EndVal: token.End, + PositionVal: position, + } + + curr, ok := tokenFreqs[string(token.Term)] + if ok { + curr.Locations = append(curr.Locations, &tls[tlNext]) + curr.frequency++ + } else { + tokenFreqs[string(token.Term)] = &TokenFreq{ + TermVal: token.Term, + Locations: []*TokenLocation{&tls[tlNext]}, + frequency: 1, + } + } + + tlNext++ + } + } else { + for _, token := range tokens { + curr, exists := tokenFreqs[string(token.Term)] + if exists { + curr.frequency++ + } else { + tokenFreqs[string(token.Term)] = &TokenFreq{ + TermVal: token.Term, + frequency: 1, + } + } + } + } + + return tokenFreqs, position +} diff --git a/vendor/github.com/blugelabs/bluge/analysis/lang/en/analyzer_en.go 
b/vendor/github.com/blugelabs/bluge/analysis/lang/en/analyzer_en.go new file mode 100644 index 000000000..1aa7bd764 --- /dev/null +++ b/vendor/github.com/blugelabs/bluge/analysis/lang/en/analyzer_en.go @@ -0,0 +1,42 @@ +// Copyright (c) 2020 Couchbase, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package en implements an analyzer with reasonable defaults for processing +// English text. +// +// It strips possessive suffixes ('s), transforms tokens to lower case, +// removes stopwords from a built-in list, and applies porter stemming. +// +// The built-in stopwords list is defined in EnglishStopWords. 
+package en + +import ( + "github.com/blugelabs/bluge/analysis" + "github.com/blugelabs/bluge/analysis/token" + "github.com/blugelabs/bluge/analysis/tokenizer" +) + +const AnalyzerName = "en" + +func NewAnalyzer() *analysis.Analyzer { + return &analysis.Analyzer{ + Tokenizer: tokenizer.NewUnicodeTokenizer(), + TokenFilters: []analysis.TokenFilter{ + NewPossessiveFilter(), + token.NewLowerCaseFilter(), + StopWordsFilter(), + StemmerFilter(), + }, + } +} diff --git a/vendor/github.com/blevesearch/bleve/v2/analysis/lang/en/possessive_filter_en.go b/vendor/github.com/blugelabs/bluge/analysis/lang/en/possessive_filter_en.go similarity index 73% rename from vendor/github.com/blevesearch/bleve/v2/analysis/lang/en/possessive_filter_en.go rename to vendor/github.com/blugelabs/bluge/analysis/lang/en/possessive_filter_en.go index 79c2489e2..1e95a0645 100644 --- a/vendor/github.com/blevesearch/bleve/v2/analysis/lang/en/possessive_filter_en.go +++ b/vendor/github.com/blugelabs/bluge/analysis/lang/en/possessive_filter_en.go @@ -1,4 +1,4 @@ -// Copyright (c) 2014 Couchbase, Inc. +// Copyright (c) 2020 Couchbase, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -17,20 +17,13 @@ package en import ( "unicode/utf8" - "github.com/blevesearch/bleve/v2/analysis" - "github.com/blevesearch/bleve/v2/registry" + "github.com/blugelabs/bluge/analysis" ) -// PossessiveName is the name PossessiveFilter is registered as -// in the bleve registry. -const PossessiveName = "possessive_en" - const rightSingleQuotationMark = '’' const apostrophe = '\'' const fullWidthApostrophe = ''' -const apostropheChars = rightSingleQuotationMark + apostrophe + fullWidthApostrophe - // PossessiveFilter implements a TokenFilter which // strips the English possessive suffix ('s) from tokens. 
// It handle a variety of apostrophe types, is case-insensitive @@ -57,11 +50,3 @@ func (s *PossessiveFilter) Filter(input analysis.TokenStream) analysis.TokenStre } return input } - -func PossessiveFilterConstructor(config map[string]interface{}, cache *registry.Cache) (analysis.TokenFilter, error) { - return NewPossessiveFilter(), nil -} - -func init() { - registry.RegisterTokenFilter(PossessiveName, PossessiveFilterConstructor) -} diff --git a/vendor/github.com/blevesearch/bleve/v2/analysis/lang/en/stemmer_en_snowball.go b/vendor/github.com/blugelabs/bluge/analysis/lang/en/stemmer_en_snowball.go similarity index 68% rename from vendor/github.com/blevesearch/bleve/v2/analysis/lang/en/stemmer_en_snowball.go rename to vendor/github.com/blugelabs/bluge/analysis/lang/en/stemmer_en_snowball.go index ab30b8b19..340a2f55c 100644 --- a/vendor/github.com/blevesearch/bleve/v2/analysis/lang/en/stemmer_en_snowball.go +++ b/vendor/github.com/blugelabs/bluge/analysis/lang/en/stemmer_en_snowball.go @@ -15,19 +15,15 @@ package en import ( - "github.com/blevesearch/bleve/v2/analysis" - "github.com/blevesearch/bleve/v2/registry" - "github.com/blevesearch/snowballstem" "github.com/blevesearch/snowballstem/english" + "github.com/blugelabs/bluge/analysis" ) -const SnowballStemmerName = "stemmer_en_snowball" - type EnglishStemmerFilter struct { } -func NewEnglishStemmerFilter() *EnglishStemmerFilter { +func StemmerFilter() *EnglishStemmerFilter { return &EnglishStemmerFilter{} } @@ -39,11 +35,3 @@ func (s *EnglishStemmerFilter) Filter(input analysis.TokenStream) analysis.Token } return input } - -func EnglishStemmerFilterConstructor(config map[string]interface{}, cache *registry.Cache) (analysis.TokenFilter, error) { - return NewEnglishStemmerFilter(), nil -} - -func init() { - registry.RegisterTokenFilter(SnowballStemmerName, EnglishStemmerFilterConstructor) -} diff --git a/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/store/boltdb/stats.go 
b/vendor/github.com/blugelabs/bluge/analysis/lang/en/stop_filter_en.go similarity index 73% rename from vendor/github.com/blevesearch/bleve/v2/index/upsidedown/store/boltdb/stats.go rename to vendor/github.com/blugelabs/bluge/analysis/lang/en/stop_filter_en.go index e50e55274..821fd1695 100644 --- a/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/store/boltdb/stats.go +++ b/vendor/github.com/blugelabs/bluge/analysis/lang/en/stop_filter_en.go @@ -1,4 +1,4 @@ -// Copyright (c) 2014 Couchbase, Inc. +// Copyright (c) 2020 Couchbase, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,15 +12,12 @@ // See the License for the specific language governing permissions and // limitations under the License. -package boltdb +package en -import "encoding/json" +import ( + "github.com/blugelabs/bluge/analysis/token" +) -type stats struct { - s *Store -} - -func (s *stats) MarshalJSON() ([]byte, error) { - bs := s.s.db.Stats() - return json.Marshal(bs) +func StopWordsFilter() *token.StopTokensFilter { + return token.NewStopTokensFilter(StopWords()) } diff --git a/vendor/github.com/blevesearch/bleve/v2/analysis/lang/en/stop_words_en.go b/vendor/github.com/blugelabs/bluge/analysis/lang/en/stop_words_en.go similarity index 90% rename from vendor/github.com/blevesearch/bleve/v2/analysis/lang/en/stop_words_en.go rename to vendor/github.com/blugelabs/bluge/analysis/lang/en/stop_words_en.go index 9b6ca86a7..c655fdd1d 100644 --- a/vendor/github.com/blevesearch/bleve/v2/analysis/lang/en/stop_words_en.go +++ b/vendor/github.com/blugelabs/bluge/analysis/lang/en/stop_words_en.go @@ -1,18 +1,15 @@ package en import ( - "github.com/blevesearch/bleve/v2/analysis" - "github.com/blevesearch/bleve/v2/registry" + "github.com/blugelabs/bluge/analysis" ) -const StopName = "stop_en" - -// EnglishStopWords is the built-in list of stopwords used by the "stop_en" TokenFilter. 
+// StopWordsBytes is the built-in list of stopwords used by the "stop_en" TokenFilter. // // this content was obtained from: // lucene-4.7.2/analysis/common/src/resources/org/apache/lucene/analysis/snowball/ // ` was changed to ' to allow for literal string -var EnglishStopWords = []byte(` | From svn.tartarus.org/snowball/trunk/website/algorithms/english/stop.txt +var StopWordsBytes = []byte(` | From svn.tartarus.org/snowball/trunk/website/algorithms/english/stop.txt | This file is distributed under the BSD License. | See http://snowball.tartarus.org/license.php | Also see http://www.opensource.org/licenses/bsd-license.html @@ -333,12 +330,8 @@ very | long `) -func TokenMapConstructor(config map[string]interface{}, cache *registry.Cache) (analysis.TokenMap, error) { +func StopWords() analysis.TokenMap { rv := analysis.NewTokenMap() - err := rv.LoadBytes(EnglishStopWords) - return rv, err -} - -func init() { - registry.RegisterTokenMap(StopName, TokenMapConstructor) + rv.LoadBytes(StopWordsBytes) + return rv } diff --git a/vendor/github.com/blugelabs/bluge/analysis/size.go b/vendor/github.com/blugelabs/bluge/analysis/size.go new file mode 100644 index 000000000..d56ad7313 --- /dev/null +++ b/vendor/github.com/blugelabs/bluge/analysis/size.go @@ -0,0 +1,16 @@ +package analysis + +import "reflect" + +var sizeOfMap int +var sizeOfPtr int +var sizeOfString int + +func init() { + var m map[int]int + sizeOfMap = int(reflect.TypeOf(m).Size()) + var ptr *int + sizeOfPtr = int(reflect.TypeOf(ptr).Size()) + var str string + sizeOfString = int(reflect.TypeOf(str).Size()) +} diff --git a/vendor/github.com/blevesearch/bleve/v2/analysis/test_words.txt b/vendor/github.com/blugelabs/bluge/analysis/test_words.txt similarity index 100% rename from vendor/github.com/blevesearch/bleve/v2/analysis/test_words.txt rename to vendor/github.com/blugelabs/bluge/analysis/test_words.txt diff --git a/vendor/github.com/blugelabs/bluge/analysis/token/apostrophe.go 
b/vendor/github.com/blugelabs/bluge/analysis/token/apostrophe.go new file mode 100644 index 000000000..2f448a7e2 --- /dev/null +++ b/vendor/github.com/blugelabs/bluge/analysis/token/apostrophe.go @@ -0,0 +1,41 @@ +// Copyright (c) 2020 Couchbase, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package token + +import ( + "bytes" + + "github.com/blugelabs/bluge/analysis" +) + +const Apostrophes = string(Apostrophe) + string(RightSingleQuotationMark) + +type ApostropheFilter struct{} + +func NewApostropheFilter() *ApostropheFilter { + return &ApostropheFilter{} +} + +func (s *ApostropheFilter) Filter(input analysis.TokenStream) analysis.TokenStream { + for _, token := range input { + firstApostrophe := bytes.IndexAny(token.Term, Apostrophes) + if firstApostrophe >= 0 { + // found an apostrophe + token.Term = token.Term[0:firstApostrophe] + } + } + + return input +} diff --git a/vendor/github.com/blugelabs/bluge/analysis/token/camelcase.go b/vendor/github.com/blugelabs/bluge/analysis/token/camelcase.go new file mode 100644 index 000000000..de8207432 --- /dev/null +++ b/vendor/github.com/blugelabs/bluge/analysis/token/camelcase.go @@ -0,0 +1,65 @@ +// Copyright (c) 2020 Couchbase, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package token + +import ( + "bytes" + "unicode/utf8" + + "github.com/blugelabs/bluge/analysis" +) + +// CamelCaseFilter splits a given token into a set of tokens where each resulting token +// falls into one the following classes: +// 1) Upper case followed by lower case letters. +// Terminated by a number, an upper case letter, and a non alpha-numeric symbol. +// 2) Upper case followed by upper case letters. +// Terminated by a number, an upper case followed by a lower case letter, and a non alpha-numeric symbol. +// 3) Lower case followed by lower case letters. +// Terminated by a number, an upper case letter, and a non alpha-numeric symbol. +// 4) Number followed by numbers. +// Terminated by a letter, and a non alpha-numeric symbol. +// 5) Non alpha-numeric symbol followed by non alpha-numeric symbols. +// Terminated by a number, and a letter. +// +// It does a one-time sequential pass over an input token, from left to right. +// The scan is greedy and generates the longest substring that fits into one of the classes. +// +// See the test file for examples of classes and their parsings. 
+type CamelCaseFilter struct{} + +func NewCamelCaseFilter() *CamelCaseFilter { + return &CamelCaseFilter{} +} + +func (f *CamelCaseFilter) Filter(input analysis.TokenStream) analysis.TokenStream { + rv := make(analysis.TokenStream, 0, len(input)) + + for _, token := range input { + runeCount := utf8.RuneCount(token.Term) + runes := bytes.Runes(token.Term) + + p := NewParser(runeCount, token.Start) + for i := 0; i < runeCount; i++ { + if i+1 >= runeCount { + p.Push(runes[i], nil) + } else { + p.Push(runes[i], &runes[i+1]) + } + } + rv = append(rv, p.FlushTokens()...) + } + return rv +} diff --git a/vendor/github.com/blugelabs/bluge/analysis/token/camelcase_parser.go b/vendor/github.com/blugelabs/bluge/analysis/token/camelcase_parser.go new file mode 100644 index 000000000..c119c3e35 --- /dev/null +++ b/vendor/github.com/blugelabs/bluge/analysis/token/camelcase_parser.go @@ -0,0 +1,100 @@ +// Copyright (c) 2020 Couchbase, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package token + +import ( + "github.com/blugelabs/bluge/analysis" +) + +func (p *Parser) buildTokenFromTerm(buffer []rune) *analysis.Token { + term := analysis.BuildTermFromRunes(buffer) + token := &analysis.Token{ + Term: term, + PositionIncr: 1, + Start: p.index, + End: p.index + len(term), + } + p.index += len(term) + return token +} + +// Parser accepts a symbol and passes it to the current state (representing a class). +// The state can accept it (and accumulate it). 
Otherwise, the parser creates a new state that +// starts with the pushed symbol. +// +// Parser accumulates a new resulting token every time it switches state. +// Use FlushTokens() to get the results after the last symbol was pushed. +type Parser struct { + bufferLen int + buffer []rune + current State + tokens []*analysis.Token + index int +} + +func NewParser(length, index int) *Parser { + return &Parser{ + bufferLen: length, + buffer: make([]rune, 0, length), + tokens: make([]*analysis.Token, 0, length), + index: index, + } +} + +func (p *Parser) Push(sym rune, peek *rune) { + if p.current == nil { + // the start of parsing + p.current = p.NewState(sym) + p.buffer = append(p.buffer, sym) + } else if p.current.Member(sym, peek) { + // same state, just accumulate + p.buffer = append(p.buffer, sym) + } else { + // the old state is no more, thus convert the buffer + p.tokens = append(p.tokens, p.buildTokenFromTerm(p.buffer)) + + // let the new state begin + p.current = p.NewState(sym) + p.buffer = make([]rune, 0, p.bufferLen) + p.buffer = append(p.buffer, sym) + } +} + +// Note. States have to have different starting symbols. +func (p *Parser) NewState(sym rune) State { + var found State + + found = &LowerCaseState{} + if found.StartSym(sym) { + return found + } + + found = &UpperCaseState{} + if found.StartSym(sym) { + return found + } + + found = &NumberCaseState{} + if found.StartSym(sym) { + return found + } + + return &NonAlphaNumericCaseState{} +} + +func (p *Parser) FlushTokens() []*analysis.Token { + p.tokens = append(p.tokens, p.buildTokenFromTerm(p.buffer)) + return p.tokens +} diff --git a/vendor/github.com/blugelabs/bluge/analysis/token/camelcase_states.go b/vendor/github.com/blugelabs/bluge/analysis/token/camelcase_states.go new file mode 100644 index 000000000..553bffa28 --- /dev/null +++ b/vendor/github.com/blugelabs/bluge/analysis/token/camelcase_states.go @@ -0,0 +1,87 @@ +// Copyright (c) 2020 Couchbase, Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package token + +import ( + "unicode" +) + +// States codify the classes that the parser recognizes. +type State interface { + // is _sym_ the start character + StartSym(sym rune) bool + + // is _sym_ a member of a class. + // peek, the next sym on the tape, can also be used to determine a class. + Member(sym rune, peek *rune) bool +} + +type LowerCaseState struct{} + +func (s *LowerCaseState) Member(sym rune, peek *rune) bool { + return unicode.IsLower(sym) +} + +func (s *LowerCaseState) StartSym(sym rune) bool { + return s.Member(sym, nil) +} + +type UpperCaseState struct { + startedCollecting bool // denotes that the start character has been read + collectingUpper bool // denotes if this is a class of all upper case letters +} + +func (s *UpperCaseState) Member(sym rune, peek *rune) bool { + if !(unicode.IsLower(sym) || unicode.IsUpper(sym)) { + return false + } + + if peek != nil && unicode.IsUpper(sym) && unicode.IsLower(*peek) { + return false + } + + if !s.startedCollecting { + // now we have to determine if upper-case letters are collected. 
+ s.startedCollecting = true + s.collectingUpper = unicode.IsUpper(sym) + return true + } + + return s.collectingUpper == unicode.IsUpper(sym) +} + +func (s *UpperCaseState) StartSym(sym rune) bool { + return unicode.IsUpper(sym) +} + +type NumberCaseState struct{} + +func (s *NumberCaseState) Member(sym rune, peek *rune) bool { + return unicode.IsNumber(sym) +} + +func (s *NumberCaseState) StartSym(sym rune) bool { + return s.Member(sym, nil) +} + +type NonAlphaNumericCaseState struct{} + +func (s *NonAlphaNumericCaseState) Member(sym rune, peek *rune) bool { + return !unicode.IsLower(sym) && !unicode.IsUpper(sym) && !unicode.IsNumber(sym) +} + +func (s *NonAlphaNumericCaseState) StartSym(sym rune) bool { + return s.Member(sym, nil) +} diff --git a/vendor/github.com/blugelabs/bluge/analysis/token/dict.go b/vendor/github.com/blugelabs/bluge/analysis/token/dict.go new file mode 100644 index 000000000..8f4154eae --- /dev/null +++ b/vendor/github.com/blugelabs/bluge/analysis/token/dict.go @@ -0,0 +1,94 @@ +// Copyright (c) 2020 Couchbase, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package token + +import ( + "bytes" + "unicode/utf8" + + "github.com/blugelabs/bluge/analysis" +) + +type DictionaryCompoundFilter struct { + dict analysis.TokenMap + minWordSize int + minSubWordSize int + maxSubWordSize int + onlyLongestMatch bool +} + +func NewDictionaryCompoundFilter(dict analysis.TokenMap, minWordSize, minSubWordSize, maxSubWordSize int, + onlyLongestMatch bool) *DictionaryCompoundFilter { + return &DictionaryCompoundFilter{ + dict: dict, + minWordSize: minWordSize, + minSubWordSize: minSubWordSize, + maxSubWordSize: maxSubWordSize, + onlyLongestMatch: onlyLongestMatch, + } +} + +func (f *DictionaryCompoundFilter) Filter(input analysis.TokenStream) analysis.TokenStream { + rv := make(analysis.TokenStream, 0, len(input)) + + for _, token := range input { + rv = append(rv, token) + tokenLen := utf8.RuneCount(token.Term) + if tokenLen >= f.minWordSize { + newtokens := f.decompose(token) + for _, newtoken := range newtokens { + rv = append(rv, newtoken) + } + } + } + + return rv +} + +func (f *DictionaryCompoundFilter) decompose(token *analysis.Token) []*analysis.Token { + runes := bytes.Runes(token.Term) + rv := make([]*analysis.Token, 0) + rlen := len(runes) + for i := 0; i <= (rlen - f.minSubWordSize); i++ { + var longestMatchToken *analysis.Token + for j := f.minSubWordSize; j <= f.maxSubWordSize; j++ { + if i+j > rlen { + break + } + _, inDict := f.dict[string(runes[i:i+j])] + if inDict { + newtoken := analysis.Token{ + Term: []byte(string(runes[i : i+j])), + PositionIncr: 0, + Start: token.Start + i, + End: token.Start + i + j, + Type: token.Type, + KeyWord: token.KeyWord, + } + if f.onlyLongestMatch { + if longestMatchToken == nil || utf8.RuneCount(longestMatchToken.Term) < j { + longestMatchToken = &newtoken + } + } else { + rv = append(rv, &newtoken) + } + } + } + if f.onlyLongestMatch && longestMatchToken != nil { + rv = append(rv, longestMatchToken) + } + } + return rv +} diff --git 
a/vendor/github.com/blugelabs/bluge/analysis/token/edgengram.go b/vendor/github.com/blugelabs/bluge/analysis/token/edgengram.go new file mode 100644 index 000000000..e0d005490 --- /dev/null +++ b/vendor/github.com/blugelabs/bluge/analysis/token/edgengram.go @@ -0,0 +1,96 @@ +// Copyright (c) 2020 Couchbase, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package token + +import ( + "bytes" + "unicode/utf8" + + "github.com/blugelabs/bluge/analysis" +) + +type Side bool + +const BACK Side = true +const FRONT Side = false + +type EdgeNgramFilter struct { + back Side + minLength int + maxLength int +} + +func NewEdgeNgramFilter(side Side, minLength, maxLength int) *EdgeNgramFilter { + return &EdgeNgramFilter{ + back: side, + minLength: minLength, + maxLength: maxLength, + } +} + +func (s *EdgeNgramFilter) Filter(input analysis.TokenStream) analysis.TokenStream { + rv := make(analysis.TokenStream, 0, len(input)) + + for _, token := range input { + first := true + runeCount := utf8.RuneCount(token.Term) + runes := bytes.Runes(token.Term) + if s.back { + i := runeCount + // index of the starting rune for this token + for ngramSize := s.minLength; ngramSize <= s.maxLength; ngramSize++ { + // build an ngram of this size starting at i + if i-ngramSize >= 0 { + ngramTerm := analysis.BuildTermFromRunes(runes[i-ngramSize : i]) + token := analysis.Token{ + PositionIncr: 0, + Start: token.Start, + End: token.End, + Type: token.Type, + Term: ngramTerm, + } + if 
first { + token.PositionIncr = 1 // set first token to offset 1 + first = false + } + rv = append(rv, &token) + } + } + } else { + i := 0 + // index of the starting rune for this token + for ngramSize := s.minLength; ngramSize <= s.maxLength; ngramSize++ { + // build an ngram of this size starting at i + if i+ngramSize <= runeCount { + ngramTerm := analysis.BuildTermFromRunes(runes[i : i+ngramSize]) + token := analysis.Token{ + PositionIncr: 0, + Start: token.Start, + End: token.End, + Type: token.Type, + Term: ngramTerm, + } + if first { + token.PositionIncr = 1 // set first token to offset 1 + first = false + } + rv = append(rv, &token) + } + } + } + } + + return rv +} diff --git a/vendor/github.com/blugelabs/bluge/analysis/token/elision.go b/vendor/github.com/blugelabs/bluge/analysis/token/elision.go new file mode 100644 index 000000000..1efc3889d --- /dev/null +++ b/vendor/github.com/blugelabs/bluge/analysis/token/elision.go @@ -0,0 +1,54 @@ +// Copyright (c) 2020 Couchbase, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package token + +import ( + "unicode/utf8" + + "github.com/blugelabs/bluge/analysis" +) + +const RightSingleQuotationMark = '’' +const Apostrophe = '\'' + +type ElisionFilter struct { + articles analysis.TokenMap +} + +func NewElisionFilter(articles analysis.TokenMap) *ElisionFilter { + return &ElisionFilter{ + articles: articles, + } +} + +func (s *ElisionFilter) Filter(input analysis.TokenStream) analysis.TokenStream { + for _, token := range input { + term := token.Term + for i := 0; i < len(term); { + r, size := utf8.DecodeRune(term[i:]) + if r == Apostrophe || r == RightSingleQuotationMark { + // see if the prefix matches one of the articles + prefix := term[0:i] + _, articleMatch := s.articles[string(prefix)] + if articleMatch { + token.Term = term[i+size:] + break + } + } + i += size + } + } + return input +} diff --git a/vendor/github.com/blugelabs/bluge/analysis/token/keyword.go b/vendor/github.com/blugelabs/bluge/analysis/token/keyword.go new file mode 100644 index 000000000..11c731fa4 --- /dev/null +++ b/vendor/github.com/blugelabs/bluge/analysis/token/keyword.go @@ -0,0 +1,39 @@ +// Copyright (c) 2020 Couchbase, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package token + +import ( + "github.com/blugelabs/bluge/analysis" +) + +type KeyWordMarkerFilter struct { + keyWords analysis.TokenMap +} + +func NewKeyWordMarkerFilter(keyWords analysis.TokenMap) *KeyWordMarkerFilter { + return &KeyWordMarkerFilter{ + keyWords: keyWords, + } +} + +func (f *KeyWordMarkerFilter) Filter(input analysis.TokenStream) analysis.TokenStream { + for _, token := range input { + _, isKeyWord := f.keyWords[string(token.Term)] + if isKeyWord { + token.KeyWord = true + } + } + return input +} diff --git a/vendor/github.com/blugelabs/bluge/analysis/token/length.go b/vendor/github.com/blugelabs/bluge/analysis/token/length.go new file mode 100644 index 000000000..143164dc0 --- /dev/null +++ b/vendor/github.com/blugelabs/bluge/analysis/token/length.go @@ -0,0 +1,57 @@ +// Copyright (c) 2020 Couchbase, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package token + +import ( + "unicode/utf8" + + "github.com/blugelabs/bluge/analysis" +) + +type LengthFilter struct { + min int + max int +} + +func NewLengthFilter(min, max int) *LengthFilter { + return &LengthFilter{ + min: min, + max: max, + } +} + +func (f *LengthFilter) Filter(input analysis.TokenStream) analysis.TokenStream { + rv := make(analysis.TokenStream, 0, len(input)) + + var skipped int + for _, token := range input { + wordLen := utf8.RuneCount(token.Term) + if f.min > 0 && f.min > wordLen { + skipped += token.PositionIncr + continue + } + if f.max > 0 && f.max < wordLen { + skipped += token.PositionIncr + continue + } + if skipped > 0 { + token.PositionIncr += skipped + skipped = 0 + } + rv = append(rv, token) + } + + return rv +} diff --git a/vendor/github.com/blevesearch/bleve/v2/analysis/token/lowercase/lowercase.go b/vendor/github.com/blugelabs/bluge/analysis/token/lowercase.go similarity index 80% rename from vendor/github.com/blevesearch/bleve/v2/analysis/token/lowercase/lowercase.go rename to vendor/github.com/blugelabs/bluge/analysis/token/lowercase.go index a1b6dbd05..9fbca3aee 100644 --- a/vendor/github.com/blevesearch/bleve/v2/analysis/token/lowercase/lowercase.go +++ b/vendor/github.com/blugelabs/bluge/analysis/token/lowercase.go @@ -1,4 +1,4 @@ -// Copyright (c) 2014 Couchbase, Inc. +// Copyright (c) 2020 Couchbase, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,22 +14,17 @@ // Package lowercase implements a TokenFilter which converts // tokens to lower case according to unicode rules. 
-package lowercase +package token import ( "bytes" "unicode" "unicode/utf8" - "github.com/blevesearch/bleve/v2/analysis" - "github.com/blevesearch/bleve/v2/registry" + "github.com/blugelabs/bluge/analysis" ) -// Name is the name used to register LowerCaseFilter in the bleve registry -const Name = "to_lower" - -type LowerCaseFilter struct { -} +type LowerCaseFilter struct{} func NewLowerCaseFilter() *LowerCaseFilter { return &LowerCaseFilter{} @@ -42,14 +37,6 @@ func (f *LowerCaseFilter) Filter(input analysis.TokenStream) analysis.TokenStrea return input } -func LowerCaseFilterConstructor(config map[string]interface{}, cache *registry.Cache) (analysis.TokenFilter, error) { - return NewLowerCaseFilter(), nil -} - -func init() { - registry.RegisterTokenFilter(Name, LowerCaseFilterConstructor) -} - // toLowerDeferredCopy will function exactly like // bytes.ToLower() only it will reuse (overwrite) // the original byte array when possible @@ -95,9 +82,8 @@ func toLowerDeferredCopy(s []byte) []byte { copy(rv[:j], s[:j]) copy(rv[j:], rest) return rv - } else { - utf8.EncodeRune(s[j:], l) } + utf8.EncodeRune(s[j:], l) i += wid j += lwid } diff --git a/vendor/github.com/blugelabs/bluge/analysis/token/ngram.go b/vendor/github.com/blugelabs/bluge/analysis/token/ngram.go new file mode 100644 index 000000000..f7da4b62e --- /dev/null +++ b/vendor/github.com/blugelabs/bluge/analysis/token/ngram.go @@ -0,0 +1,67 @@ +// Copyright (c) 2020 Couchbase, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package token + +import ( + "bytes" + "unicode/utf8" + + "github.com/blugelabs/bluge/analysis" +) + +type NgramFilter struct { + minLength int + maxLength int +} + +func NewNgramFilter(minLength, maxLength int) *NgramFilter { + return &NgramFilter{ + minLength: minLength, + maxLength: maxLength, + } +} + +func (s *NgramFilter) Filter(input analysis.TokenStream) analysis.TokenStream { + rv := make(analysis.TokenStream, 0, len(input)) + + for _, token := range input { + first := true + runeCount := utf8.RuneCount(token.Term) + runes := bytes.Runes(token.Term) + for i := 0; i < runeCount; i++ { + // index of the starting rune for this token + for ngramSize := s.minLength; ngramSize <= s.maxLength; ngramSize++ { + // build an ngram of this size starting at i + if i+ngramSize <= runeCount { + ngramTerm := analysis.BuildTermFromRunes(runes[i : i+ngramSize]) + token := analysis.Token{ + PositionIncr: 0, + Start: token.Start, + End: token.End, + Type: token.Type, + Term: ngramTerm, + } + if first { + token.PositionIncr = 1 // set first token to offset 1 + first = false + } + rv = append(rv, &token) + } + } + } + } + + return rv +} diff --git a/vendor/github.com/blevesearch/bleve/v2/analysis/token/porter/porter.go b/vendor/github.com/blugelabs/bluge/analysis/token/porter.go similarity index 71% rename from vendor/github.com/blevesearch/bleve/v2/analysis/token/porter/porter.go rename to vendor/github.com/blugelabs/bluge/analysis/token/porter.go index 95af0fa72..8571f93ab 100644 --- a/vendor/github.com/blevesearch/bleve/v2/analysis/token/porter/porter.go +++ b/vendor/github.com/blugelabs/bluge/analysis/token/porter.go @@ -1,4 +1,4 @@ -// Copyright (c) 2014 Couchbase, Inc. +// Copyright (c) 2020 Couchbase, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -12,21 +12,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -package porter +package token import ( "bytes" - "github.com/blevesearch/bleve/v2/analysis" - "github.com/blevesearch/bleve/v2/registry" - "github.com/blevesearch/go-porterstemmer" + "github.com/blugelabs/bluge/analysis" ) -const Name = "stemmer_porter" - -type PorterStemmer struct { -} +type PorterStemmer struct{} func NewPorterStemmer() *PorterStemmer { return &PorterStemmer{} @@ -43,11 +38,3 @@ func (s *PorterStemmer) Filter(input analysis.TokenStream) analysis.TokenStream } return input } - -func PorterStemmerConstructor(config map[string]interface{}, cache *registry.Cache) (analysis.TokenFilter, error) { - return NewPorterStemmer(), nil -} - -func init() { - registry.RegisterTokenFilter(Name, PorterStemmerConstructor) -} diff --git a/vendor/github.com/blugelabs/bluge/analysis/token/reverse.go b/vendor/github.com/blugelabs/bluge/analysis/token/reverse.go new file mode 100644 index 000000000..657e03a84 --- /dev/null +++ b/vendor/github.com/blugelabs/bluge/analysis/token/reverse.go @@ -0,0 +1,62 @@ +// Copyright (c) 2020 Couchbase, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package token + +import ( + "unicode" + "unicode/utf8" + + "github.com/blugelabs/bluge/analysis" +) + +type ReverseFilter struct{} + +func NewReverseFilter() *ReverseFilter { + return &ReverseFilter{} +} + +func (f *ReverseFilter) Filter(input analysis.TokenStream) analysis.TokenStream { + for _, token := range input { + token.Term = reverse(token.Term) + } + return input +} + +// reverse(..) will generate a reversed version of the provided +// unicode array and return it back to its caller. +func reverse(s []byte) []byte { + cursorIn := 0 + inputRunes := []rune(string(s)) + cursorOut := len(s) + output := make([]byte, len(s)) + for i := 0; i < len(inputRunes); { + wid := utf8.RuneLen(inputRunes[i]) + i++ + for i < len(inputRunes) { + r := inputRunes[i] + if unicode.Is(unicode.Mn, r) || unicode.Is(unicode.Me, r) || unicode.Is(unicode.Mc, r) { + wid += utf8.RuneLen(r) + i++ + } else { + break + } + } + copy(output[cursorOut-wid:cursorOut], s[cursorIn:cursorIn+wid]) + cursorIn += wid + cursorOut -= wid + } + + return output +} diff --git a/vendor/github.com/blugelabs/bluge/analysis/token/shingle.go b/vendor/github.com/blugelabs/bluge/analysis/token/shingle.go new file mode 100644 index 000000000..062ae2027 --- /dev/null +++ b/vendor/github.com/blugelabs/bluge/analysis/token/shingle.go @@ -0,0 +1,124 @@ +// Copyright (c) 2020 Couchbase, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package token + +import ( + "container/ring" + + "github.com/blugelabs/bluge/analysis" +) + +type ShingleFilter struct { + min int + max int + outputOriginal bool + tokenSeparator string + fill string +} + +func NewShingleFilter(min, max int, outputOriginal bool, sep, fill string) *ShingleFilter { + return &ShingleFilter{ + min: min, + max: max, + outputOriginal: outputOriginal, + tokenSeparator: sep, + fill: fill, + } +} + +func (s *ShingleFilter) Filter(input analysis.TokenStream) analysis.TokenStream { + rv := make(analysis.TokenStream, 0, len(input)) + + aRing := ring.New(s.max) + itemsInRing := 0 + for _, token := range input { + if s.outputOriginal { + rv = append(rv, token) + } + + // if there are gaps, insert filler tokens + offset := token.PositionIncr - 1 + for offset > 0 { + fillerToken := analysis.Token{ + PositionIncr: 1, + Start: -1, + End: -1, + Type: analysis.AlphaNumeric, + Term: []byte(s.fill), + } + aRing.Value = &fillerToken + if itemsInRing < s.max { + itemsInRing++ + } + rv = append(rv, s.shingleCurrentRingState(aRing, itemsInRing)...) + aRing = aRing.Next() + offset-- + } + + aRing.Value = token + if itemsInRing < s.max { + itemsInRing++ + } + rv = append(rv, s.shingleCurrentRingState(aRing, itemsInRing)...) + aRing = aRing.Next() + } + + return rv +} + +func (s *ShingleFilter) shingleCurrentRingState(aRing *ring.Ring, itemsInRing int) analysis.TokenStream { + rv := make(analysis.TokenStream, 0) + for shingleN := s.min; shingleN <= s.max; shingleN++ { + if itemsInRing < shingleN { + continue + } + // if there are enough items in the ring + // to produce a shingle of this size + thisShingleRing := aRing.Move(-(shingleN - 1)) + shingledBytes := make([]byte, 0) + start := -1 + end := 0 + for i := 0; i < shingleN; i++ { + if i != 0 { + shingledBytes = append(shingledBytes, []byte(s.tokenSeparator)...) 
+ } + curr := thisShingleRing.Value.(*analysis.Token) + if start == -1 && curr.Start != -1 { + start = curr.Start + } + if curr.End != -1 { + end = curr.End + } + shingledBytes = append(shingledBytes, curr.Term...) + thisShingleRing = thisShingleRing.Next() + } + token := analysis.Token{ + Type: analysis.Shingle, + Term: shingledBytes, + PositionIncr: 1, + } + if start != -1 { + token.Start = start + } + if end != -1 { + token.End = end + } + if len(rv) > 0 || s.outputOriginal { + token.PositionIncr = 0 + } + rv = append(rv, &token) + } + return rv +} diff --git a/vendor/github.com/blevesearch/bleve/v2/analysis/token/stop/stop.go b/vendor/github.com/blugelabs/bluge/analysis/token/stop.go similarity index 63% rename from vendor/github.com/blevesearch/bleve/v2/analysis/token/stop/stop.go rename to vendor/github.com/blugelabs/bluge/analysis/token/stop.go index bf4b98db1..88c131886 100644 --- a/vendor/github.com/blevesearch/bleve/v2/analysis/token/stop/stop.go +++ b/vendor/github.com/blugelabs/bluge/analysis/token/stop.go @@ -1,4 +1,4 @@ -// Copyright (c) 2014 Couchbase, Inc. +// Copyright (c) 2020 Couchbase, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -19,17 +19,12 @@ // // "stop_token_map" (string): the name of the token map identifying tokens to // remove. 
-package stop +package token import ( - "fmt" - - "github.com/blevesearch/bleve/v2/analysis" - "github.com/blevesearch/bleve/v2/registry" + "github.com/blugelabs/bluge/analysis" ) -const Name = "stop_tokens" - type StopTokensFilter struct { stopTokens analysis.TokenMap } @@ -41,30 +36,18 @@ func NewStopTokensFilter(stopTokens analysis.TokenMap) *StopTokensFilter { } func (f *StopTokensFilter) Filter(input analysis.TokenStream) analysis.TokenStream { - j := 0 + var j, skipped int for _, token := range input { _, isStopToken := f.stopTokens[string(token.Term)] if !isStopToken { + token.PositionIncr += skipped + skipped = 0 input[j] = token j++ + } else { + skipped += token.PositionIncr } } return input[:j] } - -func StopTokensFilterConstructor(config map[string]interface{}, cache *registry.Cache) (analysis.TokenFilter, error) { - stopTokenMapName, ok := config["stop_token_map"].(string) - if !ok { - return nil, fmt.Errorf("must specify stop_token_map") - } - stopTokenMap, err := cache.TokenMapNamed(stopTokenMapName) - if err != nil { - return nil, fmt.Errorf("error building stop words filter: %v", err) - } - return NewStopTokensFilter(stopTokenMap), nil -} - -func init() { - registry.RegisterTokenFilter(Name, StopTokensFilterConstructor) -} diff --git a/vendor/github.com/blugelabs/bluge/analysis/token/truncate.go b/vendor/github.com/blugelabs/bluge/analysis/token/truncate.go new file mode 100644 index 000000000..929fe918d --- /dev/null +++ b/vendor/github.com/blugelabs/bluge/analysis/token/truncate.go @@ -0,0 +1,41 @@ +// Copyright (c) 2020 Couchbase, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package token + +import ( + "unicode/utf8" + + "github.com/blugelabs/bluge/analysis" +) + +type TruncateTokenFilter struct { + length int +} + +func NewTruncateTokenFilter(length int) *TruncateTokenFilter { + return &TruncateTokenFilter{ + length: length, + } +} + +func (s *TruncateTokenFilter) Filter(input analysis.TokenStream) analysis.TokenStream { + for _, token := range input { + wordLen := utf8.RuneCount(token.Term) + if wordLen > s.length { + token.Term = analysis.TruncateRunes(token.Term, wordLen-s.length) + } + } + return input +} diff --git a/vendor/github.com/blugelabs/bluge/analysis/token/unicodenorm.go b/vendor/github.com/blugelabs/bluge/analysis/token/unicodenorm.go new file mode 100644 index 000000000..ba3f79ccc --- /dev/null +++ b/vendor/github.com/blugelabs/bluge/analysis/token/unicodenorm.go @@ -0,0 +1,37 @@ +// Copyright (c) 2020 Couchbase, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package token + +import ( + "github.com/blugelabs/bluge/analysis" + "golang.org/x/text/unicode/norm" +) + +type UnicodeNormalizeFilter struct { + form norm.Form +} + +func NewUnicodeNormalizeFilter(form norm.Form) *UnicodeNormalizeFilter { + return &UnicodeNormalizeFilter{ + form: form, + } +} + +func (s *UnicodeNormalizeFilter) Filter(input analysis.TokenStream) analysis.TokenStream { + for _, token := range input { + token.Term = s.form.Bytes(token.Term) + } + return input +} diff --git a/vendor/github.com/blugelabs/bluge/analysis/token/unique.go b/vendor/github.com/blugelabs/bluge/analysis/token/unique.go new file mode 100644 index 000000000..66cff8f4a --- /dev/null +++ b/vendor/github.com/blugelabs/bluge/analysis/token/unique.go @@ -0,0 +1,47 @@ +// Copyright (c) 2020 Couchbase, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package token + +import ( + "github.com/blugelabs/bluge/analysis" +) + +const initialMapFactor = 4 + +// UniqueTermFilter retains only the tokens which mark the first occurrence of +// a term. Tokens whose term appears in a preceding token are dropped. 
+type UniqueTermFilter struct{} + +func NewUniqueTermFilter() *UniqueTermFilter { + return &UniqueTermFilter{} +} + +func (f *UniqueTermFilter) Filter(input analysis.TokenStream) analysis.TokenStream { + encounteredTerms := make(map[string]struct{}, len(input)/initialMapFactor) + var j, skipped int + for _, token := range input { + term := string(token.Term) + if _, ok := encounteredTerms[term]; ok { + skipped += token.PositionIncr + continue + } + token.PositionIncr += skipped + skipped = 0 + encounteredTerms[term] = struct{}{} + input[j] = token + j++ + } + return input[:j] +} diff --git a/vendor/github.com/blugelabs/bluge/analysis/tokenizer/character.go b/vendor/github.com/blugelabs/bluge/analysis/tokenizer/character.go new file mode 100644 index 000000000..5fdfac9e8 --- /dev/null +++ b/vendor/github.com/blugelabs/bluge/analysis/tokenizer/character.go @@ -0,0 +1,73 @@ +// Copyright (c) 2020 Couchbase, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package tokenizer + +import ( + "unicode/utf8" + + "github.com/blugelabs/bluge/analysis" +) + +type IsTokenRune func(r rune) bool + +type CharacterTokenizer struct { + isTokenRun IsTokenRune +} + +func NewCharacterTokenizer(f IsTokenRune) *CharacterTokenizer { + return &CharacterTokenizer{ + isTokenRun: f, + } +} + +func (c *CharacterTokenizer) Tokenize(input []byte) analysis.TokenStream { + rv := make(analysis.TokenStream, 0, 1024) + + offset := 0 + start := 0 + end := 0 + for currRune, size := utf8.DecodeRune(input[offset:]); currRune != utf8.RuneError; currRune, size = utf8.DecodeRune(input[offset:]) { + isToken := c.isTokenRun(currRune) + if isToken { + end = offset + size + } else { + if end-start > 0 { + // build token + rv = append(rv, &analysis.Token{ + Term: input[start:end], + Start: start, + End: end, + PositionIncr: 1, + Type: analysis.AlphaNumeric, + }) + } + start = offset + size + end = start + } + offset += size + } + // if we ended in the middle of a token, finish it + if end-start > 0 { + // build token + rv = append(rv, &analysis.Token{ + Term: input[start:end], + Start: start, + End: end, + PositionIncr: 1, + Type: analysis.AlphaNumeric, + }) + } + return rv +} diff --git a/vendor/github.com/blugelabs/bluge/analysis/tokenizer/exception.go b/vendor/github.com/blugelabs/bluge/analysis/tokenizer/exception.go new file mode 100644 index 000000000..77f6a8bdd --- /dev/null +++ b/vendor/github.com/blugelabs/bluge/analysis/tokenizer/exception.go @@ -0,0 +1,90 @@ +// Copyright (c) 2020 Couchbase, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package tokenizer + +import ( + "regexp" + + "github.com/blugelabs/bluge/analysis" +) + +// ExceptionsTokenizer implements a Tokenizer which extracts pieces matched by a +// regular expression from the input data, delegates the rest to another +// tokenizer, then insert back extracted parts in the token stream. Use it to +// preserve sequences which a regular tokenizer would alter or remove. +// +// Its constructor takes the following arguments: +// +// "exceptions" ([]string): one or more Go regular expressions matching the +// sequence to preserve. Multiple expressions are combined with "|". +// +// "tokenizer" (string): the name of the tokenizer processing the data not +// matched by "exceptions". +type ExceptionsTokenizer struct { + exception *regexp.Regexp + remaining analysis.Tokenizer +} + +func NewExceptionsTokenizer(exception *regexp.Regexp, remaining analysis.Tokenizer) *ExceptionsTokenizer { + return &ExceptionsTokenizer{ + exception: exception, + remaining: remaining, + } +} + +func (t *ExceptionsTokenizer) Tokenize(input []byte) analysis.TokenStream { + rv := make(analysis.TokenStream, 0) + matches := t.exception.FindAllIndex(input, -1) + currInput := 0 + for _, match := range matches { + start := match[0] + end := match[1] + if start > currInput { + // need to defer to remaining for unprocessed section + intermediate := t.remaining.Tokenize(input[currInput:start]) + // add intermediate tokens to our result stream + for _, token := range intermediate { + // adjust token offsets + token.Start += currInput + token.End += currInput + rv = append(rv, token) + } + } + + // create single token with this regexp match + token := &analysis.Token{ + Term: input[start:end], + Start: start, + End: end, + PositionIncr: 1, + } + rv = append(rv, token) + currInput = end + } + + if currInput < len(input) { + // need to defer to remaining for unprocessed 
section + intermediate := t.remaining.Tokenize(input[currInput:]) + // add intermediate tokens to our result stream + for _, token := range intermediate { + // adjust token offsets + token.Start += currInput + token.End += currInput + rv = append(rv, token) + } + } + + return rv +} diff --git a/vendor/github.com/blevesearch/zapx/v13/plugin.go b/vendor/github.com/blugelabs/bluge/analysis/tokenizer/letter.go similarity index 72% rename from vendor/github.com/blevesearch/zapx/v13/plugin.go rename to vendor/github.com/blugelabs/bluge/analysis/tokenizer/letter.go index f67297ec2..dd0666892 100644 --- a/vendor/github.com/blevesearch/zapx/v13/plugin.go +++ b/vendor/github.com/blugelabs/bluge/analysis/tokenizer/letter.go @@ -12,16 +12,12 @@ // See the License for the specific language governing permissions and // limitations under the License. -package zap +package tokenizer -// ZapPlugin implements the Plugin interface of -// the blevesearch/scorch_segment_api pkg -type ZapPlugin struct{} +import ( + "unicode" +) -func (*ZapPlugin) Type() string { - return Type -} - -func (*ZapPlugin) Version() uint32 { - return Version +func NewLetterTokenizer() *CharacterTokenizer { + return NewCharacterTokenizer(unicode.IsLetter) } diff --git a/vendor/github.com/blugelabs/bluge/analysis/tokenizer/regexp.go b/vendor/github.com/blugelabs/bluge/analysis/tokenizer/regexp.go new file mode 100644 index 000000000..60432332f --- /dev/null +++ b/vendor/github.com/blugelabs/bluge/analysis/tokenizer/regexp.go @@ -0,0 +1,64 @@ +// Copyright (c) 2020 Couchbase, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tokenizer + +import ( + "regexp" + "strconv" + + "github.com/blugelabs/bluge/analysis" +) + +var IdeographRegexp = regexp.MustCompile(`\p{Han}|\p{Hangul}|\p{Hiragana}|\p{Katakana}`) + +type RegexpTokenizer struct { + r *regexp.Regexp +} + +func NewRegexpTokenizer(r *regexp.Regexp) *RegexpTokenizer { + return &RegexpTokenizer{ + r: r, + } +} + +func (rt *RegexpTokenizer) Tokenize(input []byte) analysis.TokenStream { + matches := rt.r.FindAllIndex(input, -1) + rv := make(analysis.TokenStream, 0, len(matches)) + for _, match := range matches { + matchBytes := input[match[0]:match[1]] + if match[1]-match[0] > 0 { + token := analysis.Token{ + Term: matchBytes, + Start: match[0], + End: match[1], + PositionIncr: 1, + Type: detectTokenType(matchBytes), + } + rv = append(rv, &token) + } + } + return rv +} + +func detectTokenType(termBytes []byte) analysis.TokenType { + if IdeographRegexp.Match(termBytes) { + return analysis.Ideographic + } + _, err := strconv.ParseFloat(string(termBytes), 64) + if err == nil { + return analysis.Numeric + } + return analysis.AlphaNumeric +} diff --git a/vendor/github.com/blevesearch/bleve/v2/analysis/tokenizer/single/single.go b/vendor/github.com/blugelabs/bluge/analysis/tokenizer/single.go similarity index 57% rename from vendor/github.com/blevesearch/bleve/v2/analysis/tokenizer/single/single.go rename to vendor/github.com/blugelabs/bluge/analysis/tokenizer/single.go index a3eac7899..e188afef7 100644 --- a/vendor/github.com/blevesearch/bleve/v2/analysis/tokenizer/single/single.go +++ 
b/vendor/github.com/blugelabs/bluge/analysis/tokenizer/single.go @@ -1,4 +1,4 @@ -// Copyright (c) 2014 Couchbase, Inc. +// Copyright (c) 2020 Couchbase, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,38 +12,34 @@ // See the License for the specific language governing permissions and // limitations under the License. -package single +package tokenizer import ( - "github.com/blevesearch/bleve/v2/analysis" - "github.com/blevesearch/bleve/v2/registry" + "github.com/blugelabs/bluge/analysis" ) -const Name = "single" - -type SingleTokenTokenizer struct { -} +type SingleTokenTokenizer struct{} func NewSingleTokenTokenizer() *SingleTokenTokenizer { return &SingleTokenTokenizer{} } func (t *SingleTokenTokenizer) Tokenize(input []byte) analysis.TokenStream { - return analysis.TokenStream{ - &analysis.Token{ - Term: input, - Position: 1, - Start: 0, - End: len(input), - Type: analysis.AlphaNumeric, - }, - } + return MakeTokenStream(input) } -func SingleTokenTokenizerConstructor(config map[string]interface{}, cache *registry.Cache) (analysis.Tokenizer, error) { - return NewSingleTokenTokenizer(), nil +func MakeToken(input []byte) *analysis.Token { + return &analysis.Token{ + Term: input, + PositionIncr: 1, + Start: 0, + End: len(input), + Type: analysis.AlphaNumeric, + } } -func init() { - registry.RegisterTokenizer(Name, SingleTokenTokenizerConstructor) +func MakeTokenStream(input []byte) analysis.TokenStream { + return analysis.TokenStream{ + MakeToken(input), + } } diff --git a/vendor/github.com/blevesearch/bleve/v2/analysis/tokenizer/unicode/unicode.go b/vendor/github.com/blugelabs/bluge/analysis/tokenizer/unicode.go similarity index 80% rename from vendor/github.com/blevesearch/bleve/v2/analysis/tokenizer/unicode/unicode.go rename to vendor/github.com/blugelabs/bluge/analysis/tokenizer/unicode.go index ca3cfe76c..1db362ddd 100644 --- 
a/vendor/github.com/blevesearch/bleve/v2/analysis/tokenizer/unicode/unicode.go +++ b/vendor/github.com/blugelabs/bluge/analysis/tokenizer/unicode.go @@ -1,4 +1,4 @@ -// Copyright (c) 2014 Couchbase, Inc. +// Copyright (c) 2020 Couchbase, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,19 +12,18 @@ // See the License for the specific language governing permissions and // limitations under the License. -package unicode +package tokenizer import ( "github.com/blevesearch/segment" - "github.com/blevesearch/bleve/v2/analysis" - "github.com/blevesearch/bleve/v2/registry" + "github.com/blugelabs/bluge/analysis" ) -const Name = "unicode" +const maxEstimatedRemainingSegments = 1000 +const maxRvCapacity = 256 -type UnicodeTokenizer struct { -} +type UnicodeTokenizer struct{} func NewUnicodeTokenizer() *UnicodeTokenizer { return &UnicodeTokenizer{} @@ -39,7 +38,6 @@ func (rt *UnicodeTokenizer) Tokenize(input []byte) analysis.TokenStream { segmenter := segment.NewWordSegmenterDirect(input) start := 0 - pos := 1 guessRemaining := func(end int) int { avgSegmentLen := end / (len(rv) + 1) @@ -58,8 +56,8 @@ func (rt *UnicodeTokenizer) Tokenize(input []byte) analysis.TokenStream { if segmenter.Type() != segment.None { if taNext >= len(ta) { remainingSegments := guessRemaining(end) - if remainingSegments > 1000 { - remainingSegments = 1000 + if remainingSegments > maxEstimatedRemainingSegments { + remainingSegments = maxEstimatedRemainingSegments } if remainingSegments < 1 { remainingSegments = 1 @@ -75,22 +73,21 @@ func (rt *UnicodeTokenizer) Tokenize(input []byte) analysis.TokenStream { token.Term = segmentBytes token.Start = start token.End = end - token.Position = pos + token.PositionIncr = 1 token.Type = convertType(segmenter.Type()) if len(rv) >= cap(rv) { // When rv is full, save it into rvx. 
rvx = append(rvx, rv) rvCap := cap(rv) * 2 - if rvCap > 256 { - rvCap = 256 + if rvCap > maxRvCapacity { + rvCap = maxRvCapacity } rv = make(analysis.TokenStream, 0, rvCap) // Next rv cap is bigger. } rv = append(rv, token) - pos++ } start = end } @@ -110,14 +107,6 @@ func (rt *UnicodeTokenizer) Tokenize(input []byte) analysis.TokenStream { return rv } -func UnicodeTokenizerConstructor(config map[string]interface{}, cache *registry.Cache) (analysis.Tokenizer, error) { - return NewUnicodeTokenizer(), nil -} - -func init() { - registry.RegisterTokenizer(Name, UnicodeTokenizerConstructor) -} - func convertType(segmentWordType int) analysis.TokenType { switch segmentWordType { case segment.Ideo: diff --git a/vendor/github.com/blugelabs/bluge/analysis/tokenizer/web.go b/vendor/github.com/blugelabs/bluge/analysis/tokenizer/web.go new file mode 100644 index 000000000..607abe2d2 --- /dev/null +++ b/vendor/github.com/blugelabs/bluge/analysis/tokenizer/web.go @@ -0,0 +1,32 @@ +// Copyright (c) 2020 Couchbase, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package tokenizer + +import ( + "regexp" + "strings" +) + +var email = `(?:[a-z0-9!#$%&'*+/=?^_` + "`" + `{|}~-]+(?:\.[a-z0-9!#$%&'*+/=?^_` + "`" + `{|}~-]+)*|"(?:[\x01-\x08\x0b\x0c\x0e-\x1f\x21\x23-\x5b\x5d-\x7f]|\\[\x01-\x09\x0b\x0c\x0e-\x7f])*")@(?:(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?|\[(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?|[a-z0-9-]*[a-z0-9]:(?:[\x01-\x08\x0b\x0c\x0e-\x1f\x21-\x5a\x53-\x7f]|\\[\x01-\x09\x0b\x0c\x0e-\x7f])+)\])` +var url = `(?i)\b((?:[a-z][\w-]+:(?:/{1,3}|[a-z0-9%])|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>]+|\(([^\s()<>]+|(\([^\s()<>]+\)))*\))+(?:\(([^\s()<>]+|(\([^\s()<>]+\)))*\)|[^\s` + "`" + `!()\[\]{};:'".,<>?«»“”‘’]))` +var twitterHandle = `@([a-zA-Z0-9_]){1,15}` +var twitterHashtag = `#([a-zA-Z0-9_])+` +var exceptions = []string{email, url, twitterHandle, twitterHashtag} + +var exceptionsRegexp = regexp.MustCompile(strings.Join(exceptions, "|")) + +func NewWebTokenizer() *ExceptionsTokenizer { + return NewExceptionsTokenizer(exceptionsRegexp, NewUnicodeTokenizer()) +} diff --git a/vendor/github.com/blevesearch/zapx/v12/plugin.go b/vendor/github.com/blugelabs/bluge/analysis/tokenizer/whitespace.go similarity index 72% rename from vendor/github.com/blevesearch/zapx/v12/plugin.go rename to vendor/github.com/blugelabs/bluge/analysis/tokenizer/whitespace.go index f67297ec2..52a263d88 100644 --- a/vendor/github.com/blevesearch/zapx/v12/plugin.go +++ b/vendor/github.com/blugelabs/bluge/analysis/tokenizer/whitespace.go @@ -12,16 +12,16 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package zap +package tokenizer -// ZapPlugin implements the Plugin interface of -// the blevesearch/scorch_segment_api pkg -type ZapPlugin struct{} +import ( + "unicode" +) -func (*ZapPlugin) Type() string { - return Type +func NewWhitespaceTokenizer() *CharacterTokenizer { + return NewCharacterTokenizer(notSpace) } -func (*ZapPlugin) Version() uint32 { - return Version +func notSpace(r rune) bool { + return !unicode.IsSpace(r) } diff --git a/vendor/github.com/blevesearch/bleve/v2/analysis/tokenmap.go b/vendor/github.com/blugelabs/bluge/analysis/tokenmap.go similarity index 91% rename from vendor/github.com/blevesearch/bleve/v2/analysis/tokenmap.go rename to vendor/github.com/blugelabs/bluge/analysis/tokenmap.go index 7c0d0a890..135cc1521 100644 --- a/vendor/github.com/blevesearch/bleve/v2/analysis/tokenmap.go +++ b/vendor/github.com/blugelabs/bluge/analysis/tokenmap.go @@ -1,4 +1,4 @@ -// Copyright (c) 2014 Couchbase, Inc. +// Copyright (c) 2020 Couchbase, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -25,7 +25,7 @@ import ( type TokenMap map[string]bool func NewTokenMap() TokenMap { - return make(TokenMap, 0) + return make(TokenMap) } // LoadFile reads in a list of tokens from a text file, @@ -36,13 +36,14 @@ func (t TokenMap) LoadFile(filename string) error { if err != nil { return err } - return t.LoadBytes(data) + t.LoadBytes(data) + return nil } // LoadBytes reads in a list of tokens from memory, // one per line. 
// Comments are supported using `#` or `|` -func (t TokenMap) LoadBytes(data []byte) error { +func (t TokenMap) LoadBytes(data []byte) { bytesReader := bytes.NewReader(data) bufioReader := bufio.NewReader(bytesReader) line, err := bufioReader.ReadString('\n') @@ -53,9 +54,9 @@ func (t TokenMap) LoadBytes(data []byte) error { // if the err was EOF we still need to process the last value if err == io.EOF { t.LoadLine(line) - return nil + } else { + panic(err) } - return err } func (t TokenMap) LoadLine(line string) { diff --git a/vendor/github.com/blevesearch/bleve/v2/analysis/type.go b/vendor/github.com/blugelabs/bluge/analysis/type.go similarity index 71% rename from vendor/github.com/blevesearch/bleve/v2/analysis/type.go rename to vendor/github.com/blugelabs/bluge/analysis/type.go index 589cc1ca6..7b6776522 100644 --- a/vendor/github.com/blevesearch/bleve/v2/analysis/type.go +++ b/vendor/github.com/blugelabs/bluge/analysis/type.go @@ -1,4 +1,4 @@ -// Copyright (c) 2014 Couchbase, Inc. +// Copyright (c) 2020 Couchbase, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -16,7 +16,6 @@ package analysis import ( "fmt" - "time" ) type CharFilter interface { @@ -41,26 +40,25 @@ const ( type Token struct { // Start specifies the byte offset of the beginning of the term in the // field. - Start int `json:"start"` + Start int // End specifies the byte offset of the end of the term in the field. - End int `json:"end"` - Term []byte `json:"term"` + End int + Term []byte - // Position specifies the 1-based index of the token in the sequence of - // occurrences of its term in the field. - Position int `json:"position"` - Type TokenType `json:"type"` - KeyWord bool `json:"keyword"` + // PositionIncr specifies the position of this token relative to the previous. 
+ PositionIncr int + Type TokenType + KeyWord bool } func (t *Token) String() string { - return fmt.Sprintf("Start: %d End: %d Position: %d Token: %s Type: %d", t.Start, t.End, t.Position, string(t.Term), t.Type) + return fmt.Sprintf("Start: %d End: %d PositionIncr: %d Token: %s Type: %d", t.Start, t.End, t.PositionIncr, string(t.Term), t.Type) } type TokenStream []*Token -// A Tokenizer splits an input string into tokens, the usual behaviour being to +// A Tokenizer splits an input string into tokens, the usual behavior being to // map words to tokens. type Tokenizer interface { Tokenize([]byte) TokenStream @@ -91,13 +89,3 @@ func (a *Analyzer) Analyze(input []byte) TokenStream { } return tokens } - -var ErrInvalidDateTime = fmt.Errorf("unable to parse datetime with any of the layouts") - -type DateTimeParser interface { - ParseDateTime(string) (time.Time, error) -} - -type ByteArrayConverter interface { - Convert([]byte) (interface{}, error) -} diff --git a/vendor/github.com/blevesearch/bleve/v2/analysis/util.go b/vendor/github.com/blugelabs/bluge/analysis/util.go similarity index 100% rename from vendor/github.com/blevesearch/bleve/v2/analysis/util.go rename to vendor/github.com/blugelabs/bluge/analysis/util.go diff --git a/vendor/github.com/blugelabs/bluge/batch.go b/vendor/github.com/blugelabs/bluge/batch.go new file mode 100644 index 000000000..d1f9d3460 --- /dev/null +++ b/vendor/github.com/blugelabs/bluge/batch.go @@ -0,0 +1,36 @@ +// Copyright (c) 2020 The Bluge Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package bluge + +import ( + "github.com/blugelabs/bluge/index" +) + +const _idField = "_id" + +type Identifier string + +func (i Identifier) Field() string { + return _idField +} + +func (i Identifier) Term() []byte { + return []byte(i) +} + +// NewBatch creates a new empty batch. +func NewBatch() *index.Batch { + return index.NewBatch() +} diff --git a/vendor/github.com/blugelabs/bluge/config.go b/vendor/github.com/blugelabs/bluge/config.go new file mode 100644 index 000000000..7f21dc77e --- /dev/null +++ b/vendor/github.com/blugelabs/bluge/config.go @@ -0,0 +1,118 @@ +// Copyright (c) 2020 The Bluge Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package bluge + +import ( + "io/ioutil" + "log" + + "github.com/blugelabs/bluge/index" + + "github.com/blugelabs/bluge/search" + "github.com/blugelabs/bluge/search/similarity" + + "github.com/blugelabs/bluge/analysis" + "github.com/blugelabs/bluge/analysis/analyzer" +) + +type Config struct { + indexConfig index.Config + Logger *log.Logger + + DefaultSearchField string + DefaultSearchAnalyzer *analysis.Analyzer + DefaultSimilarity search.Similarity + PerFieldSimilarity map[string]search.Similarity + + SearchStartFunc func(size uint64) error + SearchEndFunc func(size uint64) +} + +// WithVirtualField allows you to describe a field that +// the index will behave as if all documents in this index were +// indexed with these field/terms, even though nothing is +// physically persisted about them in the index. +func (config Config) WithVirtualField(field Field) Config { + _ = field.Analyze(0) + config.indexConfig = config.indexConfig.WithVirtualField(field) + return config +} + +func (config Config) WithSegmentType(typ string) Config { + config.indexConfig = config.indexConfig.WithSegmentType(typ) + return config +} + +func (config Config) WithSegmentVersion(ver uint32) Config { + config.indexConfig = config.indexConfig.WithSegmentVersion(ver) + return config +} + +func (config Config) DisableOptimizeConjunction() Config { + config.indexConfig = config.indexConfig.DisableOptimizeConjunction() + return config +} + +func (config Config) DisableOptimizeConjunctionUnadorned() Config { + config.indexConfig = config.indexConfig.DisableOptimizeConjunctionUnadorned() + return config +} + +func (config Config) DisableOptimizeDisjunctionUnadorned() Config { + config.indexConfig = config.indexConfig.DisableOptimizeDisjunctionUnadorned() + return config +} + +func (config Config) WithSearchStartFunc(f func(size uint64) error) Config { + config.SearchStartFunc = f + return config +} + +func DefaultConfig(path string) Config { + indexConfig := index.DefaultConfig(path) + return 
defaultConfig(indexConfig) +} + +func InMemoryOnlyConfig() Config { + indexConfig := index.InMemoryOnlyConfig() + return defaultConfig(indexConfig) +} +func DefaultConfigWithDirectory(df func() index.Directory) Config { + indexConfig := index.DefaultConfigWithDirectory(df) + return defaultConfig(indexConfig) +} + +func defaultConfig(indexConfig index.Config) Config { + rv := Config{ + Logger: log.New(ioutil.Discard, "bluge", log.LstdFlags), + DefaultSearchField: "_all", + DefaultSearchAnalyzer: analyzer.NewStandardAnalyzer(), + DefaultSimilarity: similarity.NewBM25Similarity(), + PerFieldSimilarity: map[string]search.Similarity{}, + } + + allDocsFields := NewKeywordField("", "") + _ = allDocsFields.Analyze(0) + indexConfig = indexConfig.WithVirtualField(allDocsFields) + indexConfig = indexConfig.WithNormCalc(func(field string, length int) float32 { + if pfs, ok := rv.PerFieldSimilarity[field]; ok { + return pfs.ComputeNorm(length) + } + return rv.DefaultSimilarity.ComputeNorm(length) + }) + rv.indexConfig = indexConfig + + return rv +} diff --git a/vendor/github.com/blugelabs/bluge/doc.go b/vendor/github.com/blugelabs/bluge/doc.go new file mode 100644 index 000000000..b52f176dc --- /dev/null +++ b/vendor/github.com/blugelabs/bluge/doc.go @@ -0,0 +1,70 @@ +// Copyright (c) 2020 The Bluge Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package bluge is a library for indexing and searching text. 
+ +Example Opening New Index, Indexing Data + + config := bluge.DefaultConfig(path) + writer, err := bluge.OpenWriter(config) + if err != nil { + log.Fatalf("error opening writer: %v", err) + } + defer writer.Close() + + doc := bluge.NewDocument("example"). + AddField(bluge.NewTextField("name", "bluge")) + + err = writer.Update(doc.ID(), doc) + if err != nil { + log.Fatalf("error updating document: %v", err) + } + +Example Getting Index Reader, Searching Data + + reader, err := writer.Reader() + if err != nil { + log.Fatalf("error getting index reader: %v", err) + } + defer reader.Close() + + query := bluge.NewMatchQuery("bluge").SetField("name") + request := bluge.NewTopNSearch(10, query). + WithStandardAggregations() + documentMatchIterator, err := reader.Search(context.Background(), request) + if err != nil { + log.Fatalf("error executing search: %v", err) + } + match, err := documentMatchIterator.Next() + for err == nil && match != nil { + + // load the identifier for this match + err = match.VisitStoredFields(func(field string, value []byte) bool { + if field == "_id" { + fmt.Printf("match: %s\n", string(value)) + } + return true + }) + if err != nil { + log.Fatalf("error loading stored fields: %v", err) + } + match, err = documentMatchIterator.Next() + } + if err != nil { + log.Fatalf("error iterator document matches: %v", err) + } + +*/ +package bluge diff --git a/vendor/github.com/blugelabs/bluge/document.go b/vendor/github.com/blugelabs/bluge/document.go new file mode 100644 index 000000000..4d5105f99 --- /dev/null +++ b/vendor/github.com/blugelabs/bluge/document.go @@ -0,0 +1,93 @@ +// Copyright (c) 2020 Couchbase, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bluge + +import ( + segment "github.com/blugelabs/bluge_segment_api" +) + +type Document []Field + +func NewDocument(id string) *Document { + return &Document{ + NewKeywordField(_idField, id).StoreValue().Sortable(), + } +} + +func NewDocumentWithIdentifier(id Identifier) *Document { + return &Document{ + NewKeywordFieldBytes(id.Field(), id.Term()).StoreValue().Sortable(), + } +} + +func (d Document) Size() int { + sizeInBytes := sizeOfSlice + + for _, entry := range d { + sizeInBytes += entry.Size() + } + + return sizeInBytes +} + +// ID is an experimental helper method +// to simplify common use cases +func (d Document) ID() segment.Term { + return Identifier(d[0].Value()) +} + +func (d *Document) AddField(f Field) *Document { + *d = append(*d, f) + return d +} + +// FieldConsumer is anything which can consume a field +// Fields can implement this interface to consume the +// content of another field. 
+type FieldConsumer interface { + Consume(Field) +} + +func (d Document) Analyze() { + fieldOffsets := map[string]int{} + for _, field := range d { + if !field.Index() { + continue + } + fieldOffset := fieldOffsets[field.Name()] + if fieldOffset > 0 { + fieldOffset += field.PositionIncrementGap() + } + lastPos := field.Analyze(fieldOffset) + fieldOffsets[field.Name()] = lastPos + + // see if any of the composite fields need this + for _, otherField := range d { + if otherField == field { + // never include yourself + continue + } + if fieldConsumer, ok := otherField.(FieldConsumer); ok { + fieldConsumer.Consume(field) + } + } + } +} + +func (d Document) EachField(vf segment.VisitField) { + for _, field := range d { + vf(field) + } +} diff --git a/vendor/github.com/blugelabs/bluge/field.go b/vendor/github.com/blugelabs/bluge/field.go new file mode 100644 index 000000000..bcda38973 --- /dev/null +++ b/vendor/github.com/blugelabs/bluge/field.go @@ -0,0 +1,452 @@ +// Copyright (c) 2020 Couchbase, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package bluge + +import ( + "time" + + segment "github.com/blugelabs/bluge_segment_api" + + "github.com/blugelabs/bluge/analysis/analyzer" + + "github.com/blugelabs/bluge/analysis" + "github.com/blugelabs/bluge/numeric" + "github.com/blugelabs/bluge/numeric/geo" +) + +type FieldOptions int + +const ( + Index FieldOptions = 1 << iota + Store + SearchTermPositions + HighlightMatches + Sortable + Aggregatable +) + +func (o FieldOptions) Index() bool { + return o&Index != 0 +} + +func (o FieldOptions) Store() bool { + return o&Store != 0 +} + +func (o FieldOptions) IncludeLocations() bool { + return o&SearchTermPositions != 0 || o&HighlightMatches != 0 +} + +func (o FieldOptions) IndexDocValues() bool { + return o&Sortable != 0 || o&Aggregatable != 0 +} + +type Field interface { + segment.Field + + Analyze(int) int + AnalyzedTokenFrequencies() analysis.TokenFrequencies + + PositionIncrementGap() int + + Size() int +} + +type TermField struct { + FieldOptions + name string + value []byte + numPlainTextBytes int + analyzedLength int + analyzedTokenFreqs analysis.TokenFrequencies + analyzer Analyzer + positionIncrementGap int +} + +func (b *TermField) PositionIncrementGap() int { + return b.positionIncrementGap +} + +func (b *TermField) SetPositionIncrementGap(positionIncrementGap int) *TermField { + b.positionIncrementGap = positionIncrementGap + return b +} + +func (b *TermField) Name() string { + return b.name +} + +func (b *TermField) Size() int { + return reflectStaticSizeBaseField + sizeOfPtr + + len(b.name) + + len(b.value) +} + +func (b *TermField) AnalyzedLength() int { + return b.analyzedLength +} + +func (b *TermField) AnalyzedTokenFrequencies() analysis.TokenFrequencies { + return b.analyzedTokenFreqs +} + +func (b *TermField) Value() []byte { + return b.value +} + +func (b *TermField) NumPlainTextBytes() int { + return b.numPlainTextBytes +} + +func (b *TermField) StoreValue() *TermField { + b.FieldOptions |= Store + return b +} + +func (b *TermField) 
Sortable() *TermField { + b.FieldOptions |= Sortable + return b +} + +func (b *TermField) Aggregatable() *TermField { + b.FieldOptions |= Aggregatable + return b +} + +func (b *TermField) SearchTermPositions() *TermField { + b.FieldOptions |= SearchTermPositions + return b +} + +func (b *TermField) HighlightMatches() *TermField { + b.FieldOptions |= HighlightMatches + return b +} + +func (b *TermField) EachTerm(vt segment.VisitTerm) { + for _, v := range b.analyzedTokenFreqs { + vt(v) + } +} + +func (b *TermField) Length() int { + return b.analyzedLength +} + +func (b *TermField) baseAnalayze(typ analysis.TokenType) analysis.TokenStream { + var tokens analysis.TokenStream + tokens = append(tokens, &analysis.Token{ + Start: 0, + End: len(b.value), + Term: b.value, + PositionIncr: 1, + Type: typ, + }) + return tokens +} + +func (b *TermField) WithAnalyzer(fieldAnalyzer Analyzer) *TermField { + b.analyzer = fieldAnalyzer + return b +} + +func (b *TermField) Analyze(startOffset int) (lastPos int) { + var tokens analysis.TokenStream + if b.analyzer != nil { + bytesToAnalyze := b.Value() + if b.Store() { + // need to copy + bytesCopied := make([]byte, len(bytesToAnalyze)) + copy(bytesCopied, bytesToAnalyze) + bytesToAnalyze = bytesCopied + } + tokens = b.analyzer.Analyze(bytesToAnalyze) + } else { + tokens = b.baseAnalayze(analysis.AlphaNumeric) + } + b.analyzedLength = len(tokens) // number of tokens in this doc field + b.analyzedTokenFreqs, lastPos = analysis.TokenFrequency(tokens, b.IncludeLocations(), startOffset) + return lastPos +} + +const defaultTextIndexingOptions = Index + +type Analyzer interface { + Analyze(input []byte) analysis.TokenStream +} + +var standardAnalyzer = analyzer.NewStandardAnalyzer() + +func NewKeywordField(name, value string) *TermField { + return newTextField(name, []byte(value), nil) +} + +func NewKeywordFieldBytes(name string, value []byte) *TermField { + return newTextField(name, value, nil) +} + +func NewTextField(name, value string) 
*TermField { + return newTextField(name, []byte(value), standardAnalyzer) +} + +func NewTextFieldBytes(name string, value []byte) *TermField { + return newTextField(name, value, standardAnalyzer) +} + +func newTextField(name string, value []byte, fieldAnalyzer Analyzer) *TermField { + return &TermField{ + FieldOptions: defaultTextIndexingOptions, + name: name, + value: value, + numPlainTextBytes: len(value), + analyzer: fieldAnalyzer, + positionIncrementGap: 100, + } +} + +const defaultNumericIndexingOptions = Index | Sortable | Aggregatable + +const defaultNumericPrecisionStep uint = 4 + +func addShiftTokens(tokens analysis.TokenStream, original int64, shiftBy uint, typ analysis.TokenType) analysis.TokenStream { + shift := shiftBy + for shift < 64 { + shiftEncoded, err := numeric.NewPrefixCodedInt64(original, shift) + if err != nil { + break + } + token := analysis.Token{ + Start: 0, + End: len(shiftEncoded), + Term: shiftEncoded, + PositionIncr: 0, + Type: typ, + } + tokens = append(tokens, &token) + shift += shiftBy + } + return tokens +} + +type numericAnalyzer struct { + tokenType analysis.TokenType + shiftBy uint +} + +func (n *numericAnalyzer) Analyze(input []byte) analysis.TokenStream { + tokens := analysis.TokenStream{ + &analysis.Token{ + Start: 0, + End: len(input), + Term: input, + PositionIncr: 1, + Type: n.tokenType, + }, + } + original, err := numeric.PrefixCoded(input).Int64() + if err == nil { + tokens = addShiftTokens(tokens, original, n.shiftBy, n.tokenType) + } + return tokens +} + +func NewNumericField(name string, number float64) *TermField { + return newNumericFieldWithIndexingOptions(name, number, defaultNumericIndexingOptions) +} + +func newNumericFieldWithIndexingOptions(name string, number float64, options FieldOptions) *TermField { + numberInt64 := numeric.Float64ToInt64(number) + prefixCoded := numeric.MustNewPrefixCodedInt64(numberInt64, 0) + return &TermField{ + FieldOptions: options, + name: name, + value: prefixCoded, + 
numPlainTextBytes: 8, + analyzer: &numericAnalyzer{ + tokenType: analysis.Numeric, + shiftBy: defaultNumericPrecisionStep, + }, + positionIncrementGap: 100, + } +} + +func DecodeNumericFloat64(value []byte) (float64, error) { + i64, err := numeric.PrefixCoded(value).Int64() + if err != nil { + return 0, err + } + return numeric.Int64ToFloat64(i64), nil +} + +const defaultDateTimeIndexingOptions = Index | Sortable | Aggregatable + +const defaultDateTimePrecisionStep uint = 4 + +func NewDateTimeField(name string, dt time.Time) *TermField { + dtInt64 := dt.UnixNano() + prefixCoded := numeric.MustNewPrefixCodedInt64(dtInt64, 0) + return &TermField{ + FieldOptions: defaultDateTimeIndexingOptions, + name: name, + value: prefixCoded, + numPlainTextBytes: 8, + analyzer: &numericAnalyzer{ + tokenType: analysis.DateTime, + shiftBy: defaultDateTimePrecisionStep, + }, + positionIncrementGap: 100, + } +} + +func DecodeDateTime(value []byte) (time.Time, error) { + i64, err := numeric.PrefixCoded(value).Int64() + if err != nil { + return time.Time{}, err + } + return time.Unix(0, i64).UTC(), nil +} + +var geoPrecisionStep uint = 9 + +func NewGeoPointField(name string, lon, lat float64) *TermField { + mHash := geo.MortonHash(lon, lat) + prefixCoded := numeric.MustNewPrefixCodedInt64(int64(mHash), 0) + return &TermField{ + FieldOptions: defaultNumericIndexingOptions, + name: name, + value: prefixCoded, + numPlainTextBytes: 8, + analyzer: &numericAnalyzer{ + tokenType: analysis.Numeric, + shiftBy: geoPrecisionStep, + }, + positionIncrementGap: 100, + } +} + +func DecodeGeoLonLat(value []byte) (lon, lat float64, err error) { + i64, err := numeric.PrefixCoded(value).Int64() + if err != nil { + return 0, 0, err + } + return geo.MortonUnhashLon(uint64(i64)), geo.MortonUnhashLat(uint64(i64)), nil +} + +const defaultCompositeIndexingOptions = Index + +type CompositeField struct { + *TermField + includedFields map[string]bool + excludedFields map[string]bool + defaultInclude bool +} + 
+func NewCompositeFieldIncluding(name string, including []string) *CompositeField { + return newCompositeFieldWithIndexingOptions(name, false, including, + nil, defaultCompositeIndexingOptions) +} + +func NewCompositeFieldExcluding(name string, excluding []string) *CompositeField { + return newCompositeFieldWithIndexingOptions(name, true, nil, + excluding, defaultCompositeIndexingOptions) +} + +func NewCompositeField(name string, defaultInclude bool, include, exclude []string) *CompositeField { + return newCompositeFieldWithIndexingOptions(name, defaultInclude, include, exclude, defaultCompositeIndexingOptions) +} + +func newCompositeFieldWithIndexingOptions(name string, defaultInclude bool, include, exclude []string, + options FieldOptions) *CompositeField { + rv := &CompositeField{ + TermField: &TermField{ + FieldOptions: options, + name: name, + analyzedTokenFreqs: make(analysis.TokenFrequencies), + }, + defaultInclude: defaultInclude, + includedFields: make(map[string]bool, len(include)), + excludedFields: make(map[string]bool, len(exclude)), + } + + for _, i := range include { + rv.includedFields[i] = true + } + for _, e := range exclude { + rv.excludedFields[e] = true + } + + return rv +} + +func (c *CompositeField) Size() int { + sizeInBytes := c.TermField.Size() + + for k := range c.includedFields { + sizeInBytes += sizeOfString + len(k) + sizeOfBool + } + + for k := range c.excludedFields { + sizeInBytes += sizeOfString + len(k) + sizeOfBool + } + + return sizeInBytes +} + +func (c *CompositeField) Analyze(int) int { + return 0 +} + +func (c *CompositeField) PositionIncrementGap() int { + return 0 +} + +func (c *CompositeField) includesField(field string) bool { + shouldInclude := c.defaultInclude + _, fieldShouldBeIncluded := c.includedFields[field] + if fieldShouldBeIncluded { + shouldInclude = true + } + _, fieldShouldBeExcluded := c.excludedFields[field] + if fieldShouldBeExcluded { + shouldInclude = false + } + return shouldInclude +} + +func (c 
*CompositeField) Consume(field Field) { + if c.includesField(field.Name()) { + c.analyzedLength += field.Length() + c.analyzedTokenFreqs.MergeAll(field.Name(), field.AnalyzedTokenFrequencies()) + } +} + +func (c *CompositeField) EachTerm(vt segment.VisitTerm) { + for _, v := range c.analyzedTokenFreqs { + vt(v) + } +} + +func (c *CompositeField) Length() int { + return c.analyzedLength +} + +func NewStoredOnlyField(name string, value []byte) *TermField { + return &TermField{ + FieldOptions: Store, + name: name, + value: value, + numPlainTextBytes: len(value), + } +} diff --git a/vendor/github.com/blugelabs/bluge/index/batch.go b/vendor/github.com/blugelabs/bluge/index/batch.go new file mode 100644 index 000000000..b17d48436 --- /dev/null +++ b/vendor/github.com/blugelabs/bluge/index/batch.go @@ -0,0 +1,54 @@ +// Copyright (c) 2020 The Bluge Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package index + +import segment "github.com/blugelabs/bluge_segment_api" + +type Batch struct { + documents []segment.Document + ids []segment.Term + persistedCallback func(error) +} + +func NewBatch() *Batch { + return &Batch{} +} + +func (b *Batch) Insert(doc segment.Document) { + b.documents = append(b.documents, doc) +} + +func (b *Batch) Update(id segment.Term, doc segment.Document) { + b.documents = append(b.documents, doc) + b.ids = append(b.ids, id) +} + +func (b *Batch) Delete(id segment.Term) { + b.ids = append(b.ids, id) +} + +func (b *Batch) Reset() { + b.documents = b.documents[:0] + b.ids = b.ids[:0] + b.persistedCallback = nil +} + +func (b *Batch) SetPersistedCallback(f func(error)) { + b.persistedCallback = f +} + +func (b *Batch) PersistedCallback() func(error) { + return b.persistedCallback +} diff --git a/vendor/github.com/blugelabs/bluge/index/communication.go b/vendor/github.com/blugelabs/bluge/index/communication.go new file mode 100644 index 000000000..30cd8910b --- /dev/null +++ b/vendor/github.com/blugelabs/bluge/index/communication.go @@ -0,0 +1,57 @@ +// Copyright (c) 2020 Couchbase, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package index + +import segment "github.com/blugelabs/bluge_segment_api" + +type notificationChan chan struct{} + +type epochWatcher struct { + epoch uint64 + notifyCh notificationChan +} + +type epochWatchers []*epochWatcher + +func (e *epochWatchers) Add(watcher *epochWatcher) { + *e = append(*e, watcher) +} + +func (e *epochWatchers) NotifySatisfiedWatchers(epoch uint64) { + var epochWatchersNext epochWatchers + for _, w := range *e { + if w.epoch < epoch { + close(w.notifyCh) + } else { + epochWatchersNext.Add(w) + } + } + *e = epochWatchersNext +} + +type watcherChan chan *epochWatcher + +func (w watcherChan) NotifyUsAfter(epoch uint64, closeCh chan struct{}) (*epochWatcher, error) { + ew := &epochWatcher{ + epoch: epoch, + notifyCh: make(notificationChan, 1), + } + select { + case <-closeCh: + return nil, segment.ErrClosed + case w <- ew: + } + return ew, nil +} diff --git a/vendor/github.com/blugelabs/bluge/index/config.go b/vendor/github.com/blugelabs/bluge/index/config.go new file mode 100644 index 000000000..3bb4962a3 --- /dev/null +++ b/vendor/github.com/blugelabs/bluge/index/config.go @@ -0,0 +1,222 @@ +// Copyright (c) 2020 The Bluge Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package index + +import ( + "math" + + segment "github.com/blugelabs/bluge_segment_api" + + "github.com/blugelabs/bluge/index/mergeplan" + "github.com/blugelabs/ice" +) + +type Config struct { + SegmentType string + SegmentVersion uint32 + + supportedSegmentPlugins map[string]map[uint32]*SegmentPlugin + + UnsafeBatch bool + EventCallback func(Event) + AsyncError func(error) + MergePlanOptions mergeplan.Options + NumAnalysisWorkers int + AnalysisChan chan func() + GoFunc func(func()) + DeletionPolicyFunc func() DeletionPolicy + DirectoryFunc func() Directory + NormCalc func(string, int) float32 + + MergeBufferSize int + + // Optimizations + OptimizeConjunction bool + OptimizeConjunctionUnadorned bool + OptimizeDisjunctionUnadorned bool + + // MinSegmentsForInMemoryMerge represents the number of + // in-memory zap segments that persistSnapshotMaybeMerge() needs to + // see in an Snapshot before it decides to merge and persist + // those segments + MinSegmentsForInMemoryMerge int + + // PersisterNapTimeMSec controls the wait/delay injected into + // persistence workloop to improve the chances for + // a healthier and heavier in-memory merging + PersisterNapTimeMSec int + + // PersisterNapTimeMSec > 0, and the number of files is less than + // PersisterNapUnderNumFiles, then the persister will sleep + // PersisterNapTimeMSec amount of time to improve the chances for + // a healthier and heavier in-memory merging + PersisterNapUnderNumFiles int + + // MemoryPressurePauseThreshold let persister to have a better leeway + // for prudently performing the memory merge of segments on a memory + // pressure situation. Here the config value is an upper threshold + // for the number of paused application threads. The default value would + // be a very high number to always favor the merging of memory segments. 
+ MemoryPressurePauseThreshold int + + ValidateSnapshotCRC bool + + virtualFields map[string][]segment.Field +} + +func (config Config) WithSegmentType(typ string) Config { + config.SegmentType = typ + return config +} + +func (config Config) WithSegmentVersion(ver uint32) Config { + config.SegmentVersion = ver + return config +} + +func (config Config) WithPersisterNapTimeMSec(napTime int) Config { + config.PersisterNapTimeMSec = napTime + return config +} + +func (config Config) WithVirtualField(field segment.Field) Config { + config.virtualFields[field.Name()] = append(config.virtualFields[field.Name()], field) + return config +} + +func (config Config) WithNormCalc(calc func(field string, numTerms int) float32) Config { + config.NormCalc = calc + return config +} + +func (config Config) WithSegmentPlugin(plugin *SegmentPlugin) Config { + if _, ok := config.supportedSegmentPlugins[plugin.Type]; !ok { + config.supportedSegmentPlugins[plugin.Type] = map[uint32]*SegmentPlugin{} + } + config.supportedSegmentPlugins[plugin.Type][plugin.Version] = plugin + return config +} + +func (config Config) DisableOptimizeConjunction() Config { + config.OptimizeConjunction = false + return config +} + +func (config Config) DisableOptimizeConjunctionUnadorned() Config { + config.OptimizeConjunctionUnadorned = false + return config +} + +func (config Config) DisableOptimizeDisjunctionUnadorned() Config { + config.OptimizeDisjunctionUnadorned = false + return config +} + +func (config Config) WithUnsafeBatches() Config { + config.UnsafeBatch = true + return config +} + +func DefaultConfig(path string) Config { + rv := defaultConfig() + rv.DirectoryFunc = func() Directory { + return NewFileSystemDirectory(path) + } + return rv +} + +func InMemoryOnlyConfig() Config { + rv := defaultConfig() + rv.DirectoryFunc = func() Directory { + return NewInMemoryDirectory() + } + return rv +} + +func DefaultConfigWithDirectory(df func() Directory) Config { + rv := defaultConfig() + 
rv.DirectoryFunc = df + return rv +} + +func defaultConfig() Config { + rv := Config{ + SegmentType: ice.Type, + SegmentVersion: ice.Version, + MergePlanOptions: mergeplan.DefaultMergePlanOptions, + DeletionPolicyFunc: func() DeletionPolicy { + return NewKeepNLatestDeletionPolicy(1) + }, + + MergeBufferSize: 1024 * 1024, + + // Optimizations enabled + OptimizeConjunction: true, + OptimizeConjunctionUnadorned: true, + OptimizeDisjunctionUnadorned: true, + + MinSegmentsForInMemoryMerge: 2, + + // DefaultPersisterNapTimeMSec is kept to zero as this helps in direct + // persistence of segments with the default safe batch option. + // If the default safe batch option results in high number of + // files on disk, then users may initialize this configuration parameter + // with higher values so that the persister will nap a bit within it's + // work loop to favor better in-memory merging of segments to result + // in fewer segment files on disk. But that may come with an indexing + // performance overhead. + // Unsafe batch users are advised to override this to higher value + // for better performance especially with high data density. + PersisterNapTimeMSec: 0, + + // DefaultPersisterNapUnderNumFiles helps in controlling the pace of + // persister. At times of a slow merger progress with heavy file merging + // operations, its better to pace down the persister for letting the merger + // to catch up within a range defined by this parameter. + // Fewer files on disk (as per the merge plan) would result in keeping the + // file handle usage under limit, faster disk merger and a healthier index. + // Its been observed that such a loosely sync'ed introducer-persister-merger + // trio results in better overall performance. 
+ PersisterNapUnderNumFiles: 1000, + + MemoryPressurePauseThreshold: math.MaxInt32, + + // VirtualFields allow you to describe a set of fields + // The index will behave as if all documents in this index were + // indexed with these fields, even though nothing is + // physically persisted about them in the index. + virtualFields: map[string][]segment.Field{}, + + NumAnalysisWorkers: 4, + AnalysisChan: make(chan func()), + GoFunc: func(f func()) { + go f() + }, + + ValidateSnapshotCRC: true, + + supportedSegmentPlugins: map[string]map[uint32]*SegmentPlugin{}, + } + + rv.WithSegmentPlugin(&SegmentPlugin{ + Type: ice.Type, + Version: ice.Version, + New: ice.New, + Load: ice.Load, + Merge: ice.Merge, + }) + + return rv +} diff --git a/vendor/github.com/blugelabs/bluge/index/count.go b/vendor/github.com/blugelabs/bluge/index/count.go new file mode 100644 index 000000000..085859afc --- /dev/null +++ b/vendor/github.com/blugelabs/bluge/index/count.go @@ -0,0 +1,83 @@ +// Copyright (c) 2020 Couchbase, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package index + +import ( + "hash/crc32" + "io" +) + +// countHashWriter is a wrapper around a Writer which counts the number of +// bytes which have been written and computes a crc32 hash +type countHashWriter struct { + w io.Writer + crc uint32 + n int +} + +// newCountHashWriter returns a countHashWriter which wraps the provided Writer +func newCountHashWriter(w io.Writer) *countHashWriter { + return &countHashWriter{w: w} +} + +// Write writes the provided bytes to the wrapped writer, +// counts the bytes and updates the hash +func (c *countHashWriter) Write(b []byte) (int, error) { + n, err := c.w.Write(b) + c.crc = crc32.Update(c.crc, crc32.IEEETable, b[:n]) + c.n += n + return n, err +} + +// Count returns the number of bytes written +func (c *countHashWriter) Count() int { + return c.n +} + +// Sum32 returns the CRC-32 hash of the content written to this writer +func (c *countHashWriter) Sum32() uint32 { + return c.crc +} + +// countHashWriter is a wrapper around a Reader which counts the number of +// bytes which have been read and computes a crc32 hash +type countHashReader struct { + r io.Reader + crc uint32 + n int +} + +// newCountHashReader returns a countHashReader which wraps the provided Reader +func newCountHashReader(r io.Reader) *countHashReader { + return &countHashReader{r: r} +} + +// Read reads from the wrapped reader, counts the bytes and updates the hash +func (c *countHashReader) Read(p []byte) (n int, err error) { + n, err = c.r.Read(p) + c.n += n + c.crc = crc32.Update(c.crc, crc32.IEEETable, p[:n]) + return n, err +} + +// Count returns the number of bytes written +func (c *countHashReader) Count() int { + return c.n +} + +// Sum32 returns the CRC-32 hash of the content written to this writer +func (c *countHashReader) Sum32() uint32 { + return c.crc +} diff --git a/vendor/github.com/blugelabs/bluge/index/deletion.go b/vendor/github.com/blugelabs/bluge/index/deletion.go new file mode 100644 index 000000000..8fbd067b9 --- /dev/null 
+++ b/vendor/github.com/blugelabs/bluge/index/deletion.go @@ -0,0 +1,96 @@ +// Copyright (c) 2020 The Bluge Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package index + +type DeletionPolicy interface { + Commit(snapshot *Snapshot) + Cleanup(Directory) error +} + +type KeepNLatestDeletionPolicy struct { + n int + liveEpochs []uint64 + deletableEpochs []uint64 + liveSegments map[uint64]map[uint64]struct{} + knownSegmentFiles map[uint64]struct{} +} + +func NewKeepNLatestDeletionPolicy(n int) *KeepNLatestDeletionPolicy { + return &KeepNLatestDeletionPolicy{ + n: n, + liveSegments: make(map[uint64]map[uint64]struct{}), + knownSegmentFiles: make(map[uint64]struct{}), + } +} + +func (p *KeepNLatestDeletionPolicy) Commit(snapshot *Snapshot) { + // build map of segments in this snapshot + snapshotSegments := make(map[uint64]struct{}) + for _, segment := range snapshot.segment { + snapshotSegments[segment.id] = struct{}{} + p.knownSegmentFiles[segment.id] = struct{}{} + } + + // add new epoch to the end + p.liveEpochs = append(p.liveEpochs, snapshot.epoch) + p.liveSegments[snapshot.epoch] = snapshotSegments + + // trim off epochs no longer needed and track separately + if len(p.liveEpochs) > p.n { + newlyDeletable := p.liveEpochs[:len(p.liveEpochs)-p.n] + p.liveEpochs = p.liveEpochs[len(p.liveEpochs)-p.n:] + p.deletableEpochs = append(p.deletableEpochs, newlyDeletable...) 
+ } +} + +func (p *KeepNLatestDeletionPolicy) Cleanup(dir Directory) error { + p.cleanupSnapshots(dir) + p.cleanupSegments(dir) + return nil +} + +func (p *KeepNLatestDeletionPolicy) cleanupSnapshots(dir Directory) { + var remainingEpochs []uint64 + for _, deletableEpoch := range p.deletableEpochs { + err := dir.Remove(ItemKindSnapshot, deletableEpoch) + if err != nil { + remainingEpochs = append(remainingEpochs, deletableEpoch) + } else { + delete(p.liveSegments, deletableEpoch) + } + } + p.deletableEpochs = remainingEpochs +} + +func (p *KeepNLatestDeletionPolicy) cleanupSegments(dir Directory) { +OUTER: + for segmentID := range p.knownSegmentFiles { + // check all of the live snapshots and see if this file is needed + for _, segmentInSnapshot := range p.liveSegments { + if _, ok := segmentInSnapshot[segmentID]; ok { + // segment is still in use + continue OUTER + } + } + + // file is no longer needed by anyone + err := dir.Remove(ItemKindSegment, segmentID) + if err != nil { + // unable to remove, we'll try again next time + continue + } + delete(p.knownSegmentFiles, segmentID) + } +} diff --git a/vendor/github.com/blevesearch/bleve/v2/index/scorch/snapshot_index_dict.go b/vendor/github.com/blugelabs/bluge/index/dictionary.go similarity index 59% rename from vendor/github.com/blevesearch/bleve/v2/index/scorch/snapshot_index_dict.go rename to vendor/github.com/blugelabs/bluge/index/dictionary.go index 0a2bd232e..e8f660bf2 100644 --- a/vendor/github.com/blevesearch/bleve/v2/index/scorch/snapshot_index_dict.go +++ b/vendor/github.com/blugelabs/bluge/index/dictionary.go @@ -1,4 +1,4 @@ -// Copyright (c) 2017 Couchbase, Inc. +// Copyright (c) 2020 Couchbase, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,51 +12,64 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package scorch +package index import ( "container/heap" - index "github.com/blevesearch/bleve_index_api" - segment "github.com/blevesearch/scorch_segment_api/v2" + segment "github.com/blugelabs/bluge_segment_api" ) type segmentDictCursor struct { - dict segment.TermDictionary + dict segment.Dictionary itr segment.DictionaryIterator - curr index.DictEntry + curr segment.DictionaryEntry } -type IndexSnapshotFieldDict struct { - snapshot *IndexSnapshot +type dictionaryEntry struct { + term string + count uint64 +} + +func (d *dictionaryEntry) Term() string { + return d.term +} + +func (d *dictionaryEntry) Count() uint64 { + return d.count +} + +type dictionary struct { + snapshot *Snapshot cursors []*segmentDictCursor - entry index.DictEntry + entry dictionaryEntry } -func (i *IndexSnapshotFieldDict) Len() int { return len(i.cursors) } -func (i *IndexSnapshotFieldDict) Less(a, b int) bool { - return i.cursors[a].curr.Term < i.cursors[b].curr.Term +func (i *dictionary) Len() int { return len(i.cursors) } +func (i *dictionary) Less(a, b int) bool { + return i.cursors[a].curr.Term() < i.cursors[b].curr.Term() } -func (i *IndexSnapshotFieldDict) Swap(a, b int) { +func (i *dictionary) Swap(a, b int) { i.cursors[a], i.cursors[b] = i.cursors[b], i.cursors[a] } -func (i *IndexSnapshotFieldDict) Push(x interface{}) { +func (i *dictionary) Push(x interface{}) { i.cursors = append(i.cursors, x.(*segmentDictCursor)) } -func (i *IndexSnapshotFieldDict) Pop() interface{} { +func (i *dictionary) Pop() interface{} { n := len(i.cursors) x := i.cursors[n-1] i.cursors = i.cursors[0 : n-1] return x } -func (i *IndexSnapshotFieldDict) Next() (*index.DictEntry, error) { +func (i *dictionary) Next() (segment.DictionaryEntry, error) { if len(i.cursors) == 0 { return nil, nil } - i.entry = i.cursors[0].curr + i.entry.term = i.cursors[0].curr.Term() + i.entry.count = i.cursors[0].curr.Count() next, err := i.cursors[0].itr.Next() if err != nil { return nil, err @@ -66,12 +79,12 @@ func (i 
*IndexSnapshotFieldDict) Next() (*index.DictEntry, error) { heap.Pop(i) } else { // modified heap, fix it - i.cursors[0].curr = *next + i.cursors[0].curr = next heap.Fix(i, 0) } // look for any other entries with the exact same term - for len(i.cursors) > 0 && i.cursors[0].curr.Term == i.entry.Term { - i.entry.Count += i.cursors[0].curr.Count + for len(i.cursors) > 0 && i.cursors[0].curr.Term() == i.entry.Term() { + i.entry.count += i.cursors[0].curr.Count() next, err := i.cursors[0].itr.Next() if err != nil { return nil, err @@ -81,7 +94,7 @@ func (i *IndexSnapshotFieldDict) Next() (*index.DictEntry, error) { heap.Pop(i) } else { // modified heap, fix it - i.cursors[0].curr = *next + i.cursors[0].curr = next heap.Fix(i, 0) } } @@ -89,11 +102,11 @@ func (i *IndexSnapshotFieldDict) Next() (*index.DictEntry, error) { return &i.entry, nil } -func (i *IndexSnapshotFieldDict) Close() error { +func (i *dictionary) Close() error { return nil } -func (i *IndexSnapshotFieldDict) Contains(key []byte) (bool, error) { +func (i *dictionary) Contains(key []byte) (bool, error) { if len(i.cursors) == 0 { return false, nil } diff --git a/vendor/github.com/blugelabs/bluge/index/directory.go b/vendor/github.com/blugelabs/bluge/index/directory.go new file mode 100644 index 000000000..ae5dfc85f --- /dev/null +++ b/vendor/github.com/blugelabs/bluge/index/directory.go @@ -0,0 +1,74 @@ +// Copyright (c) 2020 The Bluge Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package index + +import ( + "io" + + segment "github.com/blugelabs/bluge_segment_api" +) + +// Kinds of items managed by a Directory +const ( + ItemKindSnapshot = ".snp" + ItemKindSegment = ".seg" +) + +// WriterTo is like io.WriterTo only it can be canceled +// by closing the closeCh +type WriterTo interface { + WriteTo(w io.Writer, closeCh chan struct{}) (n int64, err error) +} + +// Directory abstracts over a collection of items +// An item has a kind (string) and an id (uint64) +type Directory interface { + + // Setup is called first, allowing a directory to + // perform additional set up, or return an error + // indicating this directory cannot be used + Setup(readOnly bool) error + + // List the ids of all the items of the specified kind + // Items are returned in descending order by id + List(kind string) ([]uint64, error) + + // Load the specified item + // Item data is accessible via the returned *segment.Data structure + // A io.Closer is returned, which must be called to release + // resources held by this open item. + // NOTE: care must be taken to handle a possible nil io.Closer + Load(kind string, id uint64) (*segment.Data, io.Closer, error) + + // Persist a new item with data from the provided WriterTo + // Implementations should monitor the closeCh and return with error + // in the event it is closed before completion. 
+ Persist(kind string, id uint64, w WriterTo, closeCh chan struct{}) error + + // Remove the specified item + Remove(kind string, id uint64) error + + // Stats returns total number of items and their cumulative size + Stats() (numItems uint64, numBytes uint64) + + // Sync ensures directory metadata itself has been committed + Sync() error + + // Lock ensures this process has exclusive access to write in this directory + Lock() error + + // Unlock releases the lock held on this directory + Unlock() error +} diff --git a/vendor/github.com/blugelabs/bluge/index/directory_fs.go b/vendor/github.com/blugelabs/bluge/index/directory_fs.go new file mode 100644 index 000000000..024aeec0b --- /dev/null +++ b/vendor/github.com/blugelabs/bluge/index/directory_fs.go @@ -0,0 +1,273 @@ +// Copyright (c) 2020 The Bluge Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package index + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "sort" + "strconv" + + "github.com/blevesearch/mmap-go" + "github.com/blugelabs/bluge/index/lock" + segment "github.com/blugelabs/bluge_segment_api" +) + +const pidFilename = "bluge.pid" + +type LoadMMapFunc func(f lock.LockedFile) (*segment.Data, io.Closer, error) + +type FileSystemDirectory struct { + path string + pid lock.LockedFile + + newDirPerm os.FileMode + newFilePerm os.FileMode + + openExclusive func(path string, flag int, perm os.FileMode) (lock.LockedFile, error) + openShared func(path string, flag int, perm os.FileMode) (lock.LockedFile, error) + + loadMMapFunc LoadMMapFunc +} + +func NewFileSystemDirectory(path string) *FileSystemDirectory { + return &FileSystemDirectory{ + path: path, + openExclusive: lock.OpenExclusive, + openShared: lock.OpenShared, + newDirPerm: 0700, + newFilePerm: 0600, + loadMMapFunc: LoadMMapAlways, + } +} + +func (d *FileSystemDirectory) exists() (bool, error) { + _, err := os.Stat(d.path) + if err == nil { + return true, nil + } + if os.IsNotExist(err) { + return false, nil + } + return true, err +} + +func (d *FileSystemDirectory) Setup(readOnly bool) error { + dirExists, err := d.exists() + if err != nil { + return fmt.Errorf("error checking if directory exists '%s': %w", d.path, err) + } + if !dirExists { + if readOnly { + return fmt.Errorf("readOnly, directory does not exist") + } + err = os.MkdirAll(d.path, d.newDirPerm) + if err != nil { + return fmt.Errorf("error creating directory '%s': %w", d.path, err) + } + } + return nil +} + +func (d *FileSystemDirectory) List(kind string) ([]uint64, error) { + dirEntries, err := ioutil.ReadDir(d.path) + if err != nil { + return nil, err + } + + var rv uint64Slice + for _, dirEntry := range dirEntries { + if filepath.Ext(dirEntry.Name()) != kind { + continue + } + base := dirEntry.Name() + base = base[:len(base)-len(kind)] + var epoch uint64 + epoch, err = strconv.ParseUint(base, 16, 64) + if 
err != nil { + return nil, fmt.Errorf("error parsing identifier '%s': %w", base, err) + } + rv = append(rv, epoch) + } + + sort.Sort(sort.Reverse(rv)) + + return rv, nil +} + +func (d *FileSystemDirectory) Persist(kind string, id uint64, w WriterTo, closeCh chan struct{}) error { + path := filepath.Join(d.path, d.fileName(kind, id)) + f, err := d.openExclusive(path, os.O_CREATE|os.O_RDWR, d.newFilePerm) + if err != nil { + return err + } + + cleanup := func() { + _ = f.Close() + _ = os.Remove(path) + } + + _, err = w.WriteTo(f.File(), closeCh) + if err != nil { + cleanup() + return err + } + + err = f.File().Sync() + if err != nil { + cleanup() + return err + } + + err = f.Close() + if err != nil { + cleanup() + return err + } + + return nil +} + +func LoadMMapAlways(f lock.LockedFile) (*segment.Data, io.Closer, error) { + mm, err := mmap.Map(f.File(), mmap.RDONLY, 0) + if err != nil { + // mmap failed, try to close the file + _ = f.Close() + return nil, nil, err + } + + closeFunc := func() error { + err := mm.Unmap() + // try to close file even if unmap failed + err2 := f.Close() + if err == nil { + // try to return first error + err = err2 + } + return err + } + + return segment.NewDataBytes(mm), closerFunc(closeFunc), nil +} + +func LoadMMapNever(f lock.LockedFile) (*segment.Data, io.Closer, error) { + data, err := segment.NewDataFile(f.File()) + if err != nil { + return nil, nil, fmt.Errorf("error creating data from file: %w", err) + } + return data, closerFunc(f.Close), nil +} + +func (d *FileSystemDirectory) SetLoadMMapFunc(f LoadMMapFunc) { + d.loadMMapFunc = f +} + +func (d *FileSystemDirectory) Load(kind string, id uint64) (*segment.Data, io.Closer, error) { + path := filepath.Join(d.path, d.fileName(kind, id)) + f, err := d.openShared(path, os.O_RDONLY, 0) + if err != nil { + return nil, nil, err + } + return d.loadMMapFunc(f) +} + +func (d *FileSystemDirectory) Remove(kind string, id uint64) error { + return d.remove(kind, id) +} + +func (d 
*FileSystemDirectory) Lock() error { + pidPath := filepath.Join(d.path, pidFilename) + var err error + d.pid, err = d.openExclusive(pidPath, os.O_CREATE|os.O_RDWR, d.newFilePerm) + if err != nil { + return fmt.Errorf("unable to obtain exclusive access: %w", err) + } + err = d.pid.File().Truncate(0) + if err != nil { + return fmt.Errorf("error truncating pid file: %w", err) + } + _, err = d.pid.File().Write([]byte(fmt.Sprintf("%d\n", os.Getpid()))) + if err != nil { + return fmt.Errorf("error writing pid: %w", err) + } + err = d.pid.File().Sync() + if err != nil { + return fmt.Errorf("error syncing pid file: %w", err) + } + return err +} + +func (d *FileSystemDirectory) Unlock() error { + pidPath := filepath.Join(d.path, pidFilename) + var err error + err = d.pid.Close() + if err != nil { + return fmt.Errorf("error closing pid file: %w", err) + } + err = os.RemoveAll(pidPath) + if err != nil { + return fmt.Errorf("error removing pid file: %w", err) + } + return err +} + +func (d *FileSystemDirectory) Stats() (numFilesOnDisk, numBytesUsedDisk uint64) { + fileInfos, err := ioutil.ReadDir(d.path) + if err == nil { + for _, fileInfo := range fileInfos { + if !fileInfo.IsDir() { + numFilesOnDisk++ + numBytesUsedDisk += uint64(fileInfo.Size()) + } + } + } + return numFilesOnDisk, numBytesUsedDisk +} + +func (d *FileSystemDirectory) Sync() error { + dir, err := os.Open(d.path) + if err != nil { + return fmt.Errorf("error opening directory for sync: %w", err) + } + err = dir.Sync() + if err != nil { + _ = dir.Close() + return fmt.Errorf("error syncing directory: %w", err) + } + err = dir.Close() + if err != nil { + return fmt.Errorf("error closing directing after sync: %w", err) + } + return nil +} + +func (d *FileSystemDirectory) fileName(kind string, id uint64) string { + return fmt.Sprintf("%012x", id) + kind +} + +type uint64Slice []uint64 + +func (e uint64Slice) Len() int { return len(e) } +func (e uint64Slice) Swap(i, j int) { e[i], e[j] = e[j], e[i] } +func (e 
uint64Slice) Less(i, j int) bool { return e[i] < e[j] } + +type closerFunc func() error + +func (c closerFunc) Close() error { + return c() +} diff --git a/vendor/github.com/blugelabs/bluge/index/directory_fs_nix.go b/vendor/github.com/blugelabs/bluge/index/directory_fs_nix.go new file mode 100644 index 000000000..e90d7ea94 --- /dev/null +++ b/vendor/github.com/blugelabs/bluge/index/directory_fs_nix.go @@ -0,0 +1,35 @@ +// Copyright (c) 2020 The Bluge Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// +build darwin dragonfly freebsd linux netbsd openbsd + +package index + +import ( + "os" + "path/filepath" +) + +func (d *FileSystemDirectory) remove(kind string, id uint64) error { + segmentPath := filepath.Join(d.path, d.fileName(kind, id)) + segmentFile, err := d.openExclusive(segmentPath, os.O_CREATE|os.O_RDWR, d.newFilePerm) + if err != nil { + return err + } + defer func() { + _ = segmentFile.Close() + }() + + return os.Remove(segmentPath) +} diff --git a/vendor/github.com/blevesearch/bleve/v2/search/scorer/sqrt_cache.go b/vendor/github.com/blugelabs/bluge/index/directory_fs_windows.go similarity index 69% rename from vendor/github.com/blevesearch/bleve/v2/search/scorer/sqrt_cache.go rename to vendor/github.com/blugelabs/bluge/index/directory_fs_windows.go index e26d33d93..df567560f 100644 --- a/vendor/github.com/blevesearch/bleve/v2/search/scorer/sqrt_cache.go +++ b/vendor/github.com/blugelabs/bluge/index/directory_fs_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) 2014 Couchbase, Inc. +// Copyright (c) 2020 The Bluge Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,19 +12,14 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package scorer +package index import ( - "math" + "os" + "path/filepath" ) -var SqrtCache []float64 - -const MaxSqrtCache = 64 - -func init() { - SqrtCache = make([]float64, MaxSqrtCache) - for i := 0; i < MaxSqrtCache; i++ { - SqrtCache[i] = math.Sqrt(float64(i)) - } +func (d *FileSystemDirectory) remove(kind string, id uint64) error { + segmentPath := filepath.Join(d.path, d.fileName(kind, id)) + return os.Remove(segmentPath) } diff --git a/vendor/github.com/blugelabs/bluge/index/directory_mem.go b/vendor/github.com/blugelabs/bluge/index/directory_mem.go new file mode 100644 index 000000000..a5a0e22a6 --- /dev/null +++ b/vendor/github.com/blugelabs/bluge/index/directory_mem.go @@ -0,0 +1,107 @@ +// Copyright (c) 2020 The Bluge Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package index + +import ( + "bytes" + "fmt" + "io" + "sort" + "sync" + + segment "github.com/blugelabs/bluge_segment_api" +) + +type InMemoryDirectory struct { + segLock sync.RWMutex + segments map[uint64]*bytes.Buffer +} + +func NewInMemoryDirectory() *InMemoryDirectory { + return &InMemoryDirectory{ + segLock: sync.RWMutex{}, + segments: make(map[uint64]*bytes.Buffer), + } +} + +func (d *InMemoryDirectory) Setup(readOnly bool) error { + return nil +} + +func (d *InMemoryDirectory) List(kind string) ([]uint64, error) { + d.segLock.RLock() + defer d.segLock.RUnlock() + var rv uint64Slice + if kind == ItemKindSegment { + for id := range d.segments { + rv = append(rv, id) + } + } + + sort.Sort(sort.Reverse(rv)) + return rv, nil +} + +func (d *InMemoryDirectory) Load(kind string, id uint64) (*segment.Data, io.Closer, error) { + d.segLock.RLock() + defer d.segLock.RUnlock() + if kind == ItemKindSegment { + if buf, ok := d.segments[id]; ok { + return segment.NewDataBytes(buf.Bytes()), nil, nil + } + return nil, nil, fmt.Errorf("segment %d not found", id) + } + + return nil, nil, nil +} + +func (d *InMemoryDirectory) Persist(kind string, id uint64, w WriterTo, closeCh chan struct{}) error { + d.segLock.Lock() + defer d.segLock.Unlock() + if kind == ItemKindSegment { + var buf bytes.Buffer + _, err := w.WriteTo(&buf, closeCh) + if err != nil { + return err + } + d.segments[id] = &buf + } + return nil +} + +func (d *InMemoryDirectory) Remove(kind string, id uint64) error { + d.segLock.Lock() + defer d.segLock.Unlock() + if kind == ItemKindSegment { + delete(d.segments, id) + } + return nil +} + +func (d *InMemoryDirectory) Stats() (numItems, numBytes uint64) { + return 0, 0 +} + +func (d *InMemoryDirectory) Sync() error { + return nil +} + +func (d *InMemoryDirectory) Lock() error { + return nil +} + +func (d *InMemoryDirectory) Unlock() error { + return nil +} diff --git a/vendor/github.com/blevesearch/bleve/v2/index/scorch/empty.go 
b/vendor/github.com/blugelabs/bluge/index/empty.go similarity index 78% rename from vendor/github.com/blevesearch/bleve/v2/index/scorch/empty.go rename to vendor/github.com/blugelabs/bluge/index/empty.go index 4d4755531..b78a23109 100644 --- a/vendor/github.com/blevesearch/bleve/v2/index/scorch/empty.go +++ b/vendor/github.com/blugelabs/bluge/index/empty.go @@ -12,9 +12,9 @@ // See the License for the specific language governing permissions and // limitations under the License. -package scorch +package index -import segment "github.com/blevesearch/scorch_segment_api/v2" +import segment "github.com/blugelabs/bluge_segment_api" type emptyPostingsIterator struct{} @@ -30,4 +30,16 @@ func (e *emptyPostingsIterator) Size() int { return 0 } +func (e *emptyPostingsIterator) Empty() bool { + return true +} + +func (e *emptyPostingsIterator) Count() uint64 { + return 0 +} + +func (e *emptyPostingsIterator) Close() error { + return nil +} + var anEmptyPostingsIterator = &emptyPostingsIterator{} diff --git a/vendor/github.com/blugelabs/bluge/index/event.go b/vendor/github.com/blugelabs/bluge/index/event.go new file mode 100644 index 000000000..0e8c1a9c8 --- /dev/null +++ b/vendor/github.com/blugelabs/bluge/index/event.go @@ -0,0 +1,37 @@ +// Copyright (c) 2020 Couchbase, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package index + +import "time" + +// Event represents the information provided in an OnEvent() callback. 
+type Event struct { + Kind int + Chill *Writer + Duration time.Duration +} + +// Kinds of index events +const ( + EventKindCloseStart = 1 // when the index has started to close + EventKindClose = 2 // when the index has been fully closed + EventKindMergerProgress = 3 // when the index has completed a round of merge operations + EventKindPersisterProgress = 4 // when the index has completed a round of persistence operations + EventKindBatchIntroductionStart = 5 // when the index has started to introduce a new batch + EventKindBatchIntroduction = 6 // when index has finished introducing a batch + EventKindMergeTaskIntroductionStart = 7 // when the index has started to introduce a merge + EventKindMergeTaskIntroduction = 8 // when the index has finished introducing a merge + +) diff --git a/vendor/github.com/blugelabs/bluge/index/introducer.go b/vendor/github.com/blugelabs/bluge/index/introducer.go new file mode 100644 index 000000000..6c6400b33 --- /dev/null +++ b/vendor/github.com/blugelabs/bluge/index/introducer.go @@ -0,0 +1,356 @@ +// Copyright (c) 2020 Couchbase, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package index + +import ( + "fmt" + "sync/atomic" + + segment "github.com/blugelabs/bluge_segment_api" + + "github.com/RoaringBitmap/roaring" +) + +type segmentIntroduction struct { + id uint64 + data *segmentWrapper + obsoletes map[uint64]*roaring.Bitmap + idTerms []segment.Term + internal map[string][]byte + + applied chan error + persisted chan error + persistedCallback func(error) +} + +type persistIntroduction struct { + persisted map[uint64]*segmentWrapper + applied notificationChan +} + +func (s *Writer) introducerLoop(introductions chan *segmentIntroduction, + persists chan *persistIntroduction, merges chan *segmentMerge, + introducerNotifier watcherChan, nextSnapshotEpoch uint64) { + var introduceWatchers epochWatchers +OUTER: + for { + atomic.AddUint64(&s.stats.TotIntroduceLoop, 1) + + select { + case <-s.closeCh: + break OUTER + + case epochWatcher := <-introducerNotifier: + introduceWatchers.Add(epochWatcher) + + case nextMerge := <-merges: + introduceSnapshotEpoch := nextSnapshotEpoch + nextSnapshotEpoch++ + s.introduceMerge(nextMerge, introduceSnapshotEpoch) + + case next := <-introductions: + introduceSnapshotEpoch := nextSnapshotEpoch + nextSnapshotEpoch++ + err := s.introduceSegment(next, introduceSnapshotEpoch) + if err != nil { + continue OUTER + } + + case persist := <-persists: + introduceSnapshotEpoch := nextSnapshotEpoch + nextSnapshotEpoch++ + s.introducePersist(persist, introduceSnapshotEpoch) + } + + epochCurr := s.currentEpoch() + introduceWatchers.NotifySatisfiedWatchers(epochCurr) + } + + s.asyncTasks.Done() +} + +func (s *Writer) introduceSegment(next *segmentIntroduction, introduceSnapshotEpoch uint64) error { + atomic.AddUint64(&s.stats.TotIntroduceSegmentBeg, 1) + defer atomic.AddUint64(&s.stats.TotIntroduceSegmentEnd, 1) + + root := s.currentSnapshot() + defer func() { _ = root.Close() }() + + nsegs := len(root.segment) + + // prepare new index snapshot + newSnapshot := &Snapshot{ + parent: s, + epoch: introduceSnapshotEpoch, + 
segment: make([]*segmentSnapshot, 0, nsegs+1), + offsets: make([]uint64, 0, nsegs+1), + refs: 1, + creator: "introduceSegment", + } + + // iterate through current segments + var running, docsToPersistCount uint64 + var memSegments, fileSegments uint64 + for i := range root.segment { + // see if optimistic work included this segment + delta, ok := next.obsoletes[root.segment[i].id] + if !ok { + var err error + delta, err = root.segment[i].segment.DocsMatchingTerms(next.idTerms) + if err != nil { + next.applied <- fmt.Errorf("error computing doc numbers: %v", err) + close(next.applied) + _ = newSnapshot.Close() + return err + } + } + + newss := &segmentSnapshot{ + id: root.segment[i].id, + segment: root.segment[i].segment, + creator: root.segment[i].creator, + } + + // apply new obsoletions + if root.segment[i].deleted == nil { + newss.deleted = delta + } else { + newss.deleted = roaring.Or(root.segment[i].deleted, delta) + } + if newss.deleted.IsEmpty() { + newss.deleted = nil + } + + // check for live size before copying + if newss.LiveSize() > 0 { + newSnapshot.segment = append(newSnapshot.segment, newss) + root.segment[i].segment.AddRef() + newSnapshot.offsets = append(newSnapshot.offsets, running) + running += newss.segment.Count() + } + + if !root.segment[i].segment.Persisted() { + docsToPersistCount += root.segment[i].Count() + memSegments++ + } else { + fileSegments++ + } + } + + atomic.StoreUint64(&s.stats.TotItemsToPersist, docsToPersistCount) + atomic.StoreUint64(&s.stats.TotMemorySegmentsAtRoot, memSegments) + atomic.StoreUint64(&s.stats.TotFileSegmentsAtRoot, fileSegments) + + // append new segment, if any, to end of the new index snapshot + if next.data != nil { + newSegmentSnapshot := &segmentSnapshot{ + id: next.id, + segment: next.data, // take ownership of next.data's ref-count + creator: "introduceSegment", + } + newSnapshot.segment = append(newSnapshot.segment, newSegmentSnapshot) + newSnapshot.offsets = append(newSnapshot.offsets, running) + + // 
increment numItemsIntroduced which tracks the number of items + // queued for persistence. + atomic.AddUint64(&s.stats.TotIntroducedItems, newSegmentSnapshot.Count()) + atomic.AddUint64(&s.stats.TotIntroducedSegmentsBatch, 1) + } + + newSnapshot.updateSize() + + s.replaceRoot(newSnapshot, next.persisted, next.persistedCallback) + + close(next.applied) + + return nil +} + +func (s *Writer) introducePersist(persist *persistIntroduction, introduceSnapshotEpoch uint64) { + atomic.AddUint64(&s.stats.TotIntroducePersistBeg, 1) + defer atomic.AddUint64(&s.stats.TotIntroducePersistEnd, 1) + + root := s.currentSnapshot() + defer func() { _ = root.Close() }() + + newIndexSnapshot := &Snapshot{ + parent: s, + epoch: introduceSnapshotEpoch, + segment: make([]*segmentSnapshot, len(root.segment)), + offsets: make([]uint64, len(root.offsets)), + refs: 1, + creator: "introducePersist", + } + + var docsToPersistCount uint64 + var memSegments, fileSegments uint64 + for i, segSnapshot := range root.segment { + // see if this segment has been replaced + if replacement, ok := persist.persisted[segSnapshot.id]; ok { + newSegmentSnapshot := &segmentSnapshot{ + id: segSnapshot.id, + segment: replacement, + deleted: segSnapshot.deleted, + creator: "introducePersist", + } + newIndexSnapshot.segment[i] = newSegmentSnapshot + delete(persist.persisted, segSnapshot.id) + + // update items persisted in case of a new segment snapshot + atomic.AddUint64(&s.stats.TotPersistedItems, newSegmentSnapshot.Count()) + atomic.AddUint64(&s.stats.TotPersistedSegments, 1) + fileSegments++ + } else { + newIndexSnapshot.segment[i] = root.segment[i] + newIndexSnapshot.segment[i].segment.AddRef() + + if !root.segment[i].segment.Persisted() { + docsToPersistCount += root.segment[i].Count() + memSegments++ + } else { + fileSegments++ + } + } + newIndexSnapshot.offsets[i] = root.offsets[i] + } + + atomic.StoreUint64(&s.stats.TotItemsToPersist, docsToPersistCount) + atomic.StoreUint64(&s.stats.TotMemorySegmentsAtRoot, 
memSegments) + atomic.StoreUint64(&s.stats.TotFileSegmentsAtRoot, fileSegments) + newIndexSnapshot.updateSize() + + s.replaceRoot(newIndexSnapshot, nil, nil) + + close(persist.applied) +} + +// The introducer should definitely handle the segmentMerge.notify +// channel before exiting the introduceMerge. +func (s *Writer) introduceMerge(nextMerge *segmentMerge, introduceSnapshotEpoch uint64) { + atomic.AddUint64(&s.stats.TotIntroduceMergeBeg, 1) + defer atomic.AddUint64(&s.stats.TotIntroduceMergeEnd, 1) + + root := s.currentSnapshot() + defer func() { _ = root.Close() }() + + newSnapshot := &Snapshot{ + parent: s, + epoch: introduceSnapshotEpoch, + refs: 1, + creator: "introduceMerge", + } + + // iterate through current segments + newSegmentDeleted := roaring.NewBitmap() + var running, docsToPersistCount uint64 + var memSegments, fileSegments uint64 + for i := range root.segment { + segmentID := root.segment[i].id + segmentIsGoingAway := nextMerge.ProcessSegmentNow(segmentID, root.segment[i], newSegmentDeleted) + if !segmentIsGoingAway && root.segment[i].LiveSize() > 0 { + // this segment is staying + newSnapshot.segment = append(newSnapshot.segment, &segmentSnapshot{ + id: root.segment[i].id, + segment: root.segment[i].segment, + deleted: root.segment[i].deleted, + creator: root.segment[i].creator, + }) + root.segment[i].segment.AddRef() + newSnapshot.offsets = append(newSnapshot.offsets, running) + running += root.segment[i].segment.Count() + + if !root.segment[i].segment.Persisted() { + docsToPersistCount += root.segment[i].Count() + memSegments++ + } else { + fileSegments++ + } + } + } + + // before the newMerge introduction, need to clean the newly + // merged segment wrt the current root segments, hence + // applying the obsolete segment contents to newly merged segment + for segID, ss := range nextMerge.old { + obsoleted := ss.DocNumbersLive() + if obsoleted != nil { + obsoletedIter := obsoleted.Iterator() + for obsoletedIter.HasNext() { + oldDocNum := 
obsoletedIter.Next() + newDocNum := nextMerge.oldNewDocNums[segID][oldDocNum] + newSegmentDeleted.Add(uint32(newDocNum)) + } + } + } + var skipped bool + // In case where all the docs in the newly merged segment getting + // deleted by the time we reach here, can skip the introduction. + if nextMerge.new != nil && + nextMerge.new.Count() > newSegmentDeleted.GetCardinality() { + // put new segment at end + newSnapshot.segment = append(newSnapshot.segment, &segmentSnapshot{ + id: nextMerge.id, + segment: nextMerge.new, // take ownership for nextMerge.new's ref-count + deleted: newSegmentDeleted, + creator: "introduceMerge", + }) + newSnapshot.offsets = append(newSnapshot.offsets, running) + atomic.AddUint64(&s.stats.TotIntroducedSegmentsMerge, 1) + + if nextMerge.new.Persisted() { + fileSegments++ + } else { + docsToPersistCount += nextMerge.new.Count() - newSegmentDeleted.GetCardinality() + memSegments++ + } + } else { + skipped = true + atomic.AddUint64(&s.stats.TotFileMergeIntroductionsObsoleted, 1) + } + + atomic.StoreUint64(&s.stats.TotItemsToPersist, docsToPersistCount) + atomic.StoreUint64(&s.stats.TotMemorySegmentsAtRoot, memSegments) + atomic.StoreUint64(&s.stats.TotFileSegmentsAtRoot, fileSegments) + + newSnapshot.addRef() // 1 ref for the nextMerge.notify response + + newSnapshot.updateSize() + + s.replaceRoot(newSnapshot, nil, nil) + + // notify requester that we incorporated this + nextMerge.notifyCh <- &mergeTaskIntroStatus{snapshot: newSnapshot, skipped: skipped} + close(nextMerge.notifyCh) +} + +func (s *Writer) replaceRoot(newSnapshot *Snapshot, persistedCh chan error, persistedCallback func(error)) { + s.rootLock.Lock() + if persistedCh != nil { + s.rootPersisted = append(s.rootPersisted, persistedCh) + } + if persistedCallback != nil { + s.persistedCallbacks = append(s.persistedCallbacks, persistedCallback) + } + rootPrev := s.root + s.root = newSnapshot + if s.root != nil { + atomic.StoreUint64(&s.stats.CurRootEpoch, s.root.epoch) + } + 
s.rootLock.Unlock() + + if rootPrev != nil { + _ = rootPrev.Close() + } +} diff --git a/vendor/github.com/blugelabs/bluge/index/lock/lock.go b/vendor/github.com/blugelabs/bluge/index/lock/lock.go new file mode 100644 index 000000000..8e2101f8e --- /dev/null +++ b/vendor/github.com/blugelabs/bluge/index/lock/lock.go @@ -0,0 +1,55 @@ +// Copyright (c) 2020 The Bluge Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package lock + +import ( + "os" +) + +type LockedFile interface { + File() *os.File + Exclusive() bool + Close() error +} + +type DefaultLockedFile struct { + f *os.File + exclusive bool +} + +func OpenExclusive(path string, flag int, perm os.FileMode) (LockedFile, error) { + return open(path, flag, perm, true) +} + +func OpenShared(path string, flag int, perm os.FileMode) (LockedFile, error) { + return open(path, flag, perm, false) +} + +func (e *DefaultLockedFile) File() *os.File { + return e.f +} + +func (e *DefaultLockedFile) Exclusive() bool { + return e.exclusive +} + +func (e *DefaultLockedFile) Close() error { + err := e.unlock() + err2 := e.f.Close() + if err2 != nil && err == nil { + err = err2 + } + return err +} diff --git a/vendor/github.com/blevesearch/bleve/v2/analysis/analyzer/keyword/keyword.go b/vendor/github.com/blugelabs/bluge/index/lock/lock_nix.go similarity index 51% rename from vendor/github.com/blevesearch/bleve/v2/analysis/analyzer/keyword/keyword.go rename to 
vendor/github.com/blugelabs/bluge/index/lock/lock_nix.go index 473c29471..536656762 100644 --- a/vendor/github.com/blevesearch/bleve/v2/analysis/analyzer/keyword/keyword.go +++ b/vendor/github.com/blugelabs/bluge/index/lock/lock_nix.go @@ -1,4 +1,4 @@ -// Copyright (c) 2014 Couchbase, Inc. +// Copyright (c) 2020 The Bluge Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,27 +12,36 @@ // See the License for the specific language governing permissions and // limitations under the License. -package keyword +// +build darwin dragonfly freebsd linux netbsd openbsd + +package lock import ( - "github.com/blevesearch/bleve/v2/analysis" - "github.com/blevesearch/bleve/v2/analysis/tokenizer/single" - "github.com/blevesearch/bleve/v2/registry" -) + "os" -const Name = "keyword" + "golang.org/x/sys/unix" +) -func AnalyzerConstructor(config map[string]interface{}, cache *registry.Cache) (*analysis.Analyzer, error) { - keywordTokenizer, err := cache.TokenizerNamed(single.Name) +func open(path string, flag int, perm os.FileMode, exclusive bool) (LockedFile, error) { + f, err := os.OpenFile(path, flag, perm) if err != nil { return nil, err } - rv := analysis.Analyzer{ - Tokenizer: keywordTokenizer, + how := unix.LOCK_SH | unix.LOCK_NB + if exclusive { + how = unix.LOCK_EX | unix.LOCK_NB + } + err = unix.Flock(int(f.Fd()), how) + if err != nil { + _ = f.Close() + return nil, err } - return &rv, nil + return &DefaultLockedFile{ + f: f, + exclusive: exclusive, + }, nil } -func init() { - registry.RegisterAnalyzer(Name, AnalyzerConstructor) +func (e *DefaultLockedFile) unlock() error { + return nil } diff --git a/vendor/github.com/blugelabs/bluge/index/lock/lock_windows.go b/vendor/github.com/blugelabs/bluge/index/lock/lock_windows.go new file mode 100644 index 000000000..3518cc7f6 --- /dev/null +++ b/vendor/github.com/blugelabs/bluge/index/lock/lock_windows.go @@ -0,0 +1,48 @@ +// 
Copyright (c) 2020 The Bluge Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package lock + +import ( + "os" + + "golang.org/x/sys/windows" +) + +func open(path string, flag int, perm os.FileMode, exclusive bool) (LockedFile, error) { + f, err := os.OpenFile(path, flag, perm) + if err != nil { + return nil, err + } + + lockFlags := uint32(windows.LOCKFILE_FAIL_IMMEDIATELY) + if exclusive { + lockFlags |= uint32(windows.LOCKFILE_EXCLUSIVE_LOCK) + } + + err = windows.LockFileEx(windows.Handle(f.Fd()), lockFlags, 0, 1, 0, &windows.Overlapped{}) + if err != nil { + _ = f.Close() + return nil, err + } + + return &DefaultLockedFile{ + f: f, + exclusive: exclusive, + }, nil +} + +func (e *DefaultLockedFile) unlock() error { + return windows.UnlockFileEx(windows.Handle(e.f.Fd()), 0, 1, 0, &windows.Overlapped{}) +} diff --git a/vendor/github.com/blugelabs/bluge/index/merge.go b/vendor/github.com/blugelabs/bluge/index/merge.go new file mode 100644 index 000000000..c3fe6a05a --- /dev/null +++ b/vendor/github.com/blugelabs/bluge/index/merge.go @@ -0,0 +1,374 @@ +// Copyright (c) 2020 Couchbase, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package index + +import ( + "fmt" + "sync/atomic" + "time" + + "github.com/RoaringBitmap/roaring" + "github.com/blugelabs/bluge/index/mergeplan" + segment "github.com/blugelabs/bluge_segment_api" +) + +func (s *Writer) mergerLoop(merges chan *segmentMerge, persisterNotifier watcherChan) { + defer s.asyncTasks.Done() + + var lastEpochMergePlanned uint64 + + // tell the persister we're waiting for anything after the initialEpoch + ew, err := persisterNotifier.NotifyUsAfter(0, s.closeCh) + if err != nil { + return + } + +OUTER: + for { + atomic.AddUint64(&s.stats.TotFileMergeLoopBeg, 1) + + select { + case <-s.closeCh: + break OUTER + + case <-ew.notifyCh: + // check to see if there is a new snapshot to persist + ourSnapshot := s.currentSnapshot() + atomic.StoreUint64(&s.stats.mergeSnapshotSize, uint64(ourSnapshot.Size())) + atomic.StoreUint64(&s.stats.mergeEpoch, ourSnapshot.epoch) + + if ourSnapshot.epoch != lastEpochMergePlanned { + startTime := time.Now() + + // lets get started + err = s.planMergeAtSnapshot(merges, ourSnapshot, s.config.MergePlanOptions) + if err != nil { + atomic.StoreUint64(&s.stats.mergeEpoch, 0) + if err == segment.ErrClosed { + // index has been closed + _ = ourSnapshot.Close() + break OUTER + } + s.fireAsyncError(fmt.Errorf("merging err: %v", err)) + _ = ourSnapshot.Close() + atomic.AddUint64(&s.stats.TotFileMergeLoopErr, 1) + continue OUTER + } + lastEpochMergePlanned = ourSnapshot.epoch + + atomic.StoreUint64(&s.stats.LastMergedEpoch, ourSnapshot.epoch) + + s.fireEvent(EventKindMergerProgress, time.Since(startTime)) + 
} + _ = ourSnapshot.Close() + + // update the persister, that we're now waiting for something + // after lastEpochMergePlanned + ew, err = persisterNotifier.NotifyUsAfter(lastEpochMergePlanned, s.closeCh) + if err != nil { + break OUTER + } + } + + atomic.AddUint64(&s.stats.TotFileMergeLoopEnd, 1) + } +} + +func (s *Writer) planMergeAtSnapshot(merges chan *segmentMerge, ourSnapshot *Snapshot, + options mergeplan.Options) error { + // build list of persisted segments in this snapshot + var onlyPersistedSnapshots []mergeplan.Segment + for _, segmentSnapshot := range ourSnapshot.segment { + if segmentSnapshot.segment.Persisted() { + onlyPersistedSnapshots = append(onlyPersistedSnapshots, segmentSnapshot) + } + } + + atomic.AddUint64(&s.stats.TotFileMergePlan, 1) + + // give this list to the planner + resultMergePlan, err := mergeplan.Plan(onlyPersistedSnapshots, &options) + if err != nil { + atomic.AddUint64(&s.stats.TotFileMergePlanErr, 1) + return fmt.Errorf("merge planning err: %v", err) + } + if resultMergePlan == nil { + // nothing to do + atomic.AddUint64(&s.stats.TotFileMergePlanNone, 1) + return nil + } + atomic.AddUint64(&s.stats.TotFileMergePlanOk, 1) + + atomic.AddUint64(&s.stats.TotFileMergePlanTasks, uint64(len(resultMergePlan.Tasks))) + + // process tasks in serial for now + for _, task := range resultMergePlan.Tasks { + err := s.executeMergeTask(merges, task) + if err != nil { + return err + } + } + + return nil +} + +func (s *Writer) executeMergeTask(merges chan *segmentMerge, task *mergeplan.MergeTask) error { + if len(task.Segments) == 0 { + atomic.AddUint64(&s.stats.TotFileMergePlanTasksSegmentsEmpty, 1) + return nil + } + + atomic.AddUint64(&s.stats.TotFileMergePlanTasksSegments, uint64(len(task.Segments))) + + oldMap, segmentsToMerge, docsToDrop := s.planSegmentsToMerge(task) + + newSegmentID := atomic.AddUint64(&s.nextSegmentID, 1) + var oldNewDocNums map[uint64][]uint64 + var seg *segmentWrapper + if len(segmentsToMerge) > 0 { + 
fileMergeZapStartTime := time.Now() + + atomic.AddUint64(&s.stats.TotFileMergeZapBeg, 1) + newDocNums, err := s.merge(segmentsToMerge, docsToDrop, newSegmentID) + atomic.AddUint64(&s.stats.TotFileMergeZapEnd, 1) + + fileMergeZapTime := uint64(time.Since(fileMergeZapStartTime)) + atomic.AddUint64(&s.stats.TotFileMergeZapTime, fileMergeZapTime) + if atomic.LoadUint64(&s.stats.MaxFileMergeZapTime) < fileMergeZapTime { + atomic.StoreUint64(&s.stats.MaxFileMergeZapTime, fileMergeZapTime) + } + + if err != nil { + atomic.AddUint64(&s.stats.TotFileMergePlanTasksErr, 1) + if err == segment.ErrClosed { + return err + } + return fmt.Errorf("merging failed: %v", err) + } + + seg, err = s.loadSegment(newSegmentID, s.segPlugin) + if err != nil { + atomic.AddUint64(&s.stats.TotFileMergePlanTasksErr, 1) + return err + } + oldNewDocNums = make(map[uint64][]uint64) + for i, segNewDocNums := range newDocNums { + oldNewDocNums[task.Segments[i].ID()] = segNewDocNums + } + + atomic.AddUint64(&s.stats.TotFileMergeSegments, uint64(len(segmentsToMerge))) + } + + sm := &segmentMerge{ + id: newSegmentID, + old: oldMap, + oldNewDocNums: oldNewDocNums, + new: seg, + notifyCh: make(chan *mergeTaskIntroStatus), + } + + s.fireEvent(EventKindMergeTaskIntroductionStart, 0) + + // give it to the introducer + select { + case <-s.closeCh: + _ = seg.Close() + return segment.ErrClosed + case merges <- sm: + atomic.AddUint64(&s.stats.TotFileMergeIntroductions, 1) + } + + introStartTime := time.Now() + // it is safe to blockingly wait for the merge introduction + // here as the introducer is bound to handle the notify channel. 
+ mergeTaskIntroStatus := <-sm.notifyCh + introTime := uint64(time.Since(introStartTime)) + atomic.AddUint64(&s.stats.TotFileMergeZapIntroductionTime, introTime) + if atomic.LoadUint64(&s.stats.MaxFileMergeZapIntroductionTime) < introTime { + atomic.StoreUint64(&s.stats.MaxFileMergeZapIntroductionTime, introTime) + } + atomic.AddUint64(&s.stats.TotFileMergeIntroductionsDone, 1) + if mergeTaskIntroStatus != nil && mergeTaskIntroStatus.snapshot != nil { + _ = mergeTaskIntroStatus.snapshot.Close() + if mergeTaskIntroStatus.skipped { + // decrement the ref counts on skipping introduction. + // FIXME stale file that won't get cleaned up + _ = seg.Close() + } + } + + atomic.AddUint64(&s.stats.TotFileMergePlanTasksDone, 1) + s.fireEvent(EventKindMergeTaskIntroduction, 0) + return nil +} + +func (s *Writer) planSegmentsToMerge(task *mergeplan.MergeTask) (oldMap map[uint64]*segmentSnapshot, + segmentsToMerge []segment.Segment, docsToDrop []*roaring.Bitmap) { + oldMap = make(map[uint64]*segmentSnapshot) + segmentsToMerge = make([]segment.Segment, 0, len(task.Segments)) + docsToDrop = make([]*roaring.Bitmap, 0, len(task.Segments)) + for _, planSegment := range task.Segments { + if segSnapshot, ok := planSegment.(*segmentSnapshot); ok { + oldMap[segSnapshot.id] = segSnapshot + if segSnapshot.segment.Persisted() { + if segSnapshot.LiveSize() == 0 { + atomic.AddUint64(&s.stats.TotFileMergeSegmentsEmpty, 1) + oldMap[segSnapshot.id] = nil + } else { + segmentsToMerge = append(segmentsToMerge, segSnapshot.segment.Segment) + docsToDrop = append(docsToDrop, segSnapshot.deleted) + } + } + } + } + return oldMap, segmentsToMerge, docsToDrop +} + +type mergeTaskIntroStatus struct { + snapshot *Snapshot + skipped bool +} + +type segmentMerge struct { + id uint64 + old map[uint64]*segmentSnapshot + oldNewDocNums map[uint64][]uint64 + new *segmentWrapper + notifyCh chan *mergeTaskIntroStatus +} + +// ProcessSegmentNow takes in a segmentID, the current version of that segment snapshot +// 
which could have more deleted items since we examined it for the merge, and a +// roaringBitmap to track these new deletions. +// If this segment isn't going away, we do nothing, returning false. +// If this segment is going away, we check for any deletions since we examined it +// during the merge, for each of these, we find the new document number of the item, +// and flip the bit in the newSegmentDeleted bitmap, then returning true. +func (s *segmentMerge) ProcessSegmentNow(segmentID uint64, segSnapNow *segmentSnapshot, + newSegmentDeleted *roaring.Bitmap) bool { + if segSnapAtMerge, ok := s.old[segmentID]; ok { + if segSnapAtMerge != nil && segSnapNow.deleted != nil { + // assume all these deletes are new + deletedSince := segSnapNow.deleted + // if we already knew about some of them, remove + if segSnapAtMerge.deleted != nil { + deletedSince = roaring.AndNot(segSnapNow.deleted, segSnapAtMerge.deleted) + } + deletedSinceItr := deletedSince.Iterator() + for deletedSinceItr.HasNext() { + oldDocNum := deletedSinceItr.Next() + newDocNum := s.oldNewDocNums[segmentID][oldDocNum] + newSegmentDeleted.Add(uint32(newDocNum)) + } + } + // clean up the old segment map to figure out the + // obsolete segments wrt root in meantime, whatever + // segments left behind in old map after processing + // the root segments would be the obsolete segment set + delete(s.old, segmentID) + + return true + } + return false +} + +// perform a merging of the given SegmentBase instances into a new, +// persisted segment, and synchronously introduce that new segment +// into the root +func (s *Writer) mergeSegmentBases(merges chan *segmentMerge, snapshot *Snapshot, + sbs []segment.Segment, sbsDrops []*roaring.Bitmap, + sbsIndexes []int) (*Snapshot, uint64, error) { + atomic.AddUint64(&s.stats.TotMemMergeBeg, 1) + + memMergeZapStartTime := time.Now() + + atomic.AddUint64(&s.stats.TotMemMergeZapBeg, 1) + + newSegmentID := atomic.AddUint64(&s.nextSegmentID, 1) + + newDocNums, err := 
s.merge(sbs, sbsDrops, newSegmentID) + + atomic.AddUint64(&s.stats.TotMemMergeZapEnd, 1) + + memMergeZapTime := uint64(time.Since(memMergeZapStartTime)) + atomic.AddUint64(&s.stats.TotMemMergeZapTime, memMergeZapTime) + if atomic.LoadUint64(&s.stats.MaxMemMergeZapTime) < memMergeZapTime { + atomic.StoreUint64(&s.stats.MaxMemMergeZapTime, memMergeZapTime) + } + + if err != nil { + atomic.AddUint64(&s.stats.TotMemMergeErr, 1) + return nil, 0, err + } + + seg, err := s.loadSegment(newSegmentID, s.segPlugin) + if err != nil { + atomic.AddUint64(&s.stats.TotMemMergeErr, 1) + return nil, 0, err + } + + // update persisted stats + atomic.AddUint64(&s.stats.TotPersistedItems, seg.Count()) + atomic.AddUint64(&s.stats.TotPersistedSegments, 1) + + sm := &segmentMerge{ + id: newSegmentID, + old: make(map[uint64]*segmentSnapshot), + oldNewDocNums: make(map[uint64][]uint64), + new: seg, + notifyCh: make(chan *mergeTaskIntroStatus), + } + + for i, idx := range sbsIndexes { + ss := snapshot.segment[idx] + sm.old[ss.id] = ss + sm.oldNewDocNums[ss.id] = newDocNums[i] + } + + select { // send to introducer + case <-s.closeCh: + _ = seg.DecRef() + return nil, 0, segment.ErrClosed + case merges <- sm: + } + + // blockingly wait for the introduction to complete + var newSnapshot *Snapshot + mergeTaskIntroStatus := <-sm.notifyCh + if mergeTaskIntroStatus != nil && mergeTaskIntroStatus.snapshot != nil { + newSnapshot = mergeTaskIntroStatus.snapshot + atomic.AddUint64(&s.stats.TotMemMergeSegments, uint64(len(sbs))) + atomic.AddUint64(&s.stats.TotMemMergeDone, 1) + if mergeTaskIntroStatus.skipped { + // decrement the ref counts on skipping introduction. 
+ _ = newSnapshot.Close() + _ = seg.Close() + newSnapshot = nil + } + } + return newSnapshot, newSegmentID, nil +} + +func (s *Writer) merge(segments []segment.Segment, drops []*roaring.Bitmap, id uint64) ( + [][]uint64, error) { + merger := s.segPlugin.Merge(segments, drops, s.config.MergeBufferSize) + + err := s.directory.Persist(ItemKindSegment, id, merger, s.closeCh) + if err != nil { + return nil, err + } + + return merger.DocumentNumbers(), nil +} diff --git a/vendor/github.com/blevesearch/bleve/v2/index/scorch/mergeplan/merge_plan.go b/vendor/github.com/blugelabs/bluge/index/mergeplan/merge_plan.go similarity index 88% rename from vendor/github.com/blevesearch/bleve/v2/index/scorch/mergeplan/merge_plan.go rename to vendor/github.com/blugelabs/bluge/index/mergeplan/merge_plan.go index 752350662..108229d80 100644 --- a/vendor/github.com/blevesearch/bleve/v2/index/scorch/mergeplan/merge_plan.go +++ b/vendor/github.com/blugelabs/bluge/index/mergeplan/merge_plan.go @@ -1,4 +1,4 @@ -// Copyright (c) 2017 Couchbase, Inc. +// Copyright (c) 2020 Couchbase, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -29,7 +29,7 @@ import ( // calculate segment merging. type Segment interface { // Unique id of the segment -- used for sorting. - Id() uint64 + ID() uint64 // Full segment size (the size before any logical deletions). FullSize() int64 @@ -43,7 +43,7 @@ type Segment interface { // assigned to at most a single MergeTask in the output MergePlan. A // segment not assigned to any MergeTask means the segment should // remain unmerged. -func Plan(segments []Segment, o *MergePlanOptions) (*MergePlan, error) { +func Plan(segments []Segment, o *Options) (*MergePlan, error) { return plan(segments, o) } @@ -63,8 +63,8 @@ type MergeTask struct { Segments []Segment } -// The MergePlanOptions is designed to be reusable between planning calls. 
-type MergePlanOptions struct { +// The Options is designed to be reusable between planning calls. +type Options struct { // Max # segments per logarithmic tier, or max width of any // logarithmic “step”. Smaller values mean more merging but fewer // segments. Should be >= SegmentsPerMergeTask, else you'll have @@ -99,17 +99,17 @@ type MergePlanOptions struct { // Optional, defaults to mergeplan.CalcBudget(). CalcBudget func(totalSize int64, firstTierSize int64, - o *MergePlanOptions) (budgetNumSegments int) + o *Options) (budgetNumSegments int) // Optional, defaults to mergeplan.ScoreSegments(). - ScoreSegments func(segments []Segment, o *MergePlanOptions) float64 + ScoreSegments func(segments []Segment, o *Options) float64 // Optional. Logger func(string) } // Returns the higher of the input or FloorSegmentSize. -func (o *MergePlanOptions) RaiseToFloorSegmentSize(s int64) int64 { +func (o *Options) RaiseToFloorSegmentSize(s int64) int64 { if s > o.FloorSegmentSize { return s } @@ -122,10 +122,10 @@ const MaxSegmentSizeLimit = 1<<31 - 1 // ErrMaxSegmentSizeTooLarge is returned when the size of the segment // exceeds the MaxSegmentSizeLimit -var ErrMaxSegmentSizeTooLarge = errors.New("MaxSegmentSize exceeds the size limit") +var ErrMaxSegmentSizeTooLarge = errors.New("option MmaxSegmentSize exceeds the size limit") // DefaultMergePlanOptions suggests the default options. -var DefaultMergePlanOptions = MergePlanOptions{ +var DefaultMergePlanOptions = Options{ MaxSegmentsPerTier: 10, MaxSegmentSize: 5000000, TierGrowth: 10.0, @@ -134,20 +134,9 @@ var DefaultMergePlanOptions = MergePlanOptions{ ReclaimDeletesWeight: 2.0, } -// SingleSegmentMergePlanOptions helps in creating a -// single segment index. 
-var SingleSegmentMergePlanOptions = MergePlanOptions{ - MaxSegmentsPerTier: 1, - MaxSegmentSize: 1 << 30, - TierGrowth: 1.0, - SegmentsPerMergeTask: 10, - FloorSegmentSize: 1 << 30, - ReclaimDeletesWeight: 2.0, -} - // ------------------------------------------- -func plan(segmentsIn []Segment, o *MergePlanOptions) (*MergePlan, error) { +func plan(segmentsIn []Segment, o *Options) (*MergePlan, error) { if len(segmentsIn) <= 1 { return nil, nil } @@ -160,22 +149,7 @@ func plan(segmentsIn []Segment, o *MergePlanOptions) (*MergePlan, error) { sort.Sort(byLiveSizeDescending(segments)) - var minLiveSize int64 = math.MaxInt64 - - var eligibles []Segment - var eligiblesLiveSize int64 - - for _, segment := range segments { - if minLiveSize > segment.LiveSize() { - minLiveSize = segment.LiveSize() - } - - // Only small-enough segments are eligible. - if segment.LiveSize() < o.MaxSegmentSize/2 { - eligibles = append(eligibles, segment) - eligiblesLiveSize += segment.LiveSize() - } - } + minLiveSize, eligiblesLiveSize, eligibles := findLiveSizesAndEligibles(segments, o) minLiveSize = o.RaiseToFloorSegmentSize(minLiveSize) @@ -247,10 +221,26 @@ func plan(segmentsIn []Segment, o *MergePlanOptions) (*MergePlan, error) { return rv, nil } +func findLiveSizesAndEligibles(segments []Segment, o *Options) (minLiveSize, eligiblesLiveSize int64, eligibles []Segment) { + minLiveSize = math.MaxInt64 + for _, segment := range segments { + if minLiveSize > segment.LiveSize() { + minLiveSize = segment.LiveSize() + } + + // Only small-enough segments are eligible. + if segment.LiveSize() < o.MaxSegmentSize/2 { + eligibles = append(eligibles, segment) + eligiblesLiveSize += segment.LiveSize() + } + } + return minLiveSize, eligiblesLiveSize, eligibles +} + // Compute the number of segments that would be needed to cover the // totalSize, by climbing up a logarithmically growing staircase of // segment tiers. 
-func CalcBudget(totalSize int64, firstTierSize int64, o *MergePlanOptions) ( +func CalcBudget(totalSize, firstTierSize int64, o *Options) ( budgetNumSegments int) { tierSize := firstTierSize if tierSize < 1 { @@ -263,8 +253,8 @@ func CalcBudget(totalSize int64, firstTierSize int64, o *MergePlanOptions) ( } tierGrowth := o.TierGrowth - if tierGrowth < 1.0 { - tierGrowth = 1.0 + if tierGrowth < 1 { + tierGrowth = 1 } for totalSize > 0 { @@ -283,7 +273,7 @@ func CalcBudget(totalSize int64, firstTierSize int64, o *MergePlanOptions) ( } // Of note, removeSegments() keeps the ordering of the results stable. -func removeSegments(segments []Segment, toRemove []Segment) []Segment { +func removeSegments(segments, toRemove []Segment) []Segment { rv := make([]Segment, 0, len(segments)-len(toRemove)) OUTER: for _, segment := range segments { @@ -298,7 +288,7 @@ OUTER: } // Smaller result score is better. -func ScoreSegments(segments []Segment, o *MergePlanOptions) float64 { +func ScoreSegments(segments []Segment, o *Options) float64 { var totBeforeSize int64 var totAfterSize int64 var totAfterSizeFloored int64 @@ -379,7 +369,7 @@ func ToBarChart(prefix string, barMax int, segments []Segment, plan *MergePlan) strings.Repeat("x", barFull-barLive)[0:barFull-barLive] rv = append(rv, fmt.Sprintf("%s %5d: %5d /%5d - %s %s", prefix, - segment.Id(), + segment.ID(), segment.LiveSize(), segment.FullSize(), barKind, bar)) @@ -389,7 +379,7 @@ func ToBarChart(prefix string, barMax int, segments []Segment, plan *MergePlan) } // ValidateMergePlannerOptions validates the merge planner options -func ValidateMergePlannerOptions(options *MergePlanOptions) error { +func ValidateMergePlannerOptions(options *Options) error { if options.MaxSegmentSize > MaxSegmentSizeLimit { return ErrMaxSegmentSizeTooLarge } diff --git a/vendor/github.com/blevesearch/bleve/v2/index/scorch/mergeplan/sort.go b/vendor/github.com/blugelabs/bluge/index/mergeplan/sort.go similarity index 92% rename from 
vendor/github.com/blevesearch/bleve/v2/index/scorch/mergeplan/sort.go rename to vendor/github.com/blugelabs/bluge/index/mergeplan/sort.go index d044b8d7c..44572189c 100644 --- a/vendor/github.com/blevesearch/bleve/v2/index/scorch/mergeplan/sort.go +++ b/vendor/github.com/blugelabs/bluge/index/mergeplan/sort.go @@ -1,4 +1,4 @@ -// Copyright (c) 2017 Couchbase, Inc. +// Copyright (c) 2020 Couchbase, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -24,5 +24,5 @@ func (a byLiveSizeDescending) Less(i, j int) bool { if a[i].LiveSize() != a[j].LiveSize() { return a[i].LiveSize() > a[j].LiveSize() } - return a[i].Id() < a[j].Id() + return a[i].ID() < a[j].ID() } diff --git a/vendor/github.com/blevesearch/bleve/v2/index/scorch/optimize.go b/vendor/github.com/blugelabs/bluge/index/optimize.go similarity index 68% rename from vendor/github.com/blevesearch/bleve/v2/index/scorch/optimize.go rename to vendor/github.com/blugelabs/bluge/index/optimize.go index 3c7969fa9..aba5c4ad0 100644 --- a/vendor/github.com/blevesearch/bleve/v2/index/scorch/optimize.go +++ b/vendor/github.com/blugelabs/bluge/index/optimize.go @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Couchbase, Inc. +// Copyright (c) 2020 Couchbase, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,68 +12,62 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package scorch +package index import ( "fmt" - "github.com/RoaringBitmap/roaring" - index "github.com/blevesearch/bleve_index_api" - segment "github.com/blevesearch/scorch_segment_api/v2" "sync/atomic" -) -var OptimizeConjunction = true -var OptimizeConjunctionUnadorned = true -var OptimizeDisjunctionUnadorned = true + "github.com/RoaringBitmap/roaring" + segment "github.com/blugelabs/bluge_segment_api" +) -func (s *IndexSnapshotTermFieldReader) Optimize(kind string, - octx index.OptimizableContext) (index.OptimizableContext, error) { - if OptimizeConjunction && kind == "conjunction" { - return s.optimizeConjunction(octx) +func (i *postingsIterator) Optimize(kind string, + octx segment.OptimizableContext) (segment.OptimizableContext, error) { + if i.snapshot.parent.config.OptimizeConjunction && kind == "conjunction" { + return i.optimizeConjunction(octx) } - if OptimizeConjunctionUnadorned && kind == "conjunction:unadorned" { - return s.optimizeConjunctionUnadorned(octx) + if i.snapshot.parent.config.OptimizeConjunctionUnadorned && kind == "conjunction:unadorned" { + return i.optimizeConjunctionUnadorned(octx) } - if OptimizeDisjunctionUnadorned && kind == "disjunction:unadorned" { - return s.optimizeDisjunctionUnadorned(octx) + if i.snapshot.parent.config.OptimizeDisjunctionUnadorned && kind == "disjunction:unadorned" { + return i.optimizeDisjunctionUnadorned(octx) } return nil, nil } -var OptimizeDisjunctionUnadornedMinChildCardinality = uint64(256) - // ---------------------------------------------------------------- -func (s *IndexSnapshotTermFieldReader) optimizeConjunction( - octx index.OptimizableContext) (index.OptimizableContext, error) { +func (i *postingsIterator) optimizeConjunction( + octx segment.OptimizableContext) (segment.OptimizableContext, error) { if octx == nil { - octx = &OptimizeTFRConjunction{snapshot: s.snapshot} + octx = &optimizeConjunction{snapshot: i.snapshot} } - o, ok := octx.(*OptimizeTFRConjunction) + o, ok := 
octx.(*optimizeConjunction) if !ok { return octx, nil } - if o.snapshot != s.snapshot { + if o.snapshot != i.snapshot { return nil, fmt.Errorf("tried to optimize conjunction across different snapshots") } - o.tfrs = append(o.tfrs, s) + o.tfrs = append(o.tfrs, i) return o, nil } -type OptimizeTFRConjunction struct { - snapshot *IndexSnapshot +type optimizeConjunction struct { + snapshot *Snapshot - tfrs []*IndexSnapshotTermFieldReader + tfrs []*postingsIterator } -func (o *OptimizeTFRConjunction) Finish() (index.Optimized, error) { +func (o *optimizeConjunction) Finish() (segment.PostingsIterator, error) { if len(o.tfrs) <= 1 { return nil, nil } @@ -121,48 +115,49 @@ func (o *OptimizeTFRConjunction) Finish() (index.Optimized, error) { // additional or subsidiary information like freq-norm's and // term-vectors are not required, and instead only the internal-id's // are needed. -func (s *IndexSnapshotTermFieldReader) optimizeConjunctionUnadorned( - octx index.OptimizableContext) (index.OptimizableContext, error) { +func (i *postingsIterator) optimizeConjunctionUnadorned( + octx segment.OptimizableContext) (segment.OptimizableContext, error) { if octx == nil { - octx = &OptimizeTFRConjunctionUnadorned{snapshot: s.snapshot} + octx = &optimizeConjunctionUnadorned{snapshot: i.snapshot} } - o, ok := octx.(*OptimizeTFRConjunctionUnadorned) + o, ok := octx.(*optimizeConjunctionUnadorned) if !ok { return nil, nil } - if o.snapshot != s.snapshot { + if o.snapshot != i.snapshot { return nil, fmt.Errorf("tried to optimize unadorned conjunction across different snapshots") } - o.tfrs = append(o.tfrs, s) + o.tfrs = append(o.tfrs, i) return o, nil } -type OptimizeTFRConjunctionUnadorned struct { - snapshot *IndexSnapshot +type optimizeConjunctionUnadorned struct { + snapshot *Snapshot - tfrs []*IndexSnapshotTermFieldReader + tfrs []*postingsIterator } -var OptimizeTFRConjunctionUnadornedTerm = []byte("") -var OptimizeTFRConjunctionUnadornedField = "*" +var 
optimizeConjunctionUnadornedTerm = []byte("") + +const optimizeConjunctionUnadornedField = "*" // Finish of an unadorned conjunction optimization will compute a // termFieldReader with an "actual" bitmap that represents the // constituent bitmaps AND'ed together. This termFieldReader cannot // provide any freq-norm or termVector associated information. -func (o *OptimizeTFRConjunctionUnadorned) Finish() (rv index.Optimized, err error) { +func (o *optimizeConjunctionUnadorned) Finish() (rv segment.PostingsIterator, err error) { if len(o.tfrs) <= 1 { return nil, nil } // We use an artificial term and field because the optimized // termFieldReader can represent multiple terms and fields. - oTFR := o.snapshot.unadornedTermFieldReader( - OptimizeTFRConjunctionUnadornedTerm, OptimizeTFRConjunctionUnadornedField) + oTFR := o.snapshot.unadornedPostingsIterator( + optimizeConjunctionUnadornedTerm, optimizeConjunctionUnadornedField) var actualBMs []*roaring.Bitmap // Collected from regular posting lists. @@ -174,7 +169,7 @@ OUTER: var docNum1HitLastOk bool for _, tfr := range o.tfrs { - if _, ok := tfr.iterators[i].(*emptyPostingsIterator); ok { + if tfr.iterators[i].Empty() { // An empty postings iterator means the entire AND is empty. oTFR.iterators[i] = anEmptyPostingsIterator continue OUTER @@ -267,42 +262,43 @@ OUTER: // additional or subsidiary information like freq-norm's and // term-vectors are not required, and instead only the internal-id's // are needed. 
-func (s *IndexSnapshotTermFieldReader) optimizeDisjunctionUnadorned( - octx index.OptimizableContext) (index.OptimizableContext, error) { +func (i *postingsIterator) optimizeDisjunctionUnadorned( + octx segment.OptimizableContext) (segment.OptimizableContext, error) { if octx == nil { - octx = &OptimizeTFRDisjunctionUnadorned{ - snapshot: s.snapshot, - } + octx = &optimizeDisjunctionUnadorned{snapshot: i.snapshot} } - o, ok := octx.(*OptimizeTFRDisjunctionUnadorned) + o, ok := octx.(*optimizeDisjunctionUnadorned) if !ok { return nil, nil } - if o.snapshot != s.snapshot { + if o.snapshot != i.snapshot { return nil, fmt.Errorf("tried to optimize unadorned disjunction across different snapshots") } - o.tfrs = append(o.tfrs, s) + o.tfrs = append(o.tfrs, i) return o, nil } -type OptimizeTFRDisjunctionUnadorned struct { - snapshot *IndexSnapshot +type optimizeDisjunctionUnadorned struct { + snapshot *Snapshot - tfrs []*IndexSnapshotTermFieldReader + tfrs []*postingsIterator } -var OptimizeTFRDisjunctionUnadornedTerm = []byte("") -var OptimizeTFRDisjunctionUnadornedField = "*" +var optimizeDisjunctionUnadornedTerm = []byte("") + +const optimizeDisjunctionUnadornedField = "*" + +const preferHeapOr = 2 // Finish of an unadorned disjunction optimization will compute a // termFieldReader with an "actual" bitmap that represents the // constituent bitmaps OR'ed together. This termFieldReader cannot // provide any freq-norm or termVector associated information. -func (o *OptimizeTFRDisjunctionUnadorned) Finish() (rv index.Optimized, err error) { +func (o *optimizeDisjunctionUnadorned) Finish() (rv segment.PostingsIterator, err error) { if len(o.tfrs) <= 1 { return nil, nil } @@ -327,8 +323,8 @@ func (o *OptimizeTFRDisjunctionUnadorned) Finish() (rv index.Optimized, err erro // We use an artificial term and field because the optimized // termFieldReader can represent multiple terms and fields. 
- oTFR := o.snapshot.unadornedTermFieldReader( - OptimizeTFRDisjunctionUnadornedTerm, OptimizeTFRDisjunctionUnadornedField) + oTFR := o.snapshot.unadornedPostingsIterator( + optimizeDisjunctionUnadornedTerm, optimizeDisjunctionUnadornedField) var docNums []uint32 // Collected docNum's from 1-hit posting lists. var actualBMs []*roaring.Bitmap // Collected from regular posting lists. @@ -355,9 +351,9 @@ func (o *OptimizeTFRDisjunctionUnadorned) Finish() (rv index.Optimized, err erro } var bm *roaring.Bitmap - if len(actualBMs) > 2 { + if len(actualBMs) > preferHeapOr { bm = roaring.HeapOr(actualBMs...) - } else if len(actualBMs) == 2 { + } else if len(actualBMs) == preferHeapOr { bm = roaring.Or(actualBMs[0], actualBMs[1]) } else if len(actualBMs) == 1 { bm = actualBMs[0].Clone() @@ -375,22 +371,3 @@ func (o *OptimizeTFRDisjunctionUnadorned) Finish() (rv index.Optimized, err erro atomic.AddUint64(&o.snapshot.parent.stats.TotTermSearchersStarted, uint64(1)) return oTFR, nil } - -// ---------------------------------------------------------------- - -func (i *IndexSnapshot) unadornedTermFieldReader( - term []byte, field string) *IndexSnapshotTermFieldReader { - // This IndexSnapshotTermFieldReader will not be recycled, more - // conversation here: https://github.com/blevesearch/bleve/pull/1438 - return &IndexSnapshotTermFieldReader{ - term: term, - field: field, - snapshot: i, - iterators: make([]segment.PostingsIterator, len(i.segment)), - segmentOffset: 0, - includeFreq: false, - includeNorm: false, - includeTermVectors: false, - recycle: false, - } -} diff --git a/vendor/github.com/blugelabs/bluge/index/persister.go b/vendor/github.com/blugelabs/bluge/index/persister.go new file mode 100644 index 000000000..f628cc035 --- /dev/null +++ b/vendor/github.com/blugelabs/bluge/index/persister.go @@ -0,0 +1,379 @@ +// Copyright (c) 2020 Couchbase, Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package index + +import ( + "fmt" + "sync/atomic" + "time" + + "github.com/RoaringBitmap/roaring" + segment "github.com/blugelabs/bluge_segment_api" +) + +func (s *Writer) persisterLoop(merges chan *segmentMerge, persists chan *persistIntroduction, + introducerNotifier, persisterNotifier watcherChan, lastPersistedEpoch uint64) { + defer s.asyncTasks.Done() + + var persistWatchers epochWatchers + var lastMergedEpoch uint64 + var ew *epochWatcher + + var unpersistedCallbacks []func(error) + + // tell the introducer we're waiting for changes after the initial epoch + introducerEpochWatcher, err := introducerNotifier.NotifyUsAfter(0, s.closeCh) + if err != nil { + return + } + +OUTER: + for { + atomic.AddUint64(&s.stats.TotPersistLoopBeg, 1) + atomic.AddUint64(&s.stats.TotPersistLoopWait, 1) + + select { + case <-s.closeCh: + break OUTER + case ew = <-persisterNotifier: + persistWatchers.Add(ew) + lastMergedEpoch = ew.epoch + case <-introducerEpochWatcher.notifyCh: + // woken up, next loop should pick up work + atomic.AddUint64(&s.stats.TotPersistLoopWaitNotified, 1) + + lastMergedEpoch, persistWatchers = s.pausePersisterForMergerCatchUp(persisterNotifier, lastPersistedEpoch, + lastMergedEpoch, persistWatchers) + + var ourSnapshot *Snapshot + var ourPersisted []chan error + var ourPersistedCallbacks []func(error) + + // check to see if there is a new snapshot to persist + s.rootLock.Lock() + if s.root != nil && 
s.root.epoch > lastPersistedEpoch { + ourSnapshot = s.root + ourSnapshot.addRef() + ourPersisted = s.rootPersisted + s.rootPersisted = nil + ourPersistedCallbacks = s.persistedCallbacks + s.persistedCallbacks = nil + atomic.StoreUint64(&s.stats.persistSnapshotSize, uint64(ourSnapshot.Size())) + atomic.StoreUint64(&s.stats.persistEpoch, ourSnapshot.epoch) + } + s.rootLock.Unlock() + + if ourSnapshot != nil { + startTime := time.Now() + + err = s.persistSnapshot(merges, persists, ourSnapshot) + for _, ch := range ourPersisted { + if err != nil { + ch <- err + } + close(ch) + } + if err != nil { + atomic.StoreUint64(&s.stats.persistEpoch, 0) + if err == segment.ErrClosed { + // index has been closed + _ = ourSnapshot.Close() + break OUTER + } + + // save this current snapshot's persistedCallbacks, to invoke during + // the retry attempt + unpersistedCallbacks = append(unpersistedCallbacks, ourPersistedCallbacks...) + + s.fireAsyncError(fmt.Errorf("got err persisting snapshot: %v", err)) + _ = ourSnapshot.Close() + atomic.AddUint64(&s.stats.TotPersistLoopErr, 1) + continue OUTER + } + + if unpersistedCallbacks != nil { + // in the event of this being a retry attempt for persisting a snapshot + // that had earlier failed, prepend the persistedCallbacks associated + // with earlier segment(s) to the latest persistedCallbacks + ourPersistedCallbacks = append(unpersistedCallbacks, ourPersistedCallbacks...) 
+ unpersistedCallbacks = nil + } + + for i := range ourPersistedCallbacks { + ourPersistedCallbacks[i](err) + } + + atomic.StoreUint64(&s.stats.LastPersistedEpoch, ourSnapshot.epoch) + + lastPersistedEpoch = ourSnapshot.epoch + for _, ew := range persistWatchers { + close(ew.notifyCh) + } + + persistWatchers = nil + _ = ourSnapshot.Close() + + s.fireEvent(EventKindPersisterProgress, time.Since(startTime)) + + if s.currentEpoch() != lastPersistedEpoch { + atomic.AddUint64(&s.stats.TotPersistLoopProgress, 1) + continue OUTER + } + } + + // tell the introducer we're waiting for changes after lastPersistedEpoch + introducerEpochWatcher, err = introducerNotifier.NotifyUsAfter(lastPersistedEpoch, s.closeCh) + if err != nil { + break OUTER + } + + err = s.deletionPolicy.Cleanup(s.directory) // might as well cleanup while waiting + if err != nil { + s.config.AsyncError(err) + } + } + + atomic.AddUint64(&s.stats.TotPersistLoopEnd, 1) + } +} + +func (s *Writer) pausePersisterForMergerCatchUp(persisterNotifier watcherChan, + lastPersistedEpoch, lastMergedEpoch uint64, persistWatchers epochWatchers) (uint64, epochWatchers) { + // First, let the watchers proceed if they lag behind + persistWatchers.NotifySatisfiedWatchers(lastPersistedEpoch) + + // Check the merger lag by counting the segment files on disk, + numFilesOnDisk, _ := s.directory.Stats() + + // On finding fewer files on disk, persister takes a short pause + // for sufficient in-memory segments to pile up for the next + // memory merge cum persist loop. 
+ if numFilesOnDisk < uint64(s.config.PersisterNapUnderNumFiles) && + s.config.PersisterNapTimeMSec > 0 && s.numEventsBlocking() == 0 { + select { + case <-s.closeCh: + case <-time.After(time.Millisecond * time.Duration(s.config.PersisterNapTimeMSec)): + atomic.AddUint64(&s.stats.TotPersisterNapPauseCompleted, 1) + + case ew := <-persisterNotifier: + // unblock the merger in meantime + persistWatchers.Add(ew) + lastMergedEpoch = ew.epoch + persistWatchers.NotifySatisfiedWatchers(lastPersistedEpoch) + atomic.AddUint64(&s.stats.TotPersisterMergerNapBreak, 1) + } + return lastMergedEpoch, persistWatchers + } + + // Finding too many files on disk could be due to two reasons. + // 1. Too many older snapshots awaiting the clean up. + // 2. The merger could be lagging behind on merging the disk files. + if numFilesOnDisk > uint64(s.config.PersisterNapUnderNumFiles) { + err := s.deletionPolicy.Cleanup(s.directory) + if err != nil { + s.config.AsyncError(err) + } + numFilesOnDisk, _ = s.directory.Stats() + } + + // Persister pause until the merger catches up to reduce the segment + // file count under the threshold. + // But if there is memory pressure, then skip this sleep maneuvers. 
+OUTER: + for s.config.PersisterNapUnderNumFiles > 0 && + numFilesOnDisk >= uint64(s.config.PersisterNapUnderNumFiles) && + lastMergedEpoch < lastPersistedEpoch { + atomic.AddUint64(&s.stats.TotPersisterSlowMergerPause, 1) + + select { + case <-s.closeCh: + break OUTER + case ew := <-persisterNotifier: + persistWatchers.Add(ew) + lastMergedEpoch = ew.epoch + } + + atomic.AddUint64(&s.stats.TotPersisterSlowMergerResume, 1) + + // let the watchers proceed if they lag behind + persistWatchers.NotifySatisfiedWatchers(lastPersistedEpoch) + + numFilesOnDisk, _ = s.directory.Stats() + } + + return lastMergedEpoch, persistWatchers +} + +func (s *Writer) persistSnapshot(merges chan *segmentMerge, persists chan *persistIntroduction, snapshot *Snapshot) error { + // Perform in-memory segment merging only when the memory pressure is + // below the configured threshold, else the persister performs the + // direct persistence of segments. + if s.numEventsBlocking() < s.config.MemoryPressurePauseThreshold { + persisted, err := s.persistSnapshotMaybeMerge(merges, persists, snapshot) + if err != nil { + return err + } + if persisted { + return nil + } + } + + return s.persistSnapshotDirect(persists, snapshot) +} + +// persistSnapshotMaybeMerge examines the snapshot and might merge and +// persist the in-memory zap segments if there are enough of them +func (s *Writer) persistSnapshotMaybeMerge(merges chan *segmentMerge, persists chan *persistIntroduction, snapshot *Snapshot) ( + bool, error) { + // collect the in-memory zap segments (SegmentBase instances) + var sbs []segment.Segment + var sbsDrops []*roaring.Bitmap + var sbsIndexes []int + + for i, segmentSnapshot := range snapshot.segment { + if !segmentSnapshot.segment.Persisted() { + sbs = append(sbs, segmentSnapshot.segment.Segment) + sbsDrops = append(sbsDrops, segmentSnapshot.deleted) + sbsIndexes = append(sbsIndexes, i) + } + } + + if len(sbs) < s.config.MinSegmentsForInMemoryMerge { + return false, nil + } + + newSnapshot, 
newSegmentID, err := s.mergeSegmentBases( + merges, snapshot, sbs, sbsDrops, sbsIndexes) + if err != nil { + return false, err + } + if newSnapshot == nil { + return false, nil + } + + defer func() { + _ = newSnapshot.Close() + }() + + mergedSegmentIDs := map[uint64]struct{}{} + for _, idx := range sbsIndexes { + mergedSegmentIDs[snapshot.segment[idx].id] = struct{}{} + } + + // construct a snapshot that's logically equivalent to the input + // snapshot, but with merged segments replaced by the new segment + equiv := &Snapshot{ + parent: snapshot.parent, + segment: make([]*segmentSnapshot, 0, len(snapshot.segment)), + epoch: snapshot.epoch, + creator: "persistSnapshotMaybeMerge", + } + + // copy to the equiv the segments that weren't replaced + for _, segment := range snapshot.segment { + if _, wasMerged := mergedSegmentIDs[segment.id]; !wasMerged { + equiv.segment = append(equiv.segment, segment) + } + } + + // append to the equiv the new segment + for _, segment := range newSnapshot.segment { + if segment.id == newSegmentID { + equiv.segment = append(equiv.segment, &segmentSnapshot{ + id: newSegmentID, + segment: segment.segment, + deleted: nil, // nil since merging handled deletions + }) + break + } + } + + err = s.persistSnapshotDirect(persists, equiv) + if err != nil { + return false, err + } + + return true, nil +} + +func (s *Writer) persistSnapshotDirect(persists chan *persistIntroduction, snapshot *Snapshot) (err error) { + // first ensure that each segment in this snapshot has been persisted + var newSegmentIds []uint64 + for _, segmentSnapshot := range snapshot.segment { + if !segmentSnapshot.segment.Persisted() { + err = s.directory.Persist(ItemKindSegment, segmentSnapshot.id, segmentSnapshot.segment.Segment, s.closeCh) + if err != nil { + return fmt.Errorf("error persisting segment: %v", err) + } + newSegmentIds = append(newSegmentIds, segmentSnapshot.id) + } + } + + if len(newSegmentIds) > 0 { + err = s.prepareIntroducePersist(persists, newSegmentIds) 
+ if err != nil { + return err + } + } + + err = s.directory.Persist(ItemKindSnapshot, snapshot.epoch, snapshot, s.closeCh) + if err != nil { + return err + } + + s.deletionPolicy.Commit(snapshot) + + return nil +} + +func (s *Writer) prepareIntroducePersist(persists chan *persistIntroduction, newSegmentIds []uint64) error { + // now try to open all the new snapshots + newSegments := make(map[uint64]*segmentWrapper) + defer func() { + for _, s := range newSegments { + if s != nil { + // cleanup segments that were opened but not + // swapped into the new root + _ = s.Close() + } + } + }() + var err error + for _, segmentID := range newSegmentIds { + newSegments[segmentID], err = s.loadSegment(segmentID, s.segPlugin) + if err != nil { + return fmt.Errorf("error opening new segment %d, %v", segmentID, err) + } + } + + persist := &persistIntroduction{ + persisted: newSegments, + applied: make(notificationChan), + } + + select { + case <-s.closeCh: + return segment.ErrClosed + case persists <- persist: + } + + select { + case <-s.closeCh: + return segment.ErrClosed + case <-persist.applied: + } + + return nil +} diff --git a/vendor/github.com/blugelabs/bluge/index/postings.go b/vendor/github.com/blugelabs/bluge/index/postings.go new file mode 100644 index 000000000..e19fee500 --- /dev/null +++ b/vendor/github.com/blugelabs/bluge/index/postings.go @@ -0,0 +1,137 @@ +// Copyright (c) 2020 Couchbase, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package index + +import ( + "fmt" + "sync/atomic" + + segment "github.com/blugelabs/bluge_segment_api" +) + +type postingsIterator struct { + term []byte + field string + snapshot *Snapshot + dicts []segment.Dictionary + postings []segment.PostingsList + iterators []segment.PostingsIterator + segmentOffset int + includeFreq bool + includeNorm bool + includeTermVectors bool + currPosting segment.Posting + currID uint64 + recycle bool +} + +func (i *postingsIterator) Size() int { + sizeInBytes := reflectStaticSizeIndexSnapshotTermFieldReader + sizeOfPtr + + len(i.term) + + len(i.field) + + sizeOfInt + + for _, entry := range i.postings { + sizeInBytes += entry.Size() + } + + for _, entry := range i.iterators { + sizeInBytes += entry.Size() + } + + if i.currPosting != nil { + sizeInBytes += i.currPosting.Size() + } + + return sizeInBytes +} + +func (i *postingsIterator) Next() (segment.Posting, error) { + // find the next hit + for i.segmentOffset < len(i.iterators) { + next, err := i.iterators[i.segmentOffset].Next() + if err != nil { + return nil, err + } + if next != nil { + rvNumber := next.Number() + i.snapshot.offsets[i.segmentOffset] + next.SetNumber(rvNumber) + i.currID = rvNumber + i.currPosting = next + return next, nil + } + i.segmentOffset++ + } + return nil, nil +} + +func (i *postingsIterator) Advance(number uint64) (segment.Posting, error) { + // FIXME do something better + // for now, if we need to seek backwards, then restart from the beginning + if i.currPosting != nil && i.currID >= number { + i2, err := i.snapshot.PostingsIterator(i.term, i.field, + i.includeFreq, i.includeNorm, i.includeTermVectors) + if err != nil { + return nil, err + } + // close the current term field reader before replacing it with a new one + _ = i.Close() + *i = *(i2.(*postingsIterator)) + } + segIndex, ldocNum := i.snapshot.segmentIndexAndLocalDocNumFromGlobal(number) + if segIndex >= len(i.snapshot.segment) { + return nil, fmt.Errorf("computed segment index %d out of 
bounds %d", + segIndex, len(i.snapshot.segment)) + } + // skip directly to the target segment + i.segmentOffset = segIndex + next, err := i.iterators[i.segmentOffset].Advance(ldocNum) + if err != nil { + return nil, err + } + if next == nil { + // we jumped directly to the segment that should have contained it + // but it wasn't there, so reuse Next() which should correctly + // get the next hit after it (we moved i.segmentOffset) + return i.Next() + } + + rvNumber := next.Number() + i.snapshot.offsets[i.segmentOffset] + next.SetNumber(rvNumber) + i.currID = rvNumber + i.currPosting = next + return next, nil +} + +func (i *postingsIterator) Count() uint64 { + var rv uint64 + for _, posting := range i.postings { + rv += posting.Count() + } + return rv +} + +func (i *postingsIterator) Empty() bool { + count := i.Count() + return count == 0 +} + +func (i *postingsIterator) Close() error { + if i.snapshot != nil { + atomic.AddUint64(&i.snapshot.parent.stats.TotTermSearchersFinished, uint64(1)) + i.snapshot.recyclePostingsIterator(i) + } + return nil +} diff --git a/vendor/github.com/blugelabs/bluge/index/postings_all.go b/vendor/github.com/blugelabs/bluge/index/postings_all.go new file mode 100644 index 000000000..e7b14b8d6 --- /dev/null +++ b/vendor/github.com/blugelabs/bluge/index/postings_all.go @@ -0,0 +1,112 @@ +// Copyright (c) 2020 Couchbase, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package index + +import ( + "fmt" + + segment "github.com/blugelabs/bluge_segment_api" + + "github.com/RoaringBitmap/roaring" +) + +type postingsIteratorAll struct { + snapshot *Snapshot + iterators []roaring.IntPeekable + segmentOffset int + + preAlloc virtualPosting +} + +func (i *postingsIteratorAll) Size() int { + return reflectStaticSizeIndexSnapshotDocIDReader + sizeOfPtr +} + +func (i *postingsIteratorAll) Next() (segment.Posting, error) { + for i.segmentOffset < len(i.iterators) { + if !i.iterators[i.segmentOffset].HasNext() { + i.segmentOffset++ + continue + } + next := i.iterators[i.segmentOffset].Next() + // make segment number into global number by adding offset + globalOffset := i.snapshot.offsets[i.segmentOffset] + i.preAlloc.number = uint64(next) + globalOffset + return &i.preAlloc, nil + } + return nil, nil +} + +func (i *postingsIteratorAll) Advance(number uint64) (segment.Posting, error) { + segIndex, localDocNum := i.snapshot.segmentIndexAndLocalDocNumFromGlobal(number) + if segIndex >= len(i.snapshot.segment) { + return nil, fmt.Errorf("computed segment index %d out of bounds %d", + segIndex, len(i.snapshot.segment)) + } + // skip directly to the target segment + i.segmentOffset = segIndex + + // now advance within this segment + i.iterators[i.segmentOffset].AdvanceIfNeeded(uint32(localDocNum)) + + // let next do the rest of the work for us + return i.Next() +} + +func (i *postingsIteratorAll) Count() uint64 { + rv, _ := i.snapshot.Count() + return rv +} + +func (i *postingsIteratorAll) Close() error { + return nil +} + +func (i *postingsIteratorAll) Empty() bool { + return i.Count() == 0 +} + +type virtualPosting struct { + term string + number uint64 +} + +func (v *virtualPosting) Term() string { + return v.term +} + +func (v *virtualPosting) Number() uint64 { + return v.number +} + +func (v *virtualPosting) SetNumber(n uint64) { + v.number = n +} + +func (v *virtualPosting) Frequency() int { + return 1 +} + +func (v *virtualPosting) Norm() 
float64 { + return 1 +} + +func (v *virtualPosting) Locations() []segment.Location { + return nil +} + +func (v *virtualPosting) Size() int { + return 0 +} diff --git a/vendor/github.com/blugelabs/bluge/index/segment.go b/vendor/github.com/blugelabs/bluge/index/segment.go new file mode 100644 index 000000000..ff54ab3cd --- /dev/null +++ b/vendor/github.com/blugelabs/bluge/index/segment.go @@ -0,0 +1,92 @@ +// Copyright (c) 2020 Couchbase, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package index + +import ( + "github.com/RoaringBitmap/roaring" + segment "github.com/blugelabs/bluge_segment_api" +) + +type SegmentSnapshot interface { + ID() uint64 + Deleted() *roaring.Bitmap +} + +type segmentSnapshot struct { + id uint64 + segment *segmentWrapper + deleted *roaring.Bitmap + creator string + segmentType string + segmentVersion uint32 +} + +func (s *segmentSnapshot) Segment() segment.Segment { + return s.segment +} + +func (s *segmentSnapshot) Deleted() *roaring.Bitmap { + return s.deleted +} + +func (s *segmentSnapshot) ID() uint64 { + return s.id +} + +func (s *segmentSnapshot) FullSize() int64 { + return int64(s.segment.Count()) +} + +func (s segmentSnapshot) LiveSize() int64 { + return int64(s.Count()) +} + +func (s *segmentSnapshot) Close() error { + return s.segment.Close() +} + +func (s *segmentSnapshot) VisitDocument(num uint64, visitor segment.StoredFieldVisitor) error { + return s.segment.VisitStoredFields(num, visitor) +} + +func (s *segmentSnapshot) Count() uint64 { + rv := s.segment.Count() + if s.deleted != nil { + rv -= s.deleted.GetCardinality() + } + return rv +} + +// DocNumbersLive returns a bitmap containing doc numbers for all live docs +func (s *segmentSnapshot) DocNumbersLive() *roaring.Bitmap { + rv := roaring.NewBitmap() + rv.AddRange(0, s.segment.Count()) + if s.deleted != nil { + rv.AndNot(s.deleted) + } + return rv +} + +func (s *segmentSnapshot) Fields() []string { + return s.segment.Fields() +} + +func (s *segmentSnapshot) Size() (rv int) { + rv = s.segment.Size() + if s.deleted != nil { + rv += int(s.deleted.GetSizeInBytes()) + } + return +} diff --git a/vendor/github.com/blugelabs/bluge/index/segment_plugin.go b/vendor/github.com/blugelabs/bluge/index/segment_plugin.go new file mode 100644 index 000000000..395aed9fa --- /dev/null +++ b/vendor/github.com/blugelabs/bluge/index/segment_plugin.go @@ -0,0 +1,117 @@ +// Copyright (c) 2020 Couchbase, Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package index + +import ( + "fmt" + "io" + "sync" + + "github.com/RoaringBitmap/roaring" + segment "github.com/blugelabs/bluge_segment_api" +) + +type SegmentPlugin struct { + Type string + Version uint32 + New func(results []segment.Document, normCalc func(string, int) float32) (segment.Segment, uint64, error) + Load func(*segment.Data) (segment.Segment, error) + Merge func([]segment.Segment, []*roaring.Bitmap, int) segment.Merger +} + +func supportedSegmentTypes(supportedSegmentPlugins map[string]map[uint32]*SegmentPlugin) (rv []string) { + for k := range supportedSegmentPlugins { + rv = append(rv, k) + } + return +} + +func supportedSegmentTypeVersions(supportedSegmentPlugins map[string]map[uint32]*SegmentPlugin, typ string) ( + rv []uint32) { + for k := range supportedSegmentPlugins[typ] { + rv = append(rv, k) + } + return rv +} + +func loadSegmentPlugin(supportedSegmentPlugins map[string]map[uint32]*SegmentPlugin, + forcedSegmentType string, forcedSegmentVersion uint32) (*SegmentPlugin, error) { + if versions, ok := supportedSegmentPlugins[forcedSegmentType]; ok { + if segPlugin, ok := versions[forcedSegmentVersion]; ok { + return segPlugin, nil + } + return nil, fmt.Errorf( + "unsupported version %d for segment type: %s, supported: %v", + forcedSegmentVersion, forcedSegmentType, + supportedSegmentTypeVersions(supportedSegmentPlugins, forcedSegmentType)) + } + return nil, fmt.Errorf("unsupported 
segment type: %s, supported: %v", + forcedSegmentType, supportedSegmentTypes(supportedSegmentPlugins)) +} + +func (s *Writer) newSegment(results []segment.Document) (*segmentWrapper, uint64, error) { + seg, count, err := s.segPlugin.New(results, s.config.NormCalc) + return &segmentWrapper{ + Segment: seg, + refCounter: noOpRefCounter{}, + }, count, err +} + +type segmentWrapper struct { + segment.Segment + refCounter + persisted bool +} + +func (s segmentWrapper) Persisted() bool { + return s.persisted +} + +func (s segmentWrapper) Close() error { + return s.DecRef() +} + +type refCounter interface { + AddRef() + DecRef() error +} + +type noOpRefCounter struct{} + +func (noOpRefCounter) AddRef() {} +func (noOpRefCounter) DecRef() error { return nil } + +type closeOnLastRefCounter struct { + closer io.Closer + m sync.Mutex + refs int64 +} + +func (c *closeOnLastRefCounter) AddRef() { + c.m.Lock() + c.refs++ + c.m.Unlock() +} + +func (c *closeOnLastRefCounter) DecRef() error { + c.m.Lock() + c.refs-- + var err error + if c.refs == 0 && c.closer != nil { + err = c.closer.Close() + } + c.m.Unlock() + return err +} diff --git a/vendor/github.com/blugelabs/bluge/index/sizes.go b/vendor/github.com/blugelabs/bluge/index/sizes.go new file mode 100644 index 000000000..57d3339d6 --- /dev/null +++ b/vendor/github.com/blugelabs/bluge/index/sizes.go @@ -0,0 +1,49 @@ +// Copyright (c) 2020 Couchbase, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package index + +import ( + "reflect" +) + +func init() { + var i int + sizeOfInt = int(reflect.TypeOf(i).Size()) + var ptr *int + sizeOfPtr = int(reflect.TypeOf(ptr).Size()) + + var pi postingsIterator + reflectStaticSizeIndexSnapshotTermFieldReader = int(reflect.TypeOf(pi).Size()) + var pia postingsIteratorAll + reflectStaticSizeIndexSnapshotDocIDReader = int(reflect.TypeOf(pia).Size()) + var is interface{} = Snapshot{} + reflectStaticSizeIndexSnapshot = int(reflect.TypeOf(is).Size()) + var pib unadornedPostingsIteratorBitmap + reflectStaticSizeUnadornedPostingsIteratorBitmap = int(reflect.TypeOf(pib).Size()) + var pi1h unadornedPostingsIterator1Hit + reflectStaticSizeUnadornedPostingsIterator1Hit = int(reflect.TypeOf(pi1h).Size()) + var up unadornedPosting + reflectStaticSizeUnadornedPosting = int(reflect.TypeOf(up).Size()) +} + +var sizeOfInt int +var sizeOfPtr int + +var reflectStaticSizeIndexSnapshotTermFieldReader int +var reflectStaticSizeIndexSnapshotDocIDReader int +var reflectStaticSizeIndexSnapshot int +var reflectStaticSizeUnadornedPostingsIteratorBitmap int +var reflectStaticSizeUnadornedPostingsIterator1Hit int +var reflectStaticSizeUnadornedPosting int diff --git a/vendor/github.com/blugelabs/bluge/index/snapshot.go b/vendor/github.com/blugelabs/bluge/index/snapshot.go new file mode 100644 index 000000000..473893b28 --- /dev/null +++ b/vendor/github.com/blugelabs/bluge/index/snapshot.go @@ -0,0 +1,775 @@ +// Copyright (c) 2020 Couchbase, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package index + +import ( + "bufio" + "bytes" + "container/heap" + "encoding/binary" + "fmt" + "io" + "sort" + "sync" + "sync/atomic" + + "github.com/RoaringBitmap/roaring" + segment "github.com/blugelabs/bluge_segment_api" +) + +type asyncSegmentResult struct { + dict segment.Dictionary + dictItr segment.DictionaryIterator + + index int + docs *roaring.Bitmap + + err error +} + +type Snapshot struct { + parent *Writer + segment []*segmentSnapshot + offsets []uint64 + epoch uint64 + size uint64 + creator string + + m sync.Mutex // Protects the fields that follow. + refs int64 + + m2 sync.Mutex // Protects the fields that follow. + fieldTFRs map[string][]*postingsIterator // keyed by field, recycled TFR's +} + +func (i *Snapshot) Segments() []SegmentSnapshot { + rv := make([]SegmentSnapshot, len(i.segment)) + for j := range i.segment { + rv[j] = i.segment[j] + } + return rv +} + +func (i *Snapshot) addRef() { + i.m.Lock() + i.refs++ + i.m.Unlock() +} + +func (i *Snapshot) decRef() (err error) { + i.m.Lock() + i.refs-- + if i.refs == 0 { + for _, s := range i.segment { + if s != nil { + err2 := s.segment.DecRef() + if err == nil { + err = err2 + } + } + } + } + i.m.Unlock() + return err +} + +func (i *Snapshot) Close() error { + return i.decRef() +} + +func (i *Snapshot) Size() int { + return int(i.size) +} + +func (i *Snapshot) updateSize() { + i.size += uint64(reflectStaticSizeIndexSnapshot) + for _, s := range i.segment { + i.size += uint64(s.Size()) + } +} + +func (i *Snapshot) newDictionary(field string, + makeItr func(i segment.Dictionary) segment.DictionaryIterator, + randomLookup bool) (*dictionary, error) { + results := make(chan *asyncSegmentResult) + for _, seg := range i.segment { + go func(segment *segmentSnapshot) { + dict, err := segment.segment.Dictionary(field) + if err != nil { + results <- &asyncSegmentResult{err: err} + } else { + if 
randomLookup { + results <- &asyncSegmentResult{dict: dict} + } else { + results <- &asyncSegmentResult{dictItr: makeItr(dict)} + } + } + }(seg) + } + + var err error + rv := &dictionary{ + snapshot: i, + cursors: make([]*segmentDictCursor, 0, len(i.segment)), + } + for count := 0; count < len(i.segment); count++ { + asr := <-results + if asr.err != nil && err == nil { + err = asr.err + } else { + if !randomLookup { + next, err2 := asr.dictItr.Next() + if err2 != nil && err == nil { + err = err2 + } + if next != nil { + rv.cursors = append(rv.cursors, &segmentDictCursor{ + itr: asr.dictItr, + curr: next, + }) + } + } else { + rv.cursors = append(rv.cursors, &segmentDictCursor{ + dict: asr.dict, + }) + } + } + } + // after ensuring we've read all items on channel + if err != nil { + return nil, err + } + + if !randomLookup { + // prepare heap + heap.Init(rv) + } + + return rv, nil +} + +func (i *Snapshot) DictionaryLookup(field string) (segment.DictionaryLookup, error) { + return i.newDictionary(field, nil, true) +} + +func (i *Snapshot) DictionaryIterator(field string, automaton segment.Automaton, start, end []byte) ( + segment.DictionaryIterator, error) { + return i.newDictionary(field, func(i segment.Dictionary) segment.DictionaryIterator { + return i.Iterator(automaton, start, end) + }, false) +} + +func (i *Snapshot) Fields() ([]string, error) { + fieldsMap := map[string]struct{}{} + for _, seg := range i.segment { + fields := seg.Fields() + for _, field := range fields { + fieldsMap[field] = struct{}{} + } + } + rv := make([]string, 0, len(fieldsMap)) + for k := range fieldsMap { + rv = append(rv, k) + } + return rv, nil +} + +type collectionStats struct { + totalDocCount uint64 + docCount uint64 + sumTotalTermFreq uint64 +} + +func (c *collectionStats) TotalDocumentCount() uint64 { + return c.totalDocCount +} + +func (c *collectionStats) DocumentCount() uint64 { + return c.docCount +} + +func (c *collectionStats) SumTotalTermFrequency() uint64 { + return 
c.sumTotalTermFreq +} + +func (c *collectionStats) Merge(other segment.CollectionStats) { + c.totalDocCount += other.TotalDocumentCount() + c.docCount += other.DocumentCount() + c.sumTotalTermFreq += other.SumTotalTermFrequency() +} + +func (i *Snapshot) CollectionStats(field string) (segment.CollectionStats, error) { + // first handle case where this is a virtual field + if vFields, ok := i.parent.config.virtualFields[field]; ok { + for _, vField := range vFields { + if field == vField.Name() { + totalDocCount, _ := i.Count() + return &collectionStats{ + totalDocCount: totalDocCount, + docCount: totalDocCount, + sumTotalTermFreq: totalDocCount, + }, nil + } + } + } + + // FIXME just making this work for now, possibly should be async + var rv segment.CollectionStats + for _, seg := range i.segment { + segStats, err := seg.segment.CollectionStats(field) + if err != nil { + return nil, err + } + if rv == nil { + rv = segStats + } else { + rv.Merge(segStats) + } + } + return rv, nil +} + +func (i *Snapshot) Count() (uint64, error) { + var rv uint64 + for _, seg := range i.segment { + rv += seg.Count() + } + return rv, nil +} + +func (i *Snapshot) postingsIteratorAll(term string) (segment.PostingsIterator, error) { + results := make(chan *asyncSegmentResult) + for index, seg := range i.segment { + go func(index int, segment *segmentSnapshot) { + results <- &asyncSegmentResult{ + index: index, + docs: segment.DocNumbersLive(), + } + }(index, seg) + } + + return i.newPostingsIteratorAll(term, results) +} + +func (i *Snapshot) newPostingsIteratorAll(term string, results chan *asyncSegmentResult) (segment.PostingsIterator, error) { + rv := &postingsIteratorAll{ + preAlloc: virtualPosting{ + term: term, + }, + snapshot: i, + iterators: make([]roaring.IntPeekable, len(i.segment)), + } + var err error + for count := 0; count < len(i.segment); count++ { + asr := <-results + if asr.err != nil { + if err == nil { + // returns the first error encountered + err = asr.err + } + } 
else if err == nil { + rv.iterators[asr.index] = asr.docs.Iterator() + } + } + + if err != nil { + return nil, err + } + + return rv, nil +} + +func (i *Snapshot) VisitStoredFields(number uint64, visitor segment.StoredFieldVisitor) error { + segmentIndex, localDocNum := i.segmentIndexAndLocalDocNumFromGlobal(number) + + for _, vFields := range i.parent.config.virtualFields { + for _, vField := range vFields { + if vField.Store() { + cont := visitor(vField.Name(), vField.Value()) + if !cont { + return nil + } + } + } + } + err := i.segment[segmentIndex].VisitDocument(localDocNum, func(name string, val []byte) bool { + return visitor(name, val) + }) + if err != nil { + return err + } + return nil +} + +func (i *Snapshot) segmentIndexAndLocalDocNumFromGlobal(docNum uint64) (segmentIndex int, localDocNum uint64) { + segmentIndex = sort.Search(len(i.offsets), + func(x int) bool { + return i.offsets[x] > docNum + }) - 1 + + localDocNum = docNum - i.offsets[segmentIndex] + return segmentIndex, localDocNum +} + +func (i *Snapshot) PostingsIterator(term []byte, field string, includeFreq, + includeNorm, includeTermVectors bool) (segment.PostingsIterator, error) { + if vFields, ok := i.parent.config.virtualFields[field]; ok { + for _, vField := range vFields { + if vField.Index() { + var match bool + vField.EachTerm(func(vFieldTerm segment.FieldTerm) { + if bytes.Equal(vFieldTerm.Term(), term) { + match = true + } + }) + if match { + return i.postingsIteratorAll(string(term)) + } + } + } + } + + rv := i.allocPostingsIterator(field) + + rv.term = term + rv.field = field + rv.snapshot = i + if rv.postings == nil { + rv.postings = make([]segment.PostingsList, len(i.segment)) + } + if rv.iterators == nil { + rv.iterators = make([]segment.PostingsIterator, len(i.segment)) + } + rv.segmentOffset = 0 + rv.includeFreq = includeFreq + rv.includeNorm = includeNorm + rv.includeTermVectors = includeTermVectors + rv.currPosting = nil + rv.currID = 0 + + if rv.dicts == nil { + rv.dicts = 
make([]segment.Dictionary, len(i.segment)) + for i, seg := range i.segment { + dict, err := seg.segment.Dictionary(field) + if err != nil { + return nil, err + } + rv.dicts[i] = dict + } + } + + for i, seg := range i.segment { + pl, err := rv.dicts[i].PostingsList(term, seg.deleted, rv.postings[i]) + if err != nil { + return nil, err + } + rv.postings[i] = pl + rv.iterators[i], err = pl.Iterator(includeFreq, includeNorm, includeTermVectors, rv.iterators[i]) + if err != nil { + return nil, err + } + } + atomic.AddUint64(&i.parent.stats.TotTermSearchersStarted, uint64(1)) + return rv, nil +} + +func (i *Snapshot) allocPostingsIterator(field string) (tfr *postingsIterator) { + i.m2.Lock() + if i.fieldTFRs != nil { + tfrs := i.fieldTFRs[field] + last := len(tfrs) - 1 + if last >= 0 { + tfr = tfrs[last] + tfrs[last] = nil + i.fieldTFRs[field] = tfrs[:last] + i.m2.Unlock() + return + } + } + i.m2.Unlock() + return &postingsIterator{ + recycle: true, + } +} + +func (i *Snapshot) recyclePostingsIterator(tfr *postingsIterator) { + if !tfr.recycle { + // Do not recycle an optimized unadorned term field reader (used for + // ConjunctionUnadorned or DisjunctionUnadorned), during when a fresh + // roaring.Bitmap is built by AND-ing or OR-ing individual bitmaps, + // and we'll need to release them for GC. 
(See MB-40916) + return + } + + if i.epoch != i.parent.currentEpoch() { + // if we're not the current root (mutations happened), don't bother recycling + return + } + + i.m2.Lock() + if i.fieldTFRs == nil { + i.fieldTFRs = map[string][]*postingsIterator{} + } + i.fieldTFRs[tfr.field] = append(i.fieldTFRs[tfr.field], tfr) + i.m2.Unlock() +} + +func (i *Snapshot) unadornedPostingsIterator( + term []byte, field string) *postingsIterator { + // This IndexSnapshotTermFieldReader will not be recycled, more + // conversation here: https://github.com/blevesearch/bleve/pull/1438 + return &postingsIterator{ + term: term, + field: field, + snapshot: i, + iterators: make([]segment.PostingsIterator, len(i.segment)), + segmentOffset: 0, + includeFreq: false, + includeNorm: false, + includeTermVectors: false, + recycle: false, + } +} + +const blugeSnapshotFormatVersion1 = 1 +const blugeSnapshotFormatVersion = blugeSnapshotFormatVersion1 +const crcWidth = 4 + +func (i *Snapshot) WriteTo(w io.Writer, _ chan struct{}) (int64, error) { + bw := bufio.NewWriter(w) + chw := newCountHashWriter(bw) + + var bytesWritten int64 + var intBuf = make([]byte, binary.MaxVarintLen64) + // write the bluge snapshot format version number + n := binary.PutUvarint(intBuf, uint64(blugeSnapshotFormatVersion)) + sz, err := chw.Write(intBuf[:n]) + if err != nil { + return bytesWritten, fmt.Errorf("error writing snapshot %d: %w", i.epoch, err) + } + bytesWritten += int64(sz) + + // write number of segments + n = binary.PutUvarint(intBuf, uint64(len(i.segment))) + sz, err = chw.Write(intBuf[:n]) + if err != nil { + return bytesWritten, fmt.Errorf("error writing snapshot %d: %w", i.epoch, err) + } + bytesWritten += int64(sz) + + for _, segmentSnapshot := range i.segment { + sz, err = recordSegment(chw, segmentSnapshot, segmentSnapshot.id, segmentSnapshot.segment.Type(), segmentSnapshot.segment.Version()) + if err != nil { + return bytesWritten, fmt.Errorf("error writing snapshot %d: %w", i.epoch, err) + } + 
bytesWritten += int64(sz) + } + + // write crc32 at end of file + crc32 := chw.Sum32() + binary.BigEndian.PutUint32(intBuf, crc32) + sz, err = chw.Write(intBuf[:crcWidth]) + if err != nil { + return bytesWritten, fmt.Errorf("error writing snapshot %d: %w", i.epoch, err) + } + bytesWritten += int64(sz) + + err = bw.Flush() + if err != nil { + return bytesWritten, err + } + + return bytesWritten, nil +} + +func recordSegment(w io.Writer, snapshot *segmentSnapshot, id uint64, typ string, ver uint32) (int, error) { + var bytesWritten int + var intBuf = make([]byte, binary.MaxVarintLen64) + // record type + sz, err := writeVarLenString(w, intBuf, typ) + if err != nil { + return bytesWritten, err + } + bytesWritten += sz + + // record version + binary.BigEndian.PutUint32(intBuf, ver) + sz, err = w.Write(intBuf[:4]) + if err != nil { + return bytesWritten, err + } + bytesWritten += sz + + // record segment id + n := binary.PutUvarint(intBuf, id) + sz, err = w.Write(intBuf[:n]) + if err != nil { + return bytesWritten, err + } + bytesWritten += sz + + // record deleted bits + if snapshot.deleted != nil { + var deletedBytes []byte + deletedBytes, err = snapshot.deleted.ToBytes() + if err != nil { + return bytesWritten, err + } + // first length + n := binary.PutUvarint(intBuf, uint64(len(deletedBytes))) + sz, err = w.Write(intBuf[:n]) + if err != nil { + return bytesWritten, err + } + bytesWritten += sz + + // then data + sz, err = w.Write(deletedBytes) + if err != nil { + return bytesWritten, err + } + bytesWritten += sz + } else { + n := binary.PutUvarint(intBuf, 0) + sz, err = w.Write(intBuf[:n]) + if err != nil { + return bytesWritten, err + } + bytesWritten += sz + } + + return bytesWritten, nil +} + +func writeVarLenString(w io.Writer, intBuf []byte, str string) (int, error) { + var bytesWritten int + n := binary.PutUvarint(intBuf, uint64(len(str))) + sz, err := w.Write(intBuf[:n]) + if err != nil { + return bytesWritten, err + } + bytesWritten += sz + sz, err = 
w.Write([]byte(str)) + if err != nil { + return bytesWritten, err + } + bytesWritten += sz + return bytesWritten, nil +} + +func (i *Snapshot) ReadFrom(r io.Reader) (int64, error) { + var bytesRead int64 + br := bufio.NewReader(r) + + // read bluge snapshot format version + peek, err := br.Peek(binary.MaxVarintLen64) + if err != nil && err != io.EOF { + return bytesRead, fmt.Errorf("error peeking snapshot format version %d: %w", i.epoch, err) + } + snapshotFormatVersion, n := binary.Uvarint(peek) + sz, err := br.Discard(n) + if err != nil { + return bytesRead, fmt.Errorf("error reading snapshot format version %d: %w", i.epoch, err) + } + bytesRead += int64(sz) + + if snapshotFormatVersion == blugeSnapshotFormatVersion1 { + n, err := i.readFromVersion1(br) + return n + bytesRead, err + } + + return bytesRead, fmt.Errorf("unsupportred snapshot format version: %d", snapshotFormatVersion) +} + +func (i *Snapshot) readFromVersion1(br *bufio.Reader) (int64, error) { + var bytesRead int64 + + // read number of segments + peek, err := br.Peek(binary.MaxVarintLen64) + if err != nil && err != io.EOF { + return bytesRead, fmt.Errorf("error peeking snapshot number of segments %d: %w", i.epoch, err) + } + numSegments, n := binary.Uvarint(peek) + sz, err := br.Discard(n) + if err != nil { + return bytesRead, fmt.Errorf("error reading snapshot number of segments %d: %w", i.epoch, err) + } + bytesRead += int64(sz) + + for j := 0; j < int(numSegments); j++ { + segmentBytesRead, ss, err := i.readSegmentSnapshot(br) + if err != nil { + return bytesRead, err + } + bytesRead += segmentBytesRead + + i.segment = append(i.segment, ss) + } + + return bytesRead, nil +} + +func (i *Snapshot) readSegmentSnapshot(br *bufio.Reader) (bytesRead int64, ss *segmentSnapshot, err error) { + var sz int + var segmentType string + // read type + sz, segmentType, err = readVarLenString(br) + if err != nil { + return bytesRead, nil, fmt.Errorf("error reading snapshot %d: %w", i.epoch, err) + } + bytesRead 
+= int64(sz) + + // read ver + verBuf := make([]byte, 4) + sz, err = br.Read(verBuf) + if err != nil { + return bytesRead, nil, fmt.Errorf("error reading snapshot %d: %w", i.epoch, err) + } + segmentVersion := binary.BigEndian.Uint32(verBuf) + bytesRead += int64(sz) + + // read segment id + peekSegmentID, err := br.Peek(binary.MaxVarintLen64) + if err != nil && err != io.EOF { + return bytesRead, nil, fmt.Errorf("error reading snapshot %d: %w", i.epoch, err) + } + segmentID, n := binary.Uvarint(peekSegmentID) + sz, err = br.Discard(n) + if err != nil { + return bytesRead, nil, fmt.Errorf("error reading snapshot %d: %w", i.epoch, err) + } + bytesRead += int64(sz) + + ss = &segmentSnapshot{ + id: segmentID, + segmentType: segmentType, + segmentVersion: segmentVersion, + } + + // read size of deleted bitmap + peek, err := br.Peek(binary.MaxVarintLen64) + if err != nil && err != io.EOF { + return bytesRead, nil, fmt.Errorf("xerror reading snapshot %d: %w", i.epoch, err) + } + delLen, n := binary.Uvarint(peek) + sz, err = br.Discard(n) + if err != nil { + return bytesRead, nil, fmt.Errorf("error reading snapshot %d: %w", i.epoch, err) + } + bytesRead += int64(sz) + + if delLen > 0 { + deletedBytes := make([]byte, int(delLen)) + sz, err = io.ReadFull(br, deletedBytes) + if err != nil { + return bytesRead, nil, fmt.Errorf("error reading snapshot %d: %w", i.epoch, err) + } + bytesRead += int64(sz) + + rr := bytes.NewReader(deletedBytes) + deletedBitmap := roaring.NewBitmap() + _, err = deletedBitmap.ReadFrom(rr) + if err != nil { + return bytesRead, nil, fmt.Errorf("error reading snapshot %d: %w", i.epoch, err) + } + + if !deletedBitmap.IsEmpty() { + ss.deleted = deletedBitmap + } + } + return bytesRead, ss, nil +} + +func readVarLenString(r *bufio.Reader) (n int, str string, err error) { + peek, err := r.Peek(binary.MaxVarintLen64) + if err != nil { + return n, "", err + } + strLen, uVarRead := binary.Uvarint(peek) + sz, err := r.Discard(uVarRead) + if err != nil { + 
return n, "", err + } + n += sz + + strBytes := make([]byte, strLen) + sz, err = r.Read(strBytes) + if err != nil { + return n, "", err + } + n += sz + return n, string(strBytes), nil +} + +func (i *Snapshot) DocumentValueReader(fields []string) ( + segment.DocumentValueReader, error) { + return &documentValueReader{i: i, fields: fields, currSegmentIndex: -1}, nil +} + +func (i *Snapshot) Backup(remote Directory, cancel chan struct{}) error { + // first copy all the segments + for j := range i.segment { + err := remote.Persist(ItemKindSegment, i.segment[j].id, i.segment[j].segment, cancel) + if err != nil { + return fmt.Errorf("error backing up segment %d: %w", i.segment[j].id, err) + } + } + // now persist ourself (snapshot) + err := remote.Persist(ItemKindSnapshot, i.epoch, i, cancel) + if err != nil { + return fmt.Errorf("error backing up snapshot %d: %w", i.epoch, err) + } + + return nil +} + +type documentValueReader struct { + i *Snapshot + fields []string + sdvr segment.DocumentValueReader + + currSegmentIndex int +} + +func (dvr *documentValueReader) VisitDocumentValues(number uint64, + visitor segment.DocumentValueVisitor) (err error) { + segmentIndex, localDocNum := dvr.i.segmentIndexAndLocalDocNumFromGlobal(number) + if segmentIndex >= len(dvr.i.segment) { + return nil + } + + if dvr.currSegmentIndex != segmentIndex { + dvr.currSegmentIndex = segmentIndex + sdvr, err := dvr.i.segment[dvr.currSegmentIndex].segment.DocumentValueReader(dvr.fields) + if err != nil { + return err + } + dvr.sdvr = sdvr + } + + // handle virtual fields first + for _, field := range dvr.fields { + if vFields, ok := dvr.i.parent.config.virtualFields[field]; ok { + for _, vField := range vFields { + vField := vField + vField.EachTerm(func(term segment.FieldTerm) { + visitor(vField.Name(), term.Term()) + }) + } + } + } + + return dvr.sdvr.VisitDocumentValues(localDocNum, visitor) +} diff --git a/vendor/github.com/blevesearch/bleve/v2/index/scorch/stats.go 
b/vendor/github.com/blugelabs/bluge/index/stats.go similarity index 77% rename from vendor/github.com/blevesearch/bleve/v2/index/scorch/stats.go rename to vendor/github.com/blugelabs/bluge/index/stats.go index 626fff2e4..10bee48b4 100644 --- a/vendor/github.com/blevesearch/bleve/v2/index/scorch/stats.go +++ b/vendor/github.com/blugelabs/bluge/index/stats.go @@ -1,4 +1,4 @@ -// Copyright (c) 2017 Couchbase, Inc. +// Copyright (c) 2020 Couchbase, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,14 +12,25 @@ // See the License for the specific language governing permissions and // limitations under the License. -package scorch +package index import ( - "encoding/json" - "reflect" "sync/atomic" ) +func (s *Writer) Stats() Stats { + // copy current stats + rv := s.stats + + // add some computed values + numFilesOnDisk, numBytesUsedDisk := s.directory.Stats() + + rv.CurOnDiskBytes = numBytesUsedDisk + rv.CurOnDiskFiles = numFilesOnDisk + + return s.stats +} + // Stats tracks statistics about the index, fields that are // prefixed like CurXxxx are gauges (can go up and down), // and fields that are prefixed like TotXxxx are monotonically @@ -47,9 +58,6 @@ type Stats struct { TotTermSearchersStarted uint64 TotTermSearchersFinished uint64 - TotEventTriggerStarted uint64 - TotEventTriggerCompleted uint64 - TotIntroduceLoop uint64 TotIntroduceSegmentBeg uint64 TotIntroduceSegmentEnd uint64 @@ -85,9 +93,6 @@ type Stats struct { TotFileMergeLoopErr uint64 TotFileMergeLoopEnd uint64 - TotFileMergeForceOpsStarted uint64 - TotFileMergeForceOpsCompleted uint64 - TotFileMergePlan uint64 TotFileMergePlanErr uint64 TotFileMergePlanNone uint64 @@ -128,25 +133,27 @@ type Stats struct { MaxMemMergeZapTime uint64 TotMemMergeSegments uint64 TotMemorySegmentsAtRoot uint64 -} -// atomically populates the returned map -func (s *Stats) ToMap() map[string]interface{} { - m := 
map[string]interface{}{} - sve := reflect.ValueOf(s).Elem() - svet := sve.Type() - for i := 0; i < svet.NumField(); i++ { - svef := sve.Field(i) - if svef.CanAddr() { - svefp := svef.Addr().Interface() - m[svet.Field(i).Name] = atomic.LoadUint64(svefp.(*uint64)) - } - } - return m + TotEventFired uint64 + TotEventReturned uint64 + + CurOnDiskBytes uint64 + CurOnDiskBytesUsedByRoot uint64 // FIXME not currently supported + CurOnDiskFiles uint64 + + // the following stats are only used internally + persistEpoch uint64 + persistSnapshotSize uint64 + mergeEpoch uint64 + mergeSnapshotSize uint64 + newSegBufBytesAdded uint64 + newSegBufBytesRemoved uint64 + analysisBytesAdded uint64 + analysisBytesRemoved uint64 } -// MarshalJSON implements json.Marshaler, and in contrast to standard -// json marshaling provides atomic safety -func (s *Stats) MarshalJSON() ([]byte, error) { - return json.Marshal(s.ToMap()) +func (s *Writer) numEventsBlocking() int { + eventsReturned := atomic.LoadUint64(&s.stats.TotEventReturned) + eventsFired := atomic.LoadUint64(&s.stats.TotEventFired) + return int(eventsFired - eventsReturned) } diff --git a/vendor/github.com/blevesearch/bleve/v2/index/scorch/unadorned.go b/vendor/github.com/blugelabs/bluge/index/unadorned.go similarity index 76% rename from vendor/github.com/blevesearch/bleve/v2/index/scorch/unadorned.go rename to vendor/github.com/blugelabs/bluge/index/unadorned.go index 855b81361..f6c9a5174 100644 --- a/vendor/github.com/blevesearch/bleve/v2/index/scorch/unadorned.go +++ b/vendor/github.com/blugelabs/bluge/index/unadorned.go @@ -12,27 +12,14 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package scorch +package index import ( - "github.com/RoaringBitmap/roaring" - segment "github.com/blevesearch/scorch_segment_api/v2" "math" - "reflect" -) - -var reflectStaticSizeUnadornedPostingsIteratorBitmap int -var reflectStaticSizeUnadornedPostingsIterator1Hit int -var reflectStaticSizeUnadornedPosting int -func init() { - var pib unadornedPostingsIteratorBitmap - reflectStaticSizeUnadornedPostingsIteratorBitmap = int(reflect.TypeOf(pib).Size()) - var pi1h unadornedPostingsIterator1Hit - reflectStaticSizeUnadornedPostingsIterator1Hit = int(reflect.TypeOf(pi1h).Size()) - var up UnadornedPosting - reflectStaticSizeUnadornedPosting = int(reflect.TypeOf(up).Size()) -} + "github.com/RoaringBitmap/roaring" + segment "github.com/blugelabs/bluge_segment_api" +) type unadornedPostingsIteratorBitmap struct { actual roaring.IntPeekable @@ -52,10 +39,11 @@ func (i *unadornedPostingsIteratorBitmap) nextAtOrAfter(atOrAfter uint64) (segme if !exists { return nil, nil } - return UnadornedPosting(docNum), nil + up := unadornedPosting(docNum) + return &up, nil } -func (i *unadornedPostingsIteratorBitmap) nextDocNumAtOrAfter(atOrAfter uint64) (uint64, bool) { +func (i *unadornedPostingsIteratorBitmap) nextDocNumAtOrAfter(atOrAfter uint64) (int, bool) { if i.actual == nil || !i.actual.HasNext() { return 0, false } @@ -65,13 +53,25 @@ func (i *unadornedPostingsIteratorBitmap) nextDocNumAtOrAfter(atOrAfter uint64) return 0, false // couldn't find anything } - return uint64(i.actual.Next()), true + return int(i.actual.Next()), true } func (i *unadornedPostingsIteratorBitmap) Size() int { return reflectStaticSizeUnadornedPostingsIteratorBitmap } +func (i *unadornedPostingsIteratorBitmap) Empty() bool { + return false +} + +func (i *unadornedPostingsIteratorBitmap) Count() uint64 { + return i.actualBM.GetCardinality() +} + +func (i *unadornedPostingsIteratorBitmap) Close() error { + return nil +} + func (i *unadornedPostingsIteratorBitmap) ActualBitmap() *roaring.Bitmap { return 
i.actualBM } @@ -111,7 +111,8 @@ func (i *unadornedPostingsIterator1Hit) nextAtOrAfter(atOrAfter uint64) (segment if !exists { return nil, nil } - return UnadornedPosting(docNum), nil + up := unadornedPosting(docNum) + return &up, nil } func (i *unadornedPostingsIterator1Hit) nextDocNumAtOrAfter(atOrAfter uint64) (uint64, bool) { @@ -132,30 +133,46 @@ func (i *unadornedPostingsIterator1Hit) Size() int { return reflectStaticSizeUnadornedPostingsIterator1Hit } +func (i *unadornedPostingsIterator1Hit) Empty() bool { + return false +} + +func (i *unadornedPostingsIterator1Hit) Count() uint64 { + return 1 +} + +func (i *unadornedPostingsIterator1Hit) Close() error { + return nil +} + func newUnadornedPostingsIteratorFrom1Hit(docNum1Hit uint64) segment.PostingsIterator { return &unadornedPostingsIterator1Hit{ docNum1Hit, } } -type UnadornedPosting uint64 +type unadornedPosting uint64 + +func (p *unadornedPosting) Number() uint64 { + return uint64(*p) +} -func (p UnadornedPosting) Number() uint64 { - return uint64(p) +func (p *unadornedPosting) SetNumber(n uint64) { + *p = unadornedPosting(n) } -func (p UnadornedPosting) Frequency() uint64 { +func (p *unadornedPosting) Frequency() int { return 0 } -func (p UnadornedPosting) Norm() float64 { +func (p *unadornedPosting) Norm() float64 { return 0 } -func (p UnadornedPosting) Locations() []segment.Location { +func (p *unadornedPosting) Locations() []segment.Location { return nil } -func (p UnadornedPosting) Size() int { +func (p *unadornedPosting) Size() int { return reflectStaticSizeUnadornedPosting } diff --git a/vendor/github.com/blugelabs/bluge/index/writer.go b/vendor/github.com/blugelabs/bluge/index/writer.go new file mode 100644 index 000000000..cd95c3db9 --- /dev/null +++ b/vendor/github.com/blugelabs/bluge/index/writer.go @@ -0,0 +1,556 @@ +// Copyright (c) 2020 Couchbase, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package index + +import ( + "bytes" + "encoding/binary" + "fmt" + "io" + "log" + "sync" + "sync/atomic" + "time" + + segment "github.com/blugelabs/bluge_segment_api" + + "github.com/RoaringBitmap/roaring" +) + +type Writer struct { + config Config + deletionPolicy DeletionPolicy + directory Directory + segPlugin *SegmentPlugin // segment plug-in in use + + rootLock sync.RWMutex + root *Snapshot // holds 1 ref-count on the root + + introductions chan *segmentIntroduction + + rootPersisted []chan error // closed when root is persisted + persistedCallbacks []func(error) + + // state + nextSegmentID uint64 + + // control/track goroutines + closeCh chan struct{} + asyncTasks sync.WaitGroup + + stats Stats + + closeOnce sync.Once +} + +func OpenWriter(config Config) (*Writer, error) { + rv := &Writer{ + config: config, + deletionPolicy: config.DeletionPolicyFunc(), + directory: config.DirectoryFunc(), + closeCh: make(chan struct{}), + } + + // start the requested number of analysis workers + for i := 0; i < config.NumAnalysisWorkers; i++ { + config.GoFunc(func() { + analysisWorker(config.AnalysisChan, rv.closeCh) + }) + } + + var err error + rv.segPlugin, err = loadSegmentPlugin(config.supportedSegmentPlugins, config.SegmentType, config.SegmentVersion) + if err != nil { + return nil, fmt.Errorf("error loading segment plugin: %v", err) + } + + rv.root = &Snapshot{ + parent: rv, + refs: 1, + creator: "NewChill", + } + + err = rv.directory.Setup(false) + if err != nil { + return nil, fmt.Errorf("error setting up directory: %w", err) + } + + err = 
rv.directory.Lock() + if err != nil { + return nil, fmt.Errorf("error getting exclusive access to diretory: %w", err) + } + + lastPersistedEpoch, nextSnapshotEpoch, err2 := rv.loadSnapshots() + if err2 != nil { + _ = rv.Close() + return nil, err2 + } + + // initialize nextSegmentID to a safe value + existingSegments, err := rv.directory.List(ItemKindSegment) + if err != nil { + _ = rv.Close() + return nil, err + } + if len(existingSegments) > 0 { + rv.nextSegmentID = existingSegments[0] + } + rv.nextSegmentID++ + + // give deletion policy an opportunity to cleanup now before we begin + err = rv.deletionPolicy.Cleanup(rv.directory) + if err != nil { + _ = rv.Close() + return nil, fmt.Errorf("error cleaning up on open: %v", err) + } + + rv.introductions = make(chan *segmentIntroduction) + persistsCh := make(chan *persistIntroduction) + mergesCh := make(chan *segmentMerge) + introducerNotifier := make(watcherChan, 1) + persistNotifier := make(watcherChan, 1) + + // start async tasks + rv.asyncTasks.Add(1) + go rv.introducerLoop(rv.introductions, persistsCh, mergesCh, introducerNotifier, nextSnapshotEpoch) + rv.asyncTasks.Add(1) + go rv.persisterLoop(mergesCh, persistsCh, introducerNotifier, persistNotifier, lastPersistedEpoch) + rv.asyncTasks.Add(1) + go rv.mergerLoop(mergesCh, persistNotifier) + + return rv, nil +} + +func (s *Writer) loadSnapshots() (lastPersistedEpoch, nextSnapshotEpoch uint64, err error) { + nextSnapshotEpoch = 1 + snapshotEpochs, err := s.directory.List(ItemKindSnapshot) + if err != nil { + return 0, 0, err + } + + // try and load each snapshot seen + var snapshotsFound, snapshotLoaded bool + // walk snapshots backwards (oldest to newest) + // this allows the deletion policy see each snapshot + // in the order it was created + for i := len(snapshotEpochs) - 1; i >= 0; i-- { + snapshotEpoch := snapshotEpochs[i] + snapshotsFound = true + var indexSnapshot *Snapshot + indexSnapshot, err = s.loadSnapshot(snapshotEpoch) + if err != nil { + 
log.Printf("error loading snapshot epoch: %d: %v", snapshotEpoch, err) + // but keep going and hope there is another newer snapshot that works + continue + } + snapshotLoaded = true + + lastPersistedEpoch = indexSnapshot.epoch + nextSnapshotEpoch = indexSnapshot.epoch + 1 + + // inform the deletion policy about this commit + s.deletionPolicy.Commit(indexSnapshot) + + // make this snapshot the root (and retire the previous) + atomic.StoreUint64(&s.stats.TotFileSegmentsAtRoot, uint64(len(indexSnapshot.segment))) + s.replaceRoot(indexSnapshot, nil, nil) + } + if snapshotsFound && !snapshotLoaded { + // handle this case better, there was at least one snapshot on disk + // but we failed to successfully load anything + // this results in losing all data and starting from scratch + // should require, some more explicit decision, for now error out + return 0, 0, fmt.Errorf("existing snapshots found, but none could be loaded, exiting") + } + return lastPersistedEpoch, nextSnapshotEpoch, nil +} + +func (s *Writer) fireEvent(kind int, dur time.Duration) { + if s.config.EventCallback != nil { + atomic.AddUint64(&s.stats.TotEventFired, 1) + s.config.EventCallback(Event{Kind: kind, Chill: s, Duration: dur}) + atomic.AddUint64(&s.stats.TotEventReturned, 1) + } +} + +func (s *Writer) fireAsyncError(err error) { + if s.config.AsyncError != nil { + s.config.AsyncError(err) + } + atomic.AddUint64(&s.stats.TotOnErrors, 1) +} + +func (s *Writer) Close() (err error) { + s.closeOnce.Do(func() { + err = s.close() + }) + return err +} + +func (s *Writer) close() (err error) { + startTime := time.Now() + defer func() { + s.fireEvent(EventKindClose, time.Since(startTime)) + }() + + s.fireEvent(EventKindCloseStart, 0) + + // signal to async tasks we want to close + close(s.closeCh) + // wait for them to close + s.asyncTasks.Wait() + + s.replaceRoot(nil, nil, nil) + + err = s.directory.Unlock() + if err != nil { + return err + } + + return nil +} + +// Batch applies a batch of changes to the 
index atomically +func (s *Writer) Batch(batch *Batch) (err error) { + start := time.Now() + + defer func() { + s.fireEvent(EventKindBatchIntroduction, time.Since(start)) + }() + + var numUpdates = len(batch.documents) + var numDeletes = len(batch.ids) + + var allDocsAnalyzed sync.WaitGroup + + for _, doc := range batch.documents { + allDocsAnalyzed.Add(1) + doc := doc // capture variable + if doc != nil { + aw := func() { + doc.Analyze() + allDocsAnalyzed.Done() + } + // put the work on the queue + s.config.AnalysisChan <- aw + } + } + + allDocsAnalyzed.Wait() + + atomic.AddUint64(&s.stats.TotAnalysisTime, uint64(time.Since(start))) + + indexStart := time.Now() + + // notify handlers that we're about to introduce a segment + s.fireEvent(EventKindBatchIntroductionStart, 0) + + var newSegment *segmentWrapper + var bufBytes uint64 + if numUpdates > 0 { + newSegment, bufBytes, err = s.newSegment(batch.documents) + if err != nil { + return err + } + atomic.AddUint64(&s.stats.newSegBufBytesAdded, bufBytes) + } else { + atomic.AddUint64(&s.stats.TotBatchesEmpty, 1) + } + + err = s.prepareSegment(newSegment, batch.ids, nil, batch.PersistedCallback()) + if err != nil { + if newSegment != nil { + _ = newSegment.Close() + } + atomic.AddUint64(&s.stats.TotOnErrors, 1) + } else { + atomic.AddUint64(&s.stats.TotUpdates, uint64(numUpdates)) + atomic.AddUint64(&s.stats.TotDeletes, uint64(numDeletes)) + atomic.AddUint64(&s.stats.TotBatches, 1) + } + + atomic.AddUint64(&s.stats.newSegBufBytesRemoved, bufBytes) + atomic.AddUint64(&s.stats.TotIndexTime, uint64(time.Since(indexStart))) + + return err +} + +func (s *Writer) prepareSegment(newSegment *segmentWrapper, idTerms []segment.Term, + internalOps map[string][]byte, persistedCallback func(error)) error { + // new introduction + introduction := &segmentIntroduction{ + id: atomic.AddUint64(&s.nextSegmentID, 1), + data: newSegment, + idTerms: idTerms, + obsoletes: make(map[uint64]*roaring.Bitmap), + internal: internalOps, + applied: 
make(chan error), + persistedCallback: persistedCallback, + } + + if !s.config.UnsafeBatch { + introduction.persisted = make(chan error, 1) + } + + // optimistically prepare obsoletes outside of rootLock + root := s.currentSnapshot() + defer func() { _ = root.Close() }() + + for _, seg := range root.segment { + delta, err := seg.segment.DocsMatchingTerms(idTerms) + if err != nil { + return err + } + introduction.obsoletes[seg.id] = delta + } + + introStartTime := time.Now() + + s.introductions <- introduction + + // block until this segment is applied + err := <-introduction.applied + if err != nil { + return err + } + + if introduction.persisted != nil { + err = <-introduction.persisted + } + + introTime := uint64(time.Since(introStartTime)) + atomic.AddUint64(&s.stats.TotBatchIntroTime, introTime) + if atomic.LoadUint64(&s.stats.MaxBatchIntroTime) < introTime { + atomic.StoreUint64(&s.stats.MaxBatchIntroTime, introTime) + } + + return err +} + +// Reader returns a low-level accessor on the index data. Close it to +// release associated resources. 
+func (s *Writer) Reader() (*Snapshot, error) { + return s.currentSnapshot(), nil +} + +func (s *Writer) MemoryUsed() (memUsed uint64) { + indexSnapshot := s.currentSnapshot() + if indexSnapshot == nil { + return + } + + defer func() { + _ = indexSnapshot.Close() + }() + + // Account for current root snapshot overhead + memUsed += uint64(indexSnapshot.Size()) + + // Account for snapshot that the persister may be working on + persistEpoch := atomic.LoadUint64(&s.stats.persistEpoch) + persistSnapshotSize := atomic.LoadUint64(&s.stats.persistSnapshotSize) + if persistEpoch != 0 && indexSnapshot.epoch > persistEpoch { + // the snapshot that the persister is working on isn't the same as + // the current snapshot + memUsed += persistSnapshotSize + } + + // Account for snapshot that the merger may be working on + mergeEpoch := atomic.LoadUint64(&s.stats.mergeEpoch) + mergeSnapshotSize := atomic.LoadUint64(&s.stats.mergeSnapshotSize) + if mergeEpoch != 0 && indexSnapshot.epoch > mergeEpoch { + // the snapshot that the merger is working on isn't the same as + // the current snapshot + memUsed += mergeSnapshotSize + } + + memUsed += atomic.LoadUint64(&s.stats.newSegBufBytesAdded) - + atomic.LoadUint64(&s.stats.newSegBufBytesRemoved) + + memUsed += atomic.LoadUint64(&s.stats.analysisBytesAdded) - + atomic.LoadUint64(&s.stats.analysisBytesRemoved) + + return memUsed +} + +func (s *Writer) currentSnapshot() *Snapshot { + s.rootLock.RLock() + var rv *Snapshot + if s.root != nil { + rv = s.root + if rv != nil { + rv.addRef() + } + } + s.rootLock.RUnlock() + return rv +} + +func (s *Writer) currentEpoch() uint64 { + indexSnapshot := s.currentSnapshot() + var rv uint64 + if indexSnapshot != nil { + rv = indexSnapshot.epoch + _ = indexSnapshot.Close() + } + return rv +} + +func OpenReader(config Config) (*Snapshot, error) { + parent := &Writer{ + config: config, + directory: config.DirectoryFunc(), + } + + var err error + parent.segPlugin, err = 
loadSegmentPlugin(config.supportedSegmentPlugins, + config.SegmentType, config.SegmentVersion) + if err != nil { + return nil, fmt.Errorf("error loadign segment plugin: %v", err) + } + + err = parent.directory.Setup(true) + if err != nil { + return nil, fmt.Errorf("error setting up directory: %w", err) + } + + snapshotEpochs, err := parent.directory.List(ItemKindSnapshot) + if err != nil { + return nil, err + } + + // start with most recent + var indexSnapshot *Snapshot + for _, snapshotEpoch := range snapshotEpochs { + indexSnapshot, err = parent.loadSnapshot(snapshotEpoch) + if err != nil { + log.Printf("error loading snapshot epoch: %d: %v", snapshotEpoch, err) + // but keep going and hope there is another newer snapshot that works + continue + } + break + } + if indexSnapshot == nil { + return nil, fmt.Errorf("unable to find a usable snapshot") + } + + return indexSnapshot, nil +} + +func (s *Writer) loadSnapshot(epoch uint64) (*Snapshot, error) { + snapshot := &Snapshot{ + parent: s, + epoch: epoch, + refs: 1, + creator: "loadSnapshot", + } + + data, closer, err := s.directory.Load(ItemKindSnapshot, epoch) + if err != nil { + return nil, err + } + + // wrap the reader so we never read the last 4 bytes (CRC) + dataReader := io.LimitReader(data.Reader(), int64(data.Len()-crcWidth)) + var crcReader *countHashReader + if s.config.ValidateSnapshotCRC { + crcReader = newCountHashReader(dataReader) + dataReader = crcReader + } + + _, err = snapshot.ReadFrom(dataReader) + if err != nil { + if closer != nil { + _ = closer.Close() + } + return nil, err + } + + if crcReader != nil { + computedCRCBytes := make([]byte, crcWidth) + binary.BigEndian.PutUint32(computedCRCBytes, crcReader.Sum32()) + var fileCRCBytes []byte + fileCRCBytes, err = data.Read(data.Len()-crcWidth, data.Len()) + if err != nil { + if closer != nil { + _ = closer.Close() + } + return nil, fmt.Errorf("error reading snapshot CRC: %w", err) + } + if !bytes.Equal(computedCRCBytes, fileCRCBytes) { + if 
closer != nil { + _ = closer.Close() + } + return nil, fmt.Errorf("CRC mismatch loading snapshot %d: computed: %x file: %x", + epoch, computedCRCBytes, fileCRCBytes) + } + } + if closer != nil { + err = closer.Close() + if err != nil { + return nil, err + } + } + + var running uint64 + for _, segSnapshot := range snapshot.segment { + segPlugin, err := loadSegmentPlugin(s.config.supportedSegmentPlugins, segSnapshot.segmentType, segSnapshot.segmentVersion) + if err != nil { + return nil, fmt.Errorf("error loading required segment plugin: %v", err) + } + segSnapshot.segment, err = s.loadSegment(segSnapshot.id, segPlugin) + if err != nil { + return nil, fmt.Errorf("error opening segment %d: %w", segSnapshot.id, err) + } + + snapshot.offsets = append(snapshot.offsets, running) + running += segSnapshot.segment.Count() + } + + return snapshot, nil +} + +func (s *Writer) loadSegment(id uint64, plugin *SegmentPlugin) (*segmentWrapper, error) { + data, closer, err := s.directory.Load(ItemKindSegment, id) + if err != nil { + return nil, fmt.Errorf("error loading segment fromt directory: %v", err) + } + seg, err := plugin.Load(data) + if err != nil { + if closer != nil { + _ = closer.Close() + } + return nil, fmt.Errorf("error loading segment: %v", err) + } + return &segmentWrapper{ + Segment: seg, + refCounter: &closeOnLastRefCounter{ + closer: closer, + refs: 1, + }, + persisted: true, + }, nil +} + +func analysisWorker(q chan func(), closeCh chan struct{}) { + for { + select { + case <-closeCh: + return + case w := <-q: + w() + } + } +} diff --git a/vendor/github.com/blugelabs/bluge/index/writer_offline.go b/vendor/github.com/blugelabs/bluge/index/writer_offline.go new file mode 100644 index 000000000..97f839a2e --- /dev/null +++ b/vendor/github.com/blugelabs/bluge/index/writer_offline.go @@ -0,0 +1,215 @@ +// Copyright (c) 2020 The Bluge Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package index + +import ( + "fmt" + "io" + "sync" + + "github.com/RoaringBitmap/roaring" + + segment "github.com/blugelabs/bluge_segment_api" +) + +type WriterOffline struct { + m sync.Mutex + config Config + directory Directory + segPlugin *SegmentPlugin + segCount uint64 + segIDs []uint64 + + mergeMax int +} + +func OpenOfflineWriter(config Config) (writer *WriterOffline, err error) { + writer = &WriterOffline{ + config: config, + directory: config.DirectoryFunc(), + segPlugin: nil, + mergeMax: 10, + } + + err = writer.directory.Setup(false) + if err != nil { + return nil, fmt.Errorf("error setting up directory: %w", err) + } + + writer.segPlugin, err = loadSegmentPlugin(config.supportedSegmentPlugins, config.SegmentType, config.SegmentVersion) + if err != nil { + return nil, fmt.Errorf("error loading segment plugin: %v", err) + } + + return writer, nil +} + +func (s *WriterOffline) Batch(batch *Batch) (err error) { + s.m.Lock() + defer s.m.Unlock() + + if len(batch.documents) == 0 { + return nil + } + + for _, doc := range batch.documents { + if doc != nil { + doc.Analyze() + } + } + + newSegment, _, err := s.segPlugin.New(batch.documents, s.config.NormCalc) + if err != nil { + return err + } + + err = s.directory.Persist(ItemKindSegment, s.segCount, newSegment, nil) + if err != nil { + return fmt.Errorf("error persisting segment: %v", err) + } + s.segIDs = append(s.segIDs, s.segCount) + s.segCount++ + + 
return nil +} + +func (s *WriterOffline) doMerge() error { + for len(s.segIDs) > 1 { + // merge the next number of segments into one new one + // or, if there are fewer than remaining, merge them all + mergeCount := s.mergeMax + if mergeCount > len(s.segIDs) { + mergeCount = len(s.segIDs) + } + + mergeIDs := s.segIDs[0:mergeCount] + s.segIDs = s.segIDs[mergeCount:] + + // open each of the segments to be merged + mergeSegs := make([]segment.Segment, 0, mergeCount) + + var closers []io.Closer + // closeOpenedSegs attempts to close all opened + // segments even if an error occurs, in which case + // the first error is returned + closeOpenedSegs := func() error { + var err error + for _, closer := range closers { + clErr := closer.Close() + if clErr != nil && err == nil { + err = clErr + } + } + return err + } + + for _, mergeID := range mergeIDs { + data, closer, err := s.directory.Load(ItemKindSegment, mergeID) + if err != nil { + _ = closeOpenedSegs() + return fmt.Errorf("error loading segment from directory: %w", err) + } + if closer != nil { + closers = append(closers, closer) + } + seg, err := s.segPlugin.Load(data) + if err != nil { + _ = closeOpenedSegs() + return fmt.Errorf("error loading segment: %w", err) + } + mergeSegs = append(mergeSegs, seg) + } + + // do the merge + drops := make([]*roaring.Bitmap, mergeCount) + merger := s.segPlugin.Merge(mergeSegs, drops, s.config.MergeBufferSize) + + err := s.directory.Persist(ItemKindSegment, s.segCount, merger, nil) + if err != nil { + _ = closeOpenedSegs() + return fmt.Errorf("error merging segments (%v): %w", mergeIDs, err) + } + s.segIDs = append(s.segIDs, s.segCount) + s.segCount++ + + // close segments opened for merge + err = closeOpenedSegs() + if err != nil { + return fmt.Errorf("error closing opened segments: %w", err) + } + + // remove merged segments + for _, mergeID := range mergeIDs { + err = s.directory.Remove(ItemKindSegment, mergeID) + if err != nil { + return fmt.Errorf("error removing segment %v 
after merge: %w", mergeIDs, err) + } + } + } + + return nil +} + +func (s *WriterOffline) Close() error { + s.m.Lock() + defer s.m.Unlock() + + // perform all the merging into one segment + err := s.doMerge() + if err != nil { + return fmt.Errorf("error while merging: %w", err) + } + + // open the merged segment + data, closer, err := s.directory.Load(ItemKindSegment, s.segIDs[0]) + if err != nil { + return fmt.Errorf("error loading segment from directory: %w", err) + } + finalSeg, err := s.segPlugin.Load(data) + if err != nil { + if closer != nil { + _ = closer.Close() + } + return fmt.Errorf("error loading segment: %w", err) + } + + // fake snapshot referencing this segment + snapshot := &Snapshot{ + segment: []*segmentSnapshot{ + { + id: s.segIDs[0], + segment: &segmentWrapper{ + Segment: finalSeg, + refCounter: nil, + persisted: true, + }, + segmentType: s.segPlugin.Type, + segmentVersion: s.segPlugin.Version, + }, + }, + epoch: s.segIDs[0], + } + + // persist the snapshot + err = s.directory.Persist(ItemKindSnapshot, s.segIDs[0], snapshot, nil) + if err != nil { + return fmt.Errorf("error recording snapshot: %w", err) + } + + if closer != nil { + return closer.Close() + } + return nil +} diff --git a/vendor/github.com/blugelabs/bluge/multisearch.go b/vendor/github.com/blugelabs/bluge/multisearch.go new file mode 100644 index 000000000..9040b75c9 --- /dev/null +++ b/vendor/github.com/blugelabs/bluge/multisearch.go @@ -0,0 +1,95 @@ +// Copyright (c) 2020 The Bluge Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package bluge + +import ( + "context" + + "github.com/blugelabs/bluge/search" +) + +type MultiSearcherList struct { + searchers []search.Searcher + index int + err error +} + +func NewMultiSearcherList(searchers []search.Searcher) *MultiSearcherList { + return &MultiSearcherList{ + searchers: searchers, + } +} + +func (m *MultiSearcherList) Next(ctx *search.Context) (*search.DocumentMatch, error) { + if m.err != nil { + return nil, m.err + } + if m.index < len(m.searchers) { + var dm *search.DocumentMatch + dm, m.err = m.searchers[m.index].Next(ctx) + if m.err != nil { + return nil, m.err + } + if dm == nil { + m.index++ + return m.Next(ctx) + } + return dm, nil + } + return nil, nil +} + +func (m *MultiSearcherList) DocumentMatchPoolSize() int { + // we search sequentially, so just use largest + var rv int + for _, searcher := range m.searchers { + ps := searcher.DocumentMatchPoolSize() + if ps > rv { + rv = ps + } + } + return rv +} + +func (m *MultiSearcherList) Close() (err error) { + for _, searcher := range m.searchers { + cerr := searcher.Close() + if err == nil { + err = cerr + } + } + return err +} + +func MultiSearch(ctx context.Context, req SearchRequest, readers ...*Reader) (search.DocumentMatchIterator, error) { + collector := req.Collector() + + var searchers []search.Searcher + for _, reader := range readers { + searcher, err := req.Searcher(reader.reader, reader.config) + if err != nil { + return nil, err + } + searchers = append(searchers, searcher) + } + + msl := NewMultiSearcherList(searchers) + dmItr, err := collector.Collect(ctx, req.Aggregations(), msl) + if err != nil { + return nil, err + } + + return dmItr, nil +} diff --git a/vendor/github.com/blevesearch/bleve/v2/numeric/bin.go b/vendor/github.com/blugelabs/bluge/numeric/bin.go similarity index 72% rename from vendor/github.com/blevesearch/bleve/v2/numeric/bin.go rename to 
vendor/github.com/blugelabs/bluge/numeric/bin.go index 368952a2c..621ef3b18 100644 --- a/vendor/github.com/blevesearch/bleve/v2/numeric/bin.go +++ b/vendor/github.com/blugelabs/bluge/numeric/bin.go @@ -1,3 +1,17 @@ +// Copyright (c) 2020 Couchbase, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package numeric var interleaveMagic = []uint64{ diff --git a/vendor/github.com/blevesearch/bleve/v2/numeric/float.go b/vendor/github.com/blugelabs/bluge/numeric/float.go similarity index 91% rename from vendor/github.com/blevesearch/bleve/v2/numeric/float.go rename to vendor/github.com/blugelabs/bluge/numeric/float.go index 2bb14d7e8..3fa43cc27 100644 --- a/vendor/github.com/blevesearch/bleve/v2/numeric/float.go +++ b/vendor/github.com/blugelabs/bluge/numeric/float.go @@ -1,4 +1,4 @@ -// Copyright (c) 2014 Couchbase, Inc. +// Copyright (c) 2020 Couchbase, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -21,7 +21,7 @@ import ( func Float64ToInt64(f float64) int64 { fasint := int64(math.Float64bits(f)) if fasint < 0 { - fasint = fasint ^ 0x7fffffffffffffff + fasint ^= 0x7fffffffffffffff } return fasint } diff --git a/vendor/github.com/blevesearch/bleve/v2/geo/README.md b/vendor/github.com/blugelabs/bluge/numeric/geo/README.md similarity index 96% rename from vendor/github.com/blevesearch/bleve/v2/geo/README.md rename to vendor/github.com/blugelabs/bluge/numeric/geo/README.md index 43bcd98fe..a02e04923 100644 --- a/vendor/github.com/blevesearch/bleve/v2/geo/README.md +++ b/vendor/github.com/blugelabs/bluge/numeric/geo/README.md @@ -1,4 +1,4 @@ -# geo support in bleve +# geo support in blube First, all of this geo code is a Go adaptation of the [Lucene 5.3.2 sandbox geo support](https://lucene.apache.org/core/5_3_2/sandbox/org/apache/lucene/util/package-summary.html). diff --git a/vendor/github.com/blevesearch/bleve/v2/geo/geo.go b/vendor/github.com/blugelabs/bluge/numeric/geo/geo.go similarity index 91% rename from vendor/github.com/blevesearch/bleve/v2/geo/geo.go rename to vendor/github.com/blugelabs/bluge/numeric/geo/geo.go index 55eace1df..df81a5de7 100644 --- a/vendor/github.com/blevesearch/bleve/v2/geo/geo.go +++ b/vendor/github.com/blugelabs/bluge/numeric/geo/geo.go @@ -1,4 +1,4 @@ -// Copyright (c) 2017 Couchbase, Inc. +// Copyright (c) 2020 Couchbase, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -18,7 +18,7 @@ import ( "fmt" "math" - "github.com/blevesearch/bleve/v2/numeric" + "github.com/blugelabs/bluge/numeric" ) // GeoBits is the number of bits used for a single geo point @@ -34,8 +34,11 @@ var minLatRad = minLat * degreesToRadian var maxLonRad = maxLon * degreesToRadian var maxLatRad = maxLat * degreesToRadian var geoTolerance = 1e-6 -var lonScale = float64((uint64(0x1)< 0 { - lon[0] = (lon[0] + lon[1]) / 2 + lons[0] = (lons[0] + lons[1]) / 2 } else { - lon[1] = (lon[0] + lon[1]) / 2 + lons[1] = (lons[0] + lons[1]) / 2 } } else { if cd&masks[j] > 0 { - lat[0] = (lat[0] + lat[1]) / 2 + lats[0] = (lats[0] + lats[1]) / 2 } else { - lat[1] = (lat[0] + lat[1]) / 2 + lats[1] = (lats[0] + lats[1]) / 2 } } even = !even } } - return (lat[0] + lat[1]) / 2, (lon[0] + lon[1]) / 2 + return (lats[0] + lats[1]) / 2, (lons[0] + lons[1]) / 2 } +const bitsPerChar = 5 + func EncodeGeoHash(lat, lon float64) string { even := true lats := []float64{-90.0, 90.0} @@ -98,7 +100,7 @@ func EncodeGeoHash(lat, lon float64) string { } } even = !even - if bit < 4 { + if bit < (bitsPerChar - 1) { bit++ } else { geoHash += string(base32encoding.enc[ch]) diff --git a/vendor/github.com/blugelabs/bluge/numeric/geo/parse.go b/vendor/github.com/blugelabs/bluge/numeric/geo/parse.go new file mode 100644 index 000000000..6dc34b098 --- /dev/null +++ b/vendor/github.com/blugelabs/bluge/numeric/geo/parse.go @@ -0,0 +1,196 @@ +// Copyright (c) 2020 Couchbase, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package geo + +import ( + "reflect" + "strconv" + "strings" +) + +const lonLatSliceLen = 2 + +// ExtractGeoPoint takes an arbitrary interface{} and tries it's best to +// interpret it is as geo point. Supported formats: +// Container: +// slice length 2 (GeoJSON) +// first element lon, second element lat +// string (coordinates separated by comma, or a geohash) +// first element lat, second element lon +// map[string]interface{} +// exact keys lat and lon or lng +// struct +// w/exported fields case-insensitive match on lat and lon or lng +// struct +// satisfying Later and Loner or Lnger interfaces +// +// in all cases values must be some sort of numeric-like thing: int/uint/float +func ExtractGeoPoint(thing interface{}) (lon, lat float64, success bool) { + var foundLon, foundLat bool + + thingVal := reflect.ValueOf(thing) + if !thingVal.IsValid() { + return lon, lat, false + } + + thingTyp := thingVal.Type() + + if thingVal.Kind() == reflect.Slice { + // is it a slice + lon, foundLon, lat, foundLat = extractSlice(thingVal) + } else if thingVal.Kind() == reflect.String { + // is it a string + lon, foundLon, lat, foundLat = extractString(thingVal) + } else if l, ok := thing.(map[string]interface{}); ok { + // is it a map + lon, foundLon, lat, foundLat = extractMap(l) + } else if thingVal.Kind() == reflect.Struct { + // now try reflection on struct fields + lon, foundLon, lat, foundLat = extractStruct(thingVal, thingTyp) + } + + // last hope, some interfaces + // lon + switch l := thing.(type) { + case loner: + lon = l.Lon() + foundLon = true + case lnger: + lon = l.Lng() + foundLon = true + } + + // lat + if l, ok := thing.(later); ok { + lat = l.Lat() + foundLat = true + } + + return lon, lat, foundLon && foundLat +} + +func extractStruct(thingVal reflect.Value, thingTyp reflect.Type) (lon float64, foundLon bool, lat float64, foundLat bool) { + for i := 0; 
i < thingVal.NumField(); i++ { + fieldName := thingTyp.Field(i).Name + if strings.HasPrefix(strings.ToLower(fieldName), "lon") { + if thingVal.Field(i).CanInterface() { + fieldVal := thingVal.Field(i).Interface() + lon, foundLon = extractNumericVal(fieldVal) + } + } + if strings.HasPrefix(strings.ToLower(fieldName), "lng") { + if thingVal.Field(i).CanInterface() { + fieldVal := thingVal.Field(i).Interface() + lon, foundLon = extractNumericVal(fieldVal) + } + } + if strings.HasPrefix(strings.ToLower(fieldName), "lat") { + if thingVal.Field(i).CanInterface() { + fieldVal := thingVal.Field(i).Interface() + lat, foundLat = extractNumericVal(fieldVal) + } + } + } + return lon, foundLon, lat, foundLat +} + +func extractMap(l map[string]interface{}) (lon float64, foundLon bool, lat float64, foundLat bool) { + if lval, ok := l["lon"]; ok { + lon, foundLon = extractNumericVal(lval) + } else if lval, ok := l["lng"]; ok { + lon, foundLon = extractNumericVal(lval) + } + if lval, ok := l["lat"]; ok { + lat, foundLat = extractNumericVal(lval) + } + return lon, foundLon, lat, foundLat +} + +func extractString(thingVal reflect.Value) (lon float64, foundLon bool, lat float64, foundLat bool) { + geoStr := thingVal.Interface().(string) + if strings.Contains(geoStr, ",") { + // geo point with coordinates split by comma + points := strings.Split(geoStr, ",") + for i, point := range points { + // trim any leading or trailing white spaces + points[i] = strings.TrimSpace(point) + } + if len(points) == lonLatSliceLen { + var err error + lat, err = strconv.ParseFloat(points[0], 64) + if err == nil { + foundLat = true + } + lon, err = strconv.ParseFloat(points[1], 64) + if err == nil { + foundLon = true + } + } + } else if len(geoStr) <= geoHashMaxLength { + lat, lon = DecodeGeoHash(geoStr) + foundLat = true + foundLon = true + } + return lon, foundLon, lat, foundLat +} + +func extractSlice(thingVal reflect.Value) (lon float64, foundLon bool, lat float64, foundLat bool) { + // must be length 
2 + if thingVal.Len() == lonLatSliceLen { + first := thingVal.Index(0) + if first.CanInterface() { + firstVal := first.Interface() + lon, foundLon = extractNumericVal(firstVal) + } + second := thingVal.Index(1) + if second.CanInterface() { + secondVal := second.Interface() + lat, foundLat = extractNumericVal(secondVal) + } + } + return lon, foundLon, lat, foundLat +} + +// extract numeric value (if possible) and returns a float64 +func extractNumericVal(v interface{}) (float64, bool) { + val := reflect.ValueOf(v) + if !val.IsValid() { + return 0, false + } + typ := val.Type() + switch typ.Kind() { + case reflect.Float32, reflect.Float64: + return val.Float(), true + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return float64(val.Int()), true + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return float64(val.Uint()), true + } + + return 0, false +} + +// various support interfaces which can be used to find lat/lon +type loner interface { + Lon() float64 +} + +type later interface { + Lat() float64 +} + +type lnger interface { + Lng() float64 +} diff --git a/vendor/github.com/blevesearch/bleve/v2/geo/sloppy.go b/vendor/github.com/blugelabs/bluge/numeric/geo/sloppy.go similarity index 97% rename from vendor/github.com/blevesearch/bleve/v2/geo/sloppy.go rename to vendor/github.com/blugelabs/bluge/numeric/geo/sloppy.go index 0ce646d74..42fbc5a48 100644 --- a/vendor/github.com/blevesearch/bleve/v2/geo/sloppy.go +++ b/vendor/github.com/blugelabs/bluge/numeric/geo/sloppy.go @@ -1,4 +1,4 @@ -// Copyright (c) 2017 Couchbase, Inc. +// Copyright (c) 2020 Couchbase, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -129,7 +129,7 @@ func init() { lat := math.Pi * float64(i) / (2*radiusTabsSize - 1) one := math.Pow(a2*math.Cos(lat), 2) two := math.Pow(b2*math.Sin(lat), 2) - three := math.Pow(float64(a)*math.Cos(lat), 2) + three := math.Pow(a*math.Cos(lat), 2) four := math.Pow(b*math.Sin(lat), 2) radius := math.Sqrt((one + two) / (three + four)) earthDiameterPerLatitude[i] = 2 * radius / 1000 @@ -181,7 +181,8 @@ func asin(a float64) float64 { if a <= asinMaxValueForTabs { index := int(a*asinIndexer + 0.5) delta := a - float64(index)*asinDelta - result := asinTab[index] + delta*(asinDer1DivF1Tab[index]+delta*(asinDer2DivF2Tab[index]+delta*(asinDer3DivF3Tab[index]+delta*asinDer4DivF4Tab[index]))) + result := asinTab[index] + delta*(asinDer1DivF1Tab[index]+delta*(asinDer2DivF2Tab[index]+delta* + (asinDer3DivF3Tab[index]+delta*asinDer4DivF4Tab[index]))) if negateResult { return -result } diff --git a/vendor/github.com/blevesearch/bleve/v2/numeric/prefix_coded.go b/vendor/github.com/blugelabs/bluge/numeric/prefix_coded.go similarity index 91% rename from vendor/github.com/blevesearch/bleve/v2/numeric/prefix_coded.go rename to vendor/github.com/blugelabs/bluge/numeric/prefix_coded.go index 29bd0fc5c..e8e11d3aa 100644 --- a/vendor/github.com/blevesearch/bleve/v2/numeric/prefix_coded.go +++ b/vendor/github.com/blugelabs/bluge/numeric/prefix_coded.go @@ -1,4 +1,4 @@ -// Copyright (c) 2014 Couchbase, Inc. +// Copyright (c) 2020 Couchbase, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -71,7 +71,7 @@ func MustNewPrefixCodedInt64(in int64, shift uint) PrefixCoded { func (p PrefixCoded) Shift() (uint, error) { if len(p) > 0 { shift := p[0] - ShiftStartInt64 - if shift < 0 || shift < 63 { + if shift < 63 { return uint(shift), nil } } @@ -88,14 +88,14 @@ func (p PrefixCoded) Int64() (int64, error) { sortableBits <<= 7 sortableBits |= int64(inbyte) } - return int64(uint64((sortableBits << shift)) ^ 0x8000000000000000), nil + return int64(uint64(sortableBits< 0 { if p[0] < ShiftStartInt64 || p[0] > ShiftStartInt64+63 { return false, 0 diff --git a/vendor/github.com/blugelabs/bluge/query.go b/vendor/github.com/blugelabs/bluge/query.go new file mode 100644 index 000000000..4404ea4ce --- /dev/null +++ b/vendor/github.com/blugelabs/bluge/query.go @@ -0,0 +1,1476 @@ +// Copyright (c) 2020 Couchbase, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bluge + +import ( + "fmt" + "math" + "strings" + "time" + + "github.com/blugelabs/bluge/search/similarity" + + "github.com/blugelabs/bluge/analysis" + "github.com/blugelabs/bluge/analysis/tokenizer" + "github.com/blugelabs/bluge/numeric" + "github.com/blugelabs/bluge/numeric/geo" + "github.com/blugelabs/bluge/search" + "github.com/blugelabs/bluge/search/searcher" +) + +// A Query represents a description of the type +// and parameters for a query into the index. 
+type Query interface { + Searcher(i search.Reader, + options search.SearcherOptions) (search.Searcher, error) +} + +type querySlice []Query + +func (s querySlice) searchers(i search.Reader, options search.SearcherOptions) (rv []search.Searcher, err error) { + for _, q := range s { + var sr search.Searcher + sr, err = q.Searcher(i, options) + if err != nil { + // close all the already opened searchers + for _, rvs := range rv { + _ = rvs.Close() + } + return nil, err + } + rv = append(rv, sr) + } + return rv, nil +} + +func (s querySlice) disjunction(i search.Reader, options search.SearcherOptions, min int) (search.Searcher, error) { + constituents, err := s.searchers(i, options) + if err != nil { + return nil, err + } + return searcher.NewDisjunctionSearcher(i, constituents, min, similarity.NewCompositeSumScorer(), options) +} + +func (s querySlice) conjunction(i search.Reader, options search.SearcherOptions) (search.Searcher, error) { + constituents, err := s.searchers(i, options) + if err != nil { + return nil, err + } + return searcher.NewConjunctionSearcher(i, constituents, similarity.NewCompositeSumScorer(), options) +} + +type validatableQuery interface { + Query + Validate() error +} + +type boost float64 + +func (b *boost) Value() float64 { + if b == nil { + return 1 + } + return float64(*b) +} + +type BooleanQuery struct { + musts querySlice + shoulds querySlice + mustNots querySlice + boost *boost + scorer search.CompositeScorer + minShould int +} + +// NewBooleanQuery creates a compound Query composed +// of several other Query objects. +// These other query objects are added using the +// AddMust() AddShould() and AddMustNot() methods. +// Result documents must satisfy ALL of the +// must Queries. +// Result documents must satisfy NONE of the must not +// Queries. +// Result documents that ALSO satisfy any of the should +// Queries will score higher. 
+func NewBooleanQuery() *BooleanQuery { + return &BooleanQuery{} +} + +// SetMinShould requires that at least minShould of the +// should Queries must be satisfied. +func (q *BooleanQuery) SetMinShould(minShould int) *BooleanQuery { + q.minShould = minShould + return q +} + +func (q *BooleanQuery) AddMust(m ...Query) *BooleanQuery { + q.musts = append(q.musts, m...) + return q +} + +// Musts returns the queries that the documents must match +func (q *BooleanQuery) Musts() []Query { + return q.musts +} + +func (q *BooleanQuery) AddShould(m ...Query) *BooleanQuery { + q.shoulds = append(q.shoulds, m...) + return q +} + +// Shoulds returns queries that the documents may match +func (q *BooleanQuery) Shoulds() []Query { + return q.shoulds +} + +func (q *BooleanQuery) AddMustNot(m ...Query) *BooleanQuery { + q.mustNots = append(q.mustNots, m...) + return q +} + +// MustNots returns queries that the documents must not match +func (q *BooleanQuery) MustNots() []Query { + return q.mustNots +} + +// MinShould returns the minimum number of should queries that need to match +func (q *BooleanQuery) MinShould() int { + return q.minShould +} + +func (q *BooleanQuery) SetBoost(b float64) *BooleanQuery { + boostVal := boost(b) + q.boost = &boostVal + return q +} + +func (q *BooleanQuery) Boost() float64 { + return q.boost.Value() +} + +func (q *BooleanQuery) initPrimarySearchers(i search.Reader, options search.SearcherOptions) ( + mustSearcher, shouldSearcher, mustNotSearcher search.Searcher, err error) { + if len(q.mustNots) > 0 { + mustNotSearcher, err = q.mustNots.disjunction(i, options, 1) + if err != nil { + return nil, nil, nil, err + } + } + + if len(q.musts) > 0 { + mustSearcher, err = q.musts.conjunction(i, options) + if err != nil { + if mustNotSearcher != nil { + _ = mustNotSearcher.Close() + } + return nil, nil, nil, err + } + } + + if len(q.shoulds) > 0 { + shouldSearcher, err = q.shoulds.disjunction(i, options, q.minShould) + if err != nil { + if mustNotSearcher != 
nil { + _ = mustNotSearcher.Close() + } + if mustSearcher != nil { + _ = mustSearcher.Close() + } + return nil, nil, nil, err + } + } + + return mustSearcher, shouldSearcher, mustNotSearcher, nil +} + +func (q *BooleanQuery) Searcher(i search.Reader, options search.SearcherOptions) (rv search.Searcher, err error) { + mustSearcher, shouldSearcher, mustNotSearcher, err := q.initPrimarySearchers(i, options) + if err != nil { + return nil, err + } + + mustSearcher = replaceMatchNoneWithNil(mustSearcher) + shouldSearcher = replaceMatchNoneWithNil(shouldSearcher) + mustNotSearcher = replaceMatchNoneWithNil(mustNotSearcher) + + if mustSearcher == nil && shouldSearcher == nil && mustNotSearcher == nil { + // if all 3 are nil, return MatchNone + return searcher.NewMatchNoneSearcher(i, options) + } else if mustSearcher == nil && shouldSearcher != nil && mustNotSearcher == nil { + // optimization, if only should searcher, just return it instead + return shouldSearcher, nil + } else if mustSearcher == nil && shouldSearcher == nil && mustNotSearcher != nil { + // if only mustNotSearcher, start with MatchAll + var err error + mustSearcher, err = searcher.NewMatchAllSearcher(i, 1, similarity.ConstantScorer(1), options) + if err != nil { + return nil, err + } + } + + if q.scorer == nil { + q.scorer = similarity.NewCompositeSumScorer() + } + + return searcher.NewBooleanSearcher(mustSearcher, shouldSearcher, mustNotSearcher, q.scorer, options) +} + +func replaceMatchNoneWithNil(s search.Searcher) search.Searcher { + if _, ok := s.(*searcher.MatchNoneSearcher); ok { + return nil + } + return s +} + +func (q *BooleanQuery) Validate() error { + if len(q.musts) > 0 { + for _, mq := range q.musts { + if mq, ok := mq.(validatableQuery); ok { + err := mq.Validate() + if err != nil { + return err + } + } + } + } + if len(q.shoulds) > 0 { + for _, sq := range q.shoulds { + if sq, ok := sq.(validatableQuery); ok { + err := sq.Validate() + if err != nil { + return err + } + } + } + } + if 
len(q.mustNots) > 0 { + for _, mnq := range q.mustNots { + if mnq, ok := mnq.(validatableQuery); ok { + err := mnq.Validate() + if err != nil { + return err + } + } + } + } + if len(q.musts) == 0 && len(q.shoulds) == 0 && len(q.mustNots) == 0 { + return fmt.Errorf("boolean query must contain at least one must or should or not must clause") + } + return nil +} + +type DateRangeQuery struct { + start time.Time + end time.Time + inclusiveStart bool + inclusiveEnd bool + field string + boost *boost + scorer search.Scorer +} + +// NewDateRangeQuery creates a new Query for ranges +// of date values. +// Date strings are parsed using the DateTimeParser configured in the +// top-level config.QueryDateTimeParser +// Either, but not both endpoints can be nil. +func NewDateRangeQuery(start, end time.Time) *DateRangeQuery { + return NewDateRangeInclusiveQuery(start, end, true, false) +} + +// NewDateRangeInclusiveQuery creates a new Query for ranges +// of date values. +// Date strings are parsed using the DateTimeParser configured in the +// top-level config.QueryDateTimeParser +// Either, but not both endpoints can be nil. +// startInclusive and endInclusive control inclusion of the endpoints. 
+func NewDateRangeInclusiveQuery(start, end time.Time, startInclusive, endInclusive bool) *DateRangeQuery { + return &DateRangeQuery{ + start: start, + end: end, + inclusiveStart: startInclusive, + inclusiveEnd: endInclusive, + } +} + +// Start returns the date range start and if the start is included in the query +func (q *DateRangeQuery) Start() (time.Time, bool) { + return q.start, q.inclusiveStart +} + +// End returns the date range end and if the end is included in the query +func (q *DateRangeQuery) End() (time.Time, bool) { + return q.end, q.inclusiveEnd +} + +func (q *DateRangeQuery) SetBoost(b float64) *DateRangeQuery { + boostVal := boost(b) + q.boost = &boostVal + return q +} + +func (q *DateRangeQuery) Boost() float64 { + return q.boost.Value() +} + +func (q *DateRangeQuery) SetField(f string) *DateRangeQuery { + q.field = f + return q +} + +func (q *DateRangeQuery) Field() string { + return q.field +} + +func (q *DateRangeQuery) Searcher(i search.Reader, options search.SearcherOptions) (search.Searcher, error) { + min, max, err := q.parseEndpoints() + if err != nil { + return nil, err + } + + field := q.field + if q.field == "" { + field = options.DefaultSearchField + } + + if q.scorer == nil { + q.scorer = similarity.ConstantScorer(1) + } + + return searcher.NewNumericRangeSearcher(i, min, max, q.inclusiveStart, q.inclusiveEnd, field, + q.boost.Value(), q.scorer, similarity.NewCompositeSumScorer(), options) +} + +func (q *DateRangeQuery) parseEndpoints() (min, max float64, err error) { + min = math.Inf(-1) + max = math.Inf(1) + if !q.start.IsZero() { + if !isDatetimeCompatible(q.start) { + // overflow + return 0, 0, fmt.Errorf("invalid/unsupported date range, start: %v", q.start) + } + startInt64 := q.start.UnixNano() + min = numeric.Int64ToFloat64(startInt64) + } + if !q.end.IsZero() { + if !isDatetimeCompatible(q.end) { + // overflow + return 0, 0, fmt.Errorf("invalid/unsupported date range, end: %v", q.end) + } + endInt64 := q.end.UnixNano() + max 
= numeric.Int64ToFloat64(endInt64) + } + + return min, max, nil +} + +func (q *DateRangeQuery) Validate() error { + if q.start.IsZero() && q.end.IsZero() { + return fmt.Errorf("must specify start or end") + } + _, _, err := q.parseEndpoints() + if err != nil { + return err + } + return nil +} + +func isDatetimeCompatible(t time.Time) bool { + if t.Before(time.Unix(0, math.MinInt64)) || t.After(time.Unix(0, math.MaxInt64)) { + return false + } + + return true +} + +type FuzzyQuery struct { + term string + prefix int + fuzziness int + field string + boost *boost + scorer search.Scorer +} + +// NewFuzzyQuery creates a new Query which finds +// documents containing terms within a specific +// fuzziness of the specified term. +// The default fuzziness is 1. +// +// The current implementation uses Levenshtein edit +// distance as the fuzziness metric. +func NewFuzzyQuery(term string) *FuzzyQuery { + return &FuzzyQuery{ + term: term, + fuzziness: 1, + } +} + +// Term returns the term being queried +func (q *FuzzyQuery) Term() string { + return q.term +} + +// PrefixLen returns the prefix match value +func (q *FuzzyQuery) Prefix() int { + return q.prefix +} + +// Fuzziness returns the fuzziness of the query +func (q *FuzzyQuery) Fuzziness() int { + return q.fuzziness +} + +func (q *FuzzyQuery) SetBoost(b float64) *FuzzyQuery { + boostVal := boost(b) + q.boost = &boostVal + return q +} + +func (q *FuzzyQuery) Boost() float64 { + return q.boost.Value() +} + +func (q *FuzzyQuery) SetField(f string) *FuzzyQuery { + q.field = f + return q +} + +func (q *FuzzyQuery) Field() string { + return q.field +} + +func (q *FuzzyQuery) SetFuzziness(f int) *FuzzyQuery { + q.fuzziness = f + return q +} + +func (q *FuzzyQuery) SetPrefix(p int) *FuzzyQuery { + q.prefix = p + return q +} + +func (q *FuzzyQuery) Searcher(i search.Reader, options search.SearcherOptions) (search.Searcher, error) { + field := q.field + if q.field == "" { + field = options.DefaultSearchField + } + return 
searcher.NewFuzzySearcher(i, q.term, q.prefix, q.fuzziness, field, q.boost.Value(), + q.scorer, similarity.NewCompositeSumScorer(), options) +} + +type GeoBoundingBoxQuery struct { + topLeft []float64 + bottomRight []float64 + field string + boost *boost + scorer search.Scorer +} + +// NewGeoBoundingBoxQuery creates a new Query for performing geo bounding +// box searches. The arguments describe the position of the box and documents +// which have an indexed geo point inside the box will be returned. +func NewGeoBoundingBoxQuery(topLeftLon, topLeftLat, bottomRightLon, bottomRightLat float64) *GeoBoundingBoxQuery { + return &GeoBoundingBoxQuery{ + topLeft: []float64{topLeftLon, topLeftLat}, + bottomRight: []float64{bottomRightLon, bottomRightLat}, + } +} + +// TopLeft returns the start corner of the bounding box +func (q *GeoBoundingBoxQuery) TopLeft() []float64 { + return q.topLeft +} + +// BottomRight returns the end cornder of the bounding box +func (q *GeoBoundingBoxQuery) BottomRight() []float64 { + return q.bottomRight +} + +func (q *GeoBoundingBoxQuery) SetBoost(b float64) *GeoBoundingBoxQuery { + boostVal := boost(b) + q.boost = &boostVal + return q +} + +func (q *GeoBoundingBoxQuery) Boost() float64 { + return q.boost.Value() +} + +func (q *GeoBoundingBoxQuery) SetField(f string) *GeoBoundingBoxQuery { + q.field = f + return q +} + +func (q *GeoBoundingBoxQuery) Field() string { + return q.field +} + +func (q *GeoBoundingBoxQuery) Searcher(i search.Reader, options search.SearcherOptions) (search.Searcher, error) { + field := q.field + if q.field == "" { + field = options.DefaultSearchField + } + + if q.scorer == nil { + q.scorer = similarity.ConstantScorer(1) + } + + if q.bottomRight[0] < q.topLeft[0] { + // cross date line, rewrite as two parts + + leftSearcher, err := searcher.NewGeoBoundingBoxSearcher(i, + -180, q.bottomRight[1], q.bottomRight[0], q.topLeft[1], + field, q.boost.Value(), q.scorer, similarity.NewCompositeSumScorer(), + options, true, 
geoPrecisionStep) + if err != nil { + return nil, err + } + rightSearcher, err := searcher.NewGeoBoundingBoxSearcher(i, + q.topLeft[0], q.bottomRight[1], 180, q.topLeft[1], + field, q.boost.Value(), q.scorer, similarity.NewCompositeSumScorer(), + options, true, geoPrecisionStep) + if err != nil { + _ = leftSearcher.Close() + return nil, err + } + + return searcher.NewDisjunctionSearcher(i, []search.Searcher{leftSearcher, rightSearcher}, + 0, similarity.NewCompositeSumScorer(), options) + } + + return searcher.NewGeoBoundingBoxSearcher(i, q.topLeft[0], q.bottomRight[1], q.bottomRight[0], q.topLeft[1], + field, q.boost.Value(), q.scorer, similarity.NewCompositeSumScorer(), + options, true, geoPrecisionStep) +} + +func (q *GeoBoundingBoxQuery) Validate() error { + return nil +} + +type GeoDistanceQuery struct { + location []float64 + distance string + field string + boost *boost + scorer search.Scorer +} + +// NewGeoDistanceQuery creates a new Query for performing geo distance +// searches. The arguments describe a position and a distance. Documents +// which have an indexed geo point which is less than or equal to the provided +// distance from the given position will be returned. 
+func NewGeoDistanceQuery(lon, lat float64, distance string) *GeoDistanceQuery { + return &GeoDistanceQuery{ + location: []float64{lon, lat}, + distance: distance, + } +} + +// Location returns the location being queried +func (q *GeoDistanceQuery) Location() []float64 { + return q.location +} + +// Distance returns the distance being queried +func (q *GeoDistanceQuery) Distance() string { + return q.distance +} + +func (q *GeoDistanceQuery) SetBoost(b float64) *GeoDistanceQuery { + boostVal := boost(b) + q.boost = &boostVal + return q +} + +func (q *GeoDistanceQuery) Boost() float64 { + return q.boost.Value() +} + +func (q *GeoDistanceQuery) SetField(f string) *GeoDistanceQuery { + q.field = f + return q +} + +func (q *GeoDistanceQuery) Field() string { + return q.field +} + +func (q *GeoDistanceQuery) Searcher(i search.Reader, + options search.SearcherOptions) (search.Searcher, error) { + field := q.field + if q.field == "" { + field = options.DefaultSearchField + } + + dist, err := geo.ParseDistance(q.distance) + if err != nil { + return nil, err + } + + return searcher.NewGeoPointDistanceSearcher(i, q.location[0], q.location[1], dist, + field, q.boost.Value(), q.scorer, similarity.NewCompositeSumScorer(), options, geoPrecisionStep) +} + +func (q *GeoDistanceQuery) Validate() error { + return nil +} + +type GeoBoundingPolygonQuery struct { + points []geo.Point + field string + boost *boost + scorer search.Scorer +} + +// FIXME document like the others +func NewGeoBoundingPolygonQuery(points []geo.Point) *GeoBoundingPolygonQuery { + return &GeoBoundingPolygonQuery{ + points: points} +} + +// Points returns all the points being queried inside the bounding box +func (q *GeoBoundingPolygonQuery) Points() []geo.Point { + return q.points +} + +func (q *GeoBoundingPolygonQuery) SetBoost(b float64) *GeoBoundingPolygonQuery { + boostVal := boost(b) + q.boost = &boostVal + return q +} + +func (q *GeoBoundingPolygonQuery) Boost() float64 { + return q.boost.Value() +} + 
+func (q *GeoBoundingPolygonQuery) SetField(f string) *GeoBoundingPolygonQuery { + q.field = f + return q +} + +func (q *GeoBoundingPolygonQuery) Field() string { + return q.field +} + +func (q *GeoBoundingPolygonQuery) Searcher(i search.Reader, + options search.SearcherOptions) (search.Searcher, error) { + field := q.field + if q.field == "" { + field = options.DefaultSearchField + } + + return searcher.NewGeoBoundedPolygonSearcher(i, q.points, field, q.boost.Value(), + q.scorer, similarity.NewCompositeSumScorer(), options, geoPrecisionStep) +} + +func (q *GeoBoundingPolygonQuery) Validate() error { + return nil +} + +type MatchAllQuery struct { + boost *boost +} + +// NewMatchAllQuery creates a Query which will +// match all documents in the index. +func NewMatchAllQuery() *MatchAllQuery { + return &MatchAllQuery{} +} + +func (q *MatchAllQuery) SetBoost(b float64) *MatchAllQuery { + boostVal := boost(b) + q.boost = &boostVal + return q +} + +func (q *MatchAllQuery) Boost() float64 { + return q.boost.Value() +} + +func (q *MatchAllQuery) Searcher(i search.Reader, options search.SearcherOptions) (search.Searcher, error) { + return searcher.NewMatchAllSearcher(i, q.boost.Value(), similarity.ConstantScorer(1), options) +} + +type MatchNoneQuery struct { + boost *boost +} + +// NewMatchNoneQuery creates a Query which will not +// match any documents in the index. 
+func NewMatchNoneQuery() *MatchNoneQuery { + return &MatchNoneQuery{} +} + +func (q *MatchNoneQuery) SetBoost(b float64) *MatchNoneQuery { + boostVal := boost(b) + q.boost = &boostVal + return q +} + +func (q *MatchNoneQuery) Boost() float64 { + return q.boost.Value() +} + +func (q *MatchNoneQuery) Searcher(i search.Reader, options search.SearcherOptions) (search.Searcher, error) { + return searcher.NewMatchNoneSearcher(i, options) +} + +type MatchPhraseQuery struct { + matchPhrase string + field string + analyzer *analysis.Analyzer + boost *boost + slop int +} + +// NewMatchPhraseQuery creates a new Query object +// for matching phrases in the index. +// An Analyzer is chosen based on the field. +// Input text is analyzed using this analyzer. +// Token terms resulting from this analysis are +// used to build a search phrase. Result documents +// must match this phrase. Queried field must have been indexed with +// IncludeTermVectors set to true. +func NewMatchPhraseQuery(matchPhrase string) *MatchPhraseQuery { + return &MatchPhraseQuery{ + matchPhrase: matchPhrase, + } +} + +// Phrase returns the phrase being queried +func (q *MatchPhraseQuery) Phrase() string { + return q.matchPhrase +} + +func (q *MatchPhraseQuery) SetBoost(b float64) *MatchPhraseQuery { + boostVal := boost(b) + q.boost = &boostVal + return q +} + +func (q *MatchPhraseQuery) Boost() float64 { + return q.boost.Value() +} + +func (q *MatchPhraseQuery) SetField(f string) *MatchPhraseQuery { + q.field = f + return q +} + +func (q *MatchPhraseQuery) Field() string { + return q.field +} + +// Slop returns the acceptable distance between tokens +func (q *MatchPhraseQuery) Slop() int { + return q.slop +} + +// SetSlop updates the sloppyness of the query +// the phrase terms can be as "dist" terms away from each other +func (q *MatchPhraseQuery) SetSlop(dist int) *MatchPhraseQuery { + q.slop = dist + return q +} + +func (q *MatchPhraseQuery) SetAnalyzer(a *analysis.Analyzer) *MatchPhraseQuery { + 
q.analyzer = a + return q +} + +func (q *MatchPhraseQuery) Analyzer() *analysis.Analyzer { + return q.analyzer +} + +func (q *MatchPhraseQuery) Searcher(i search.Reader, options search.SearcherOptions) (search.Searcher, error) { + field := q.field + if q.field == "" { + field = options.DefaultSearchField + } + + var tokens analysis.TokenStream + if q.analyzer != nil { + tokens = q.analyzer.Analyze([]byte(q.matchPhrase)) + } else if options.DefaultAnalyzer != nil { + tokens = options.DefaultAnalyzer.Analyze([]byte(q.matchPhrase)) + } else { + tokens = tokenizer.MakeTokenStream([]byte(q.matchPhrase)) + } + + if len(tokens) > 0 { + phrase := tokenStreamToPhrase(tokens) + phraseQuery := NewMultiPhraseQuery(phrase) + phraseQuery.SetField(field) + phraseQuery.SetBoost(q.boost.Value()) + phraseQuery.SetSlop(q.slop) + return phraseQuery.Searcher(i, options) + } + noneQuery := NewMatchNoneQuery() + return noneQuery.Searcher(i, options) +} + +func tokenStreamToPhrase(tokens analysis.TokenStream) [][]string { + firstPosition := int(^uint(0) >> 1) + lastPosition := 0 + var currPosition int + for _, token := range tokens { + currPosition += token.PositionIncr + if currPosition < firstPosition { + firstPosition = currPosition + } + if currPosition > lastPosition { + lastPosition = currPosition + } + } + phraseLen := lastPosition - firstPosition + 1 + if phraseLen > 0 { + rv := make([][]string, phraseLen) + currPosition = 0 + for _, token := range tokens { + currPosition += token.PositionIncr + pos := currPosition - firstPosition + rv[pos] = append(rv[pos], string(token.Term)) + } + return rv + } + return nil +} + +type MatchQueryOperator int + +const ( + // Document must satisfy AT LEAST ONE of term searches. + MatchQueryOperatorOr = 0 + // Document must satisfy ALL of term searches. 
+ MatchQueryOperatorAnd = 1 +) + +type MatchQuery struct { + match string + field string + analyzer *analysis.Analyzer + boost *boost + prefix int + fuzziness int + operator MatchQueryOperator +} + +// NewMatchQuery creates a Query for matching text. +// An Analyzer is chosen based on the field. +// Input text is analyzed using this analyzer. +// Token terms resulting from this analysis are +// used to perform term searches. Result documents +// must satisfy at least one of these term searches. +func NewMatchQuery(match string) *MatchQuery { + return &MatchQuery{ + match: match, + operator: MatchQueryOperatorOr, + } +} + +// Match returns the term being queried +func (q *MatchQuery) Match() string { + return q.match +} + +func (q *MatchQuery) SetBoost(b float64) *MatchQuery { + boostVal := boost(b) + q.boost = &boostVal + return q +} + +func (q *MatchQuery) Boost() float64 { + return q.boost.Value() +} + +func (q *MatchQuery) SetField(f string) *MatchQuery { + q.field = f + return q +} + +func (q *MatchQuery) Field() string { + return q.field +} + +func (q *MatchQuery) SetFuzziness(f int) *MatchQuery { + q.fuzziness = f + return q +} + +func (q *MatchQuery) Fuzziness() int { + return q.fuzziness +} + +func (q *MatchQuery) SetPrefix(p int) *MatchQuery { + q.prefix = p + return q +} + +func (q *MatchQuery) Prefix() int { + return q.prefix +} + +func (q *MatchQuery) Analyzer() *analysis.Analyzer { + return q.analyzer +} + +func (q *MatchQuery) SetAnalyzer(a *analysis.Analyzer) *MatchQuery { + q.analyzer = a + return q +} + +func (q *MatchQuery) SetOperator(operator MatchQueryOperator) *MatchQuery { + q.operator = operator + return q +} + +func (q *MatchQuery) Operator() MatchQueryOperator { + return q.operator +} + +func (q *MatchQuery) Searcher(i search.Reader, options search.SearcherOptions) (search.Searcher, error) { + field := q.field + if q.field == "" { + field = options.DefaultSearchField + } + + var tokens analysis.TokenStream + if q.analyzer != nil { + tokens 
= q.analyzer.Analyze([]byte(q.match)) + } else if options.DefaultAnalyzer != nil { + tokens = options.DefaultAnalyzer.Analyze([]byte(q.match)) + } else { + tokens = tokenizer.MakeTokenStream([]byte(q.match)) + } + + if len(tokens) > 0 { + tqs := make([]Query, len(tokens)) + if q.fuzziness != 0 { + for i, token := range tokens { + query := NewFuzzyQuery(string(token.Term)) + query.SetFuzziness(q.fuzziness) + query.SetPrefix(q.prefix) + query.SetField(field) + query.SetBoost(q.boost.Value()) + tqs[i] = query + } + } else { + for i, token := range tokens { + tq := NewTermQuery(string(token.Term)) + tq.SetField(field) + tq.SetBoost(q.boost.Value()) + tqs[i] = tq + } + } + + switch q.operator { + case MatchQueryOperatorOr: + booleanQuery := NewBooleanQuery() + booleanQuery.AddShould(tqs...) + booleanQuery.SetMinShould(1) + booleanQuery.SetBoost(q.boost.Value()) + return booleanQuery.Searcher(i, options) + + case MatchQueryOperatorAnd: + booleanQuery := NewBooleanQuery() + booleanQuery.AddMust(tqs...) + booleanQuery.SetBoost(q.boost.Value()) + return booleanQuery.Searcher(i, options) + + default: + return nil, fmt.Errorf("unhandled operator %d", q.operator) + } + } + noneQuery := NewMatchNoneQuery() + return noneQuery.Searcher(i, options) +} + +type MultiPhraseQuery struct { + terms [][]string + field string + boost *boost + scorer search.Scorer + slop int +} + +// NewMultiPhraseQuery creates a new Query for finding +// term phrases in the index. +// It is like PhraseQuery, but each position in the +// phrase may be satisfied by a list of terms +// as opposed to just one. +// At least one of the terms must exist in the correct +// order, at the correct index offsets, in the +// specified field. Queried field must have been indexed with +// IncludeTermVectors set to true. 
+func NewMultiPhraseQuery(terms [][]string) *MultiPhraseQuery { + return &MultiPhraseQuery{ + terms: terms, + } +} + +// Terms returns the term phrases being queried +func (q *MultiPhraseQuery) Terms() [][]string { + return q.terms +} + +func (q *MultiPhraseQuery) SetBoost(b float64) *MultiPhraseQuery { + boostVal := boost(b) + q.boost = &boostVal + return q +} + +func (q *MultiPhraseQuery) Boost() float64 { + return q.boost.Value() +} + +func (q *MultiPhraseQuery) SetField(f string) *MultiPhraseQuery { + q.field = f + return q +} + +func (q *MultiPhraseQuery) Field() string { + return q.field +} + +// Slop returns the acceptable distance between terms +func (q *MultiPhraseQuery) Slop() int { + return q.slop +} + +// SetSlop updates the sloppyness of the query +// the phrase terms can be as "dist" terms away from each other +func (q *MultiPhraseQuery) SetSlop(dist int) *MultiPhraseQuery { + q.slop = dist + return q +} + +func (q *MultiPhraseQuery) Searcher(i search.Reader, options search.SearcherOptions) (search.Searcher, error) { + field := q.field + if q.field == "" { + field = options.DefaultSearchField + } + + return searcher.NewSloppyMultiPhraseSearcher(i, q.terms, field, q.slop, q.scorer, options) +} + +func (q *MultiPhraseQuery) Validate() error { + if len(q.terms) < 1 { + return fmt.Errorf("phrase query must contain at least one term") + } + return nil +} + +type NumericRangeQuery struct { + min float64 + max float64 + inclusiveMin bool + inclusiveMax bool + field string + boost *boost + scorer search.Scorer +} + +var MinNumeric = math.Inf(-1) +var MaxNumeric = math.Inf(1) + +// NewNumericRangeQuery creates a new Query for ranges +// of numeric values. +// Either, but not both endpoints can be nil. +// The minimum value is inclusive. +// The maximum value is exclusive. 
+func NewNumericRangeQuery(min, max float64) *NumericRangeQuery { + return NewNumericRangeInclusiveQuery(min, max, true, false) +} + +// NewNumericRangeInclusiveQuery creates a new Query for ranges +// of numeric values. +// Either, but not both endpoints can be nil. +// Control endpoint inclusion with inclusiveMin, inclusiveMax. +func NewNumericRangeInclusiveQuery(min, max float64, minInclusive, maxInclusive bool) *NumericRangeQuery { + return &NumericRangeQuery{ + min: min, + max: max, + inclusiveMin: minInclusive, + inclusiveMax: maxInclusive, + } +} + +// Min returns the numeric range lower bound and if the lowerbound is included +func (q *NumericRangeQuery) Min() (float64, bool) { + return q.min, q.inclusiveMin +} + +// Max returns the numeric range upperbound and if the upperbound is included +func (q *NumericRangeQuery) Max() (float64, bool) { + return q.max, q.inclusiveMax +} + +func (q *NumericRangeQuery) SetBoost(b float64) *NumericRangeQuery { + boostVal := boost(b) + q.boost = &boostVal + return q +} + +func (q *NumericRangeQuery) Boost() float64 { + return q.boost.Value() +} + +func (q *NumericRangeQuery) SetField(f string) *NumericRangeQuery { + q.field = f + return q +} + +func (q *NumericRangeQuery) Field() string { + return q.field +} + +func (q *NumericRangeQuery) Searcher(i search.Reader, options search.SearcherOptions) (search.Searcher, error) { + field := q.field + if q.field == "" { + field = options.DefaultSearchField + } + if q.scorer == nil { + q.scorer = similarity.ConstantScorer(1) + } + return searcher.NewNumericRangeSearcher(i, q.min, q.max, q.inclusiveMin, q.inclusiveMax, field, + q.boost.Value(), q.scorer, similarity.NewCompositeSumScorer(), options) +} + +func (q *NumericRangeQuery) Validate() error { + if q.min == MinNumeric && q.max == MaxNumeric { + return fmt.Errorf("numeric range query must specify min or max") + } + return nil +} + +type PrefixQuery struct { + prefix string + field string + boost *boost + scorer search.Scorer 
+} + +// NewPrefixQuery creates a new Query which finds +// documents containing terms that start with the +// specified prefix. +func NewPrefixQuery(prefix string) *PrefixQuery { + return &PrefixQuery{ + prefix: prefix, + } +} + +// Prefix return the prefix being queried +func (q *PrefixQuery) Prefix() string { + return q.prefix +} + +func (q *PrefixQuery) SetBoost(b float64) *PrefixQuery { + boostVal := boost(b) + q.boost = &boostVal + return q +} + +func (q *PrefixQuery) Boost() float64 { + return q.boost.Value() +} + +func (q *PrefixQuery) SetField(f string) *PrefixQuery { + q.field = f + return q +} + +func (q *PrefixQuery) Field() string { + return q.field +} + +func (q *PrefixQuery) Searcher(i search.Reader, options search.SearcherOptions) (search.Searcher, error) { + field := q.field + if q.field == "" { + field = options.DefaultSearchField + } + return searcher.NewTermPrefixSearcher(i, q.prefix, field, q.boost.Value(), + q.scorer, similarity.NewCompositeSumScorer(), options) +} + +type RegexpQuery struct { + regexp string + field string + boost *boost + scorer search.Scorer +} + +// NewRegexpQuery creates a new Query which finds +// documents containing terms that match the +// specified regular expression. 
+func NewRegexpQuery(regexp string) *RegexpQuery { + return &RegexpQuery{ + regexp: regexp, + } +} + +// Regexp returns the regular expression being queried +func (q *RegexpQuery) Regexp() string { + return q.regexp +} + +func (q *RegexpQuery) SetBoost(b float64) *RegexpQuery { + boostVal := boost(b) + q.boost = &boostVal + return q +} + +func (q *RegexpQuery) Boost() float64 { + return q.boost.Value() +} + +func (q *RegexpQuery) SetField(f string) *RegexpQuery { + q.field = f + return q +} + +func (q *RegexpQuery) Field() string { + return q.field +} + +func (q *RegexpQuery) Searcher(i search.Reader, options search.SearcherOptions) (search.Searcher, error) { + field := q.field + if q.field == "" { + field = options.DefaultSearchField + } + + // require that pattern NOT be anchored to start and end of term. + // do not attempt to remove trailing $, its presence is not + // known to interfere with LiteralPrefix() the way ^ does + // and removing $ introduces possible ambiguities with escaped \$, \\$, etc + actualRegexp := q.regexp + actualRegexp = strings.TrimPrefix(actualRegexp, "^") + + return searcher.NewRegexpStringSearcher(i, actualRegexp, field, + q.boost.Value(), q.scorer, similarity.NewCompositeSumScorer(), options) +} + +func (q *RegexpQuery) Validate() error { + return nil // real validation delayed until searcher constructor +} + +type TermQuery struct { + term string + field string + boost *boost + scorer search.Scorer +} + +// NewTermQuery creates a new Query for finding an +// exact term match in the index. 
+func NewTermQuery(term string) *TermQuery { + return &TermQuery{ + term: term, + } +} + +func (q *TermQuery) SetBoost(b float64) *TermQuery { + boostVal := boost(b) + q.boost = &boostVal + return q +} + +func (q *TermQuery) Boost() float64 { + return q.boost.Value() +} + +func (q *TermQuery) SetField(f string) *TermQuery { + q.field = f + return q +} + +func (q *TermQuery) Field() string { + return q.field +} + +// Term returns the exact term being queried +func (q *TermQuery) Term() string { + return q.term +} + +func (q *TermQuery) Searcher(i search.Reader, options search.SearcherOptions) (search.Searcher, error) { + field := q.field + if q.field == "" { + field = options.DefaultSearchField + } + return searcher.NewTermSearcher(i, q.term, field, q.boost.Value(), q.scorer, options) +} + +type TermRangeQuery struct { + min string + max string + inclusiveMin bool + inclusiveMax bool + field string + boost *boost + scorer search.Scorer +} + +// NewTermRangeQuery creates a new Query for ranges +// of text terms. +// Either, but not both endpoints can be "". +// The minimum value is inclusive. +// The maximum value is exclusive. +func NewTermRangeQuery(min, max string) *TermRangeQuery { + return NewTermRangeInclusiveQuery(min, max, true, false) +} + +// NewTermRangeInclusiveQuery creates a new Query for ranges +// of text terms. +// Either, but not both endpoints can be "". +// Control endpoint inclusion with inclusiveMin, inclusiveMax. 
+func NewTermRangeInclusiveQuery(min, max string, minInclusive, maxInclusive bool) *TermRangeQuery { + return &TermRangeQuery{ + min: min, + max: max, + inclusiveMin: minInclusive, + inclusiveMax: maxInclusive, + } +} + +func (q *TermRangeQuery) SetBoost(b float64) *TermRangeQuery { + boostVal := boost(b) + q.boost = &boostVal + return q +} + +func (q *TermRangeQuery) Boost() float64 { + return q.boost.Value() +} + +func (q *TermRangeQuery) SetField(f string) *TermRangeQuery { + q.field = f + return q +} + +func (q *TermRangeQuery) Field() string { + return q.field +} + +func (q *TermRangeQuery) Searcher(i search.Reader, options search.SearcherOptions) (search.Searcher, error) { + field := q.field + if q.field == "" { + field = options.DefaultSearchField + } + var minTerm []byte + if q.min != "" { + minTerm = []byte(q.min) + } + var maxTerm []byte + if q.max != "" { + maxTerm = []byte(q.max) + } + return searcher.NewTermRangeSearcher(i, minTerm, maxTerm, q.inclusiveMin, q.inclusiveMax, field, + q.boost.Value(), q.scorer, similarity.NewCompositeSumScorer(), options) +} + +func (q *TermRangeQuery) Validate() error { + if q.min == "" && q.max == "" { + return fmt.Errorf("term range query must specify min or max") + } + return nil +} + +// Min returns the query lower bound and if the lower bound is included in query +func (q *TermRangeQuery) Min() (string, bool) { + return q.min, q.inclusiveMin +} + +// Max returns the query upperbound and if the upper bound is included in the query +func (q *TermRangeQuery) Max() (string, bool) { + return q.max, q.inclusiveMax +} + +type WildcardQuery struct { + wildcard string + field string + boost *boost + scorer search.Scorer +} + +// NewWildcardQuery creates a new Query which finds +// documents containing terms that match the +// specified wildcard. In the wildcard pattern '*' +// will match any sequence of 0 or more characters, +// and '?' will match any single character. 
+func NewWildcardQuery(wildcard string) *WildcardQuery { + return &WildcardQuery{ + wildcard: wildcard, + } +} + +// Wildcard returns the wildcard being queried +func (q *WildcardQuery) Wildcard() string { + return q.wildcard +} + +func (q *WildcardQuery) SetBoost(b float64) *WildcardQuery { + boostVal := boost(b) + q.boost = &boostVal + return q +} + +func (q *WildcardQuery) Boost() float64 { + return q.boost.Value() +} + +func (q *WildcardQuery) SetField(f string) *WildcardQuery { + q.field = f + return q +} + +func (q *WildcardQuery) Field() string { + return q.field +} + +var wildcardRegexpReplacer = strings.NewReplacer( + // characters in the wildcard that must + // be escaped in the regexp + "+", `\+`, + "(", `\(`, + ")", `\)`, + "^", `\^`, + "$", `\$`, + ".", `\.`, + "{", `\{`, + "}", `\}`, + "[", `\[`, + "]", `\]`, + `|`, `\|`, + `\`, `\\`, + // wildcard characters + "*", ".*", + "?", ".") + +func (q *WildcardQuery) Searcher(i search.Reader, options search.SearcherOptions) (search.Searcher, error) { + field := q.field + if q.field == "" { + field = options.DefaultSearchField + } + + regexpString := wildcardRegexpReplacer.Replace(q.wildcard) + + return searcher.NewRegexpStringSearcher(i, regexpString, field, + q.boost.Value(), q.scorer, similarity.NewCompositeSumScorer(), options) +} + +func (q *WildcardQuery) Validate() error { + return nil // real validation delayed until searcher constructor +} diff --git a/vendor/github.com/blugelabs/bluge/reader.go b/vendor/github.com/blugelabs/bluge/reader.go new file mode 100644 index 000000000..7a6738afd --- /dev/null +++ b/vendor/github.com/blugelabs/bluge/reader.go @@ -0,0 +1,100 @@ +// Copyright (c) 2020 The Bluge Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bluge + +import ( + "context" + "fmt" + + "github.com/blugelabs/bluge/index" + + segment "github.com/blugelabs/bluge_segment_api" + + "github.com/blugelabs/bluge/search" +) + +type Reader struct { + config Config + reader *index.Snapshot +} + +func OpenReader(config Config) (*Reader, error) { + rv := &Reader{ + config: config, + } + var err error + rv.reader, err = index.OpenReader(config.indexConfig) + if err != nil { + return nil, fmt.Errorf("error opening index: %w", err) + } + + return rv, nil +} + +func (r *Reader) Count() (count uint64, err error) { + return r.reader.Count() +} + +func (r *Reader) Fields() (fields []string, err error) { + return r.reader.Fields() +} + +type StoredFieldVisitor func(field string, value []byte) bool + +func (r *Reader) VisitStoredFields(number uint64, visitor StoredFieldVisitor) error { + return r.reader.VisitStoredFields(number, segment.StoredFieldVisitor(visitor)) +} + +func (r *Reader) Search(ctx context.Context, req SearchRequest) (search.DocumentMatchIterator, error) { + collector := req.Collector() + searcher, err := req.Searcher(r.reader, r.config) + if err != nil { + return nil, err + } + + memNeeded := memNeededForSearch(searcher, collector) + if r.config.SearchStartFunc != nil { + err = r.config.SearchStartFunc(memNeeded) + } + if err != nil { + return nil, err + } + if r.config.SearchEndFunc != nil { + defer r.config.SearchEndFunc(memNeeded) + } + + var dmItr search.DocumentMatchIterator + dmItr, err = collector.Collect(ctx, req.Aggregations(), searcher) + if err != nil { + return nil, err 
+ } + + // FIXME search stats on reader? + + return dmItr, nil +} + +func (r *Reader) DictionaryIterator(field string, automaton segment.Automaton, start, end []byte) (segment.DictionaryIterator, error) { + return r.reader.DictionaryIterator(field, automaton, start, end) +} + +func (r *Reader) Backup(path string, cancel chan struct{}) error { + dir := index.NewFileSystemDirectory(path) + return r.reader.Backup(dir, cancel) +} + +func (r *Reader) Close() error { + return r.reader.Close() +} diff --git a/vendor/github.com/blugelabs/bluge/search.go b/vendor/github.com/blugelabs/bluge/search.go new file mode 100644 index 000000000..5da51a8bf --- /dev/null +++ b/vendor/github.com/blugelabs/bluge/search.go @@ -0,0 +1,271 @@ +// Copyright (c) 2020 Couchbase, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package bluge + +import ( + "github.com/blugelabs/bluge/search" + "github.com/blugelabs/bluge/search/aggregations" + "github.com/blugelabs/bluge/search/collector" +) + +type SearchRequest interface { + Collector() search.Collector + Searcher(i search.Reader, config Config) (search.Searcher, error) + AddAggregation(name string, aggregation search.Aggregation) + Aggregations() search.Aggregations +} + +type SearchOptions struct { + ExplainScores bool + IncludeLocations bool + Score string // FIXME go away +} + +type BaseSearch struct { + query Query + options SearchOptions + aggregations search.Aggregations +} + +func (b BaseSearch) Query() Query { + return b.query +} + +func (b BaseSearch) Options() SearchOptions { + return b.options +} + +func (b BaseSearch) Aggregations() search.Aggregations { + return b.aggregations +} + +func (b BaseSearch) Searcher(i search.Reader, config Config) (search.Searcher, error) { + return b.query.Searcher(i, searchOptionsFromConfig(config, b.options)) +} + +// TopNSearch is used to search for a fixed number of matches which can be sorted by a custom sort order. +// It also allows for skipping a specified number of matches which can be used to enable pagination. 
+type TopNSearch struct { + BaseSearch + n int + from int + sort search.SortOrder + after [][]byte + reversed bool +} + +// NewTopNSearch creates a search which will find the matches and return the first N when ordered by the +// specified sort order (default: score descending) +func NewTopNSearch(n int, q Query) *TopNSearch { + return &TopNSearch{ + BaseSearch: BaseSearch{ + query: q, + aggregations: make(search.Aggregations), + }, + n: n, + sort: search.SortOrder{ + search.SortBy(search.DocumentScore()).Desc(), + }, + } +} + +var standardAggs = search.Aggregations{ + "count": aggregations.CountMatches(), + "max_score": aggregations.MaxStartingAt(search.DocumentScore(), 0), + "duration": aggregations.Duration(), +} + +// WithStandardAggregations adds the standard aggregations in the search query +// The standard aggregations are: +// - count (total number of documents that matched the query) +// - max_score (the highest score of all the matched documents) +// - duration (time taken performing the search) +func (s *TopNSearch) WithStandardAggregations() *TopNSearch { + for name, agg := range standardAggs { + s.AddAggregation(name, agg) + } + return s +} + +// Size returns the number of matches this search request will return +func (s *TopNSearch) Size() int { + return s.n +} + +// SetFrom sets the number of results to skip +func (s *TopNSearch) SetFrom(from int) *TopNSearch { + s.from = from + return s +} + +// From returns the number of matches that will be skipped +func (s *TopNSearch) From() int { + return s.from +} + +// After can be used to specify a sort key, any match with a sort key less than this will be skipped +func (s *TopNSearch) After(after [][]byte) *TopNSearch { + s.after = after + return s +} + +// Before can be used to specify a sort key, any match with a sort key greather than this will be skipped +func (s *TopNSearch) Before(before [][]byte) *TopNSearch { + s.after = before + s.reversed = true + return s +} + +// SortBy is a convenience method 
to specify search result sort order using a simple string slice. +// Strings in the slice are interpreted as the name of a field to sort ascending. +// The following special cases are handled. +// - the prefix '-' will sort in descending order +// - the special field '_score' can be used sort by score +func (s *TopNSearch) SortBy(order []string) *TopNSearch { + s.sort = search.ParseSortOrderStrings(order) + return s +} + +// SortByCustom sets a custom sort order used to sort the matches of the search +func (s *TopNSearch) SortByCustom(order search.SortOrder) *TopNSearch { + s.sort = order + return s +} + +// SortOrder returns the sort order of the current search +func (s *TopNSearch) SortOrder() search.SortOrder { + return s.sort +} + +// ExplainScores enables the addition of scoring explanation to each match +func (s *TopNSearch) ExplainScores() *TopNSearch { + s.options.ExplainScores = true + return s +} + +// IncludeLocations enables the addition of match location in the original field +func (s *TopNSearch) IncludeLocations() *TopNSearch { + s.options.IncludeLocations = true + return s +} + +func (s *TopNSearch) SetScore(mode string) *TopNSearch { + s.options.Score = mode + return s +} + +func (s *TopNSearch) Collector() search.Collector { + if s.after != nil { + collectorSort := s.sort + if s.reversed { + // preserve original sort order in the request + collectorSort = s.sort.Copy() + collectorSort.Reverse() + } + rv := collector.NewTopNCollectorAfter(s.n, collectorSort, s.after, s.reversed) + return rv + } + return collector.NewTopNCollector(s.n, s.from, s.sort) +} + +func searchOptionsFromConfig(config Config, options SearchOptions) search.SearcherOptions { + return search.SearcherOptions{ + SimilarityForField: func(field string) search.Similarity { + if pfs, ok := config.PerFieldSimilarity[field]; ok { + return pfs + } + return config.DefaultSimilarity + }, + DefaultSearchField: config.DefaultSearchField, + DefaultAnalyzer: config.DefaultSearchAnalyzer, + 
Explain: options.ExplainScores, + IncludeTermVectors: options.IncludeLocations, + Score: options.Score, + } +} + +func (s *TopNSearch) AddAggregation(name string, aggregation search.Aggregation) { + s.aggregations.Add(name, aggregation) +} + +type AllMatches struct { + BaseSearch +} + +func NewAllMatches(q Query) *AllMatches { + return &AllMatches{ + BaseSearch: BaseSearch{ + query: q, + aggregations: make(search.Aggregations), + }, + } +} + +func (s *AllMatches) WithStandardAggregations() *AllMatches { + for name, agg := range standardAggs { + s.AddAggregation(name, agg) + } + return s +} + +func (s *AllMatches) AddAggregation(name string, aggregation search.Aggregation) { + s.aggregations.Add(name, aggregation) +} + +func (s *AllMatches) ExplainScores() *AllMatches { + s.options.ExplainScores = true + return s +} + +func (s *AllMatches) IncludeLocations() *AllMatches { + s.options.IncludeLocations = true + return s +} + +func (s *AllMatches) Collector() search.Collector { + return collector.NewAllCollector() +} + +func (s *TopNSearch) AllMatches(i search.Reader, config Config) (search.Searcher, error) { + return s.query.Searcher(i, search.SearcherOptions{ + DefaultSearchField: config.DefaultSearchField, + Explain: s.options.ExplainScores, + IncludeTermVectors: s.options.IncludeLocations, + }) +} + +// memNeededForSearch is a helper function that returns an estimate of RAM +// needed to execute a search request. 
+func memNeededForSearch( + searcher search.Searcher, + coll search.Collector) uint64 { + numDocMatches := coll.BackingSize() + searcher.DocumentMatchPoolSize() + + estimate := 0 + + // overhead, size in bytes from collector + estimate += coll.Size() + + // pre-allocing DocumentMatchPool + estimate += searchContextEmptySize + numDocMatches*documentMatchEmptySize + + // searcher overhead + estimate += searcher.Size() + + // overhead from results, lowestMatchOutsideResults + estimate += (numDocMatches + 1) * documentMatchEmptySize + + return uint64(estimate) +} diff --git a/vendor/github.com/blugelabs/bluge/search/aggregations.go b/vendor/github.com/blugelabs/bluge/search/aggregations.go new file mode 100644 index 000000000..1b23e593e --- /dev/null +++ b/vendor/github.com/blugelabs/bluge/search/aggregations.go @@ -0,0 +1,145 @@ +// Copyright (c) 2020 The Bluge Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package search + +import ( + "time" +) + +type Aggregation interface { + Fields() []string + Calculator() Calculator +} + +type Aggregations map[string]Aggregation + +func (a Aggregations) Add(name string, aggregation Aggregation) { + a[name] = aggregation +} + +func (a Aggregations) Fields() []string { + var rv []string + for _, aggregation := range a { + rv = append(rv, aggregation.Fields()...) 
+ } + return rv +} + +type Calculator interface { + Consume(*DocumentMatch) + Finish() + Merge(Calculator) +} + +type MetricCalculator interface { + Calculator + Value() float64 +} + +type DurationCalculator interface { + Calculator + Duration() time.Duration +} + +type BucketCalculator interface { + Calculator + Buckets() []*Bucket +} + +type Bucket struct { + name string + aggregations map[string]Calculator +} + +func NewBucket(name string, aggregations map[string]Aggregation) *Bucket { + rv := &Bucket{ + name: name, + aggregations: make(map[string]Calculator), + } + for name, agg := range aggregations { + rv.aggregations[name] = agg.Calculator() + } + return rv +} + +func (b *Bucket) Merge(other *Bucket) { + for otherAggName, otherCalculator := range other.aggregations { + if thisCalculator, ok := b.aggregations[otherAggName]; ok { + thisCalculator.Merge(otherCalculator) + } else { + b.aggregations[otherAggName] = otherCalculator + } + } +} + +func (b *Bucket) Name() string { + return b.name +} + +func (b *Bucket) Consume(d *DocumentMatch) { + for _, aggCalc := range b.aggregations { + aggCalc.Consume(d) + } +} + +func (b *Bucket) Finish() { + for _, aggCalc := range b.aggregations { + aggCalc.Finish() + } +} + +func (b *Bucket) Aggregations() map[string]Calculator { + return b.aggregations +} + +func (b *Bucket) Count() uint64 { + if countAgg, ok := b.aggregations["count"]; ok { + if countCalc, ok := countAgg.(MetricCalculator); ok { + return uint64(countCalc.Value()) + } + } + return 0 +} + +func (b *Bucket) Duration() time.Duration { + if durationAgg, ok := b.aggregations["duration"]; ok { + if durationCalc, ok := durationAgg.(DurationCalculator); ok { + return durationCalc.Duration() + } + } + return 0 +} + +func (b *Bucket) Metric(name string) float64 { + if agg, ok := b.aggregations[name]; ok { + if calc, ok := agg.(MetricCalculator); ok { + return calc.Value() + } + } + return 0 +} + +func (b *Bucket) Buckets(name string) []*Bucket { + if agg, ok := 
b.aggregations[name]; ok { + if calc, ok := agg.(BucketCalculator); ok { + return calc.Buckets() + } + } + return nil +} + +func (b *Bucket) Aggregation(name string) Calculator { + return b.aggregations[name] +} diff --git a/vendor/github.com/blugelabs/bluge/search/aggregations/cardinality.go b/vendor/github.com/blugelabs/bluge/search/aggregations/cardinality.go new file mode 100644 index 000000000..7f86fdbb6 --- /dev/null +++ b/vendor/github.com/blugelabs/bluge/search/aggregations/cardinality.go @@ -0,0 +1,67 @@ +// Copyright (c) 2020 The Bluge Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package aggregations + +import ( + "github.com/axiomhq/hyperloglog" + "github.com/blugelabs/bluge/search" +) + +type CardinalityMetric struct { + src search.TextValuesSource +} + +func Cardinality(src search.TextValuesSource) *CardinalityMetric { + return &CardinalityMetric{ + src: src, + } +} + +func (c *CardinalityMetric) Fields() []string { + return c.src.Fields() +} + +func (c *CardinalityMetric) Calculator() search.Calculator { + rv := &CardinalityCalculator{ + src: c.src, + sketch: hyperloglog.New16(), + } + return rv +} + +type CardinalityCalculator struct { + src search.TextValuesSource + sketch *hyperloglog.Sketch +} + +func (c *CardinalityCalculator) Value() float64 { + return float64(c.sketch.Estimate()) +} + +func (c *CardinalityCalculator) Consume(d *search.DocumentMatch) { + for _, val := range c.src.Values(d) { + c.sketch.Insert(val) + } +} + +func (c *CardinalityCalculator) Merge(other search.Calculator) { + if other, ok := other.(*CardinalityCalculator); ok { + _ = c.sketch.Merge(other.sketch) + } +} + +func (c *CardinalityCalculator) Finish() { + +} diff --git a/vendor/github.com/blugelabs/bluge/search/aggregations/count.go b/vendor/github.com/blugelabs/bluge/search/aggregations/count.go new file mode 100644 index 000000000..d82784ed3 --- /dev/null +++ b/vendor/github.com/blugelabs/bluge/search/aggregations/count.go @@ -0,0 +1,35 @@ +// Copyright (c) 2020 The Bluge Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package aggregations + +import "github.com/blugelabs/bluge/search" + +var staticCount = []float64{1} + +type countingSource struct{} + +func (*countingSource) Fields() []string { + return nil +} + +func (*countingSource) Numbers(_ *search.DocumentMatch) []float64 { + return staticCount +} + +var countSource = &countingSource{} + +func CountMatches() *SingleValueMetric { + return Sum(countSource) +} diff --git a/vendor/github.com/blugelabs/bluge/search/aggregations/duration.go b/vendor/github.com/blugelabs/bluge/search/aggregations/duration.go new file mode 100644 index 000000000..d375d01e0 --- /dev/null +++ b/vendor/github.com/blugelabs/bluge/search/aggregations/duration.go @@ -0,0 +1,54 @@ +// Copyright (c) 2020 The Bluge Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package aggregations + +import ( + "time" + + "github.com/blugelabs/bluge/search" +) + +type DurationMetric struct{} + +func Duration() *DurationMetric { + return &DurationMetric{} +} + +func (d *DurationMetric) Fields() []string { + return nil +} + +func (d *DurationMetric) Calculator() search.Calculator { + return &DurationCalculator{ + origin: time.Now(), + } +} + +type DurationCalculator struct { + origin time.Time + since time.Duration +} + +func (d *DurationCalculator) Consume(*search.DocumentMatch) {} + +func (d *DurationCalculator) Finish() { + d.since = time.Since(d.origin) +} + +func (d *DurationCalculator) Merge(other search.Calculator) {} + +func (d *DurationCalculator) Duration() time.Duration { + return d.since +} diff --git a/vendor/github.com/blugelabs/bluge/search/aggregations/filter.go b/vendor/github.com/blugelabs/bluge/search/aggregations/filter.go new file mode 100644 index 000000000..4cf06cec5 --- /dev/null +++ b/vendor/github.com/blugelabs/bluge/search/aggregations/filter.go @@ -0,0 +1,131 @@ +// Copyright (c) 2020 The Bluge Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package aggregations + +import ( + "time" + + "github.com/blugelabs/bluge/numeric/geo" + + "github.com/blugelabs/bluge/search" +) + +type FilteringTextSource struct { + source search.TextValuesSource + filter func([]byte) bool +} + +func FilterText(source search.TextValuesSource, filter func([]byte) bool) *FilteringTextSource { + return &FilteringTextSource{ + source: source, + filter: filter, + } +} + +func (f *FilteringTextSource) Fields() []string { + return f.source.Fields() +} + +func (f *FilteringTextSource) Values(match *search.DocumentMatch) [][]byte { + var rv [][]byte + values := f.source.Values(match) + for _, val := range values { + if f.filter(val) { + rv = append(rv, val) + } + } + return rv +} + +type FilteringNumericSource struct { + source search.NumericValuesSource + filter func(float64) bool +} + +func FilterNumeric(source search.NumericValuesSource, filter func(float64) bool) *FilteringNumericSource { + return &FilteringNumericSource{ + source: source, + filter: filter, + } +} + +func (f *FilteringNumericSource) Fields() []string { + return f.source.Fields() +} + +func (f *FilteringNumericSource) Numbers(match *search.DocumentMatch) []float64 { + var rv []float64 + values := f.source.Numbers(match) + for _, val := range values { + if f.filter(val) { + rv = append(rv, val) + } + } + return rv +} + +type FilteringDateSource struct { + source search.DateValuesSource + filter func(time.Time) bool +} + +func FilterDate(source search.DateValuesSource, filter func(time.Time) bool) *FilteringDateSource { + return &FilteringDateSource{ + source: source, + filter: filter, + } +} + +func (f *FilteringDateSource) Fields() []string { + return f.source.Fields() +} + +func (f *FilteringDateSource) Dates(match *search.DocumentMatch) []time.Time { + var rv []time.Time + values := f.source.Dates(match) + for _, val := range values { + if f.filter(val) { + rv = append(rv, val) + } + } + return rv +} + +type FilteringGeoPointSource struct { + source 
search.GeoPointValuesSource + filter func(*geo.Point) bool +} + +func FilterGeoPoint(source search.GeoPointValuesSource, filter func(*geo.Point) bool) *FilteringGeoPointSource { + return &FilteringGeoPointSource{ + source: source, + filter: filter, + } +} + +func (f *FilteringGeoPointSource) Fields() []string { + return f.source.Fields() +} + +func (f *FilteringGeoPointSource) GeoPoints(match *search.DocumentMatch) []*geo.Point { + var rv []*geo.Point + values := f.source.GeoPoints(match) + for _, val := range values { + if f.filter(val) { + rv = append(rv, val) + } + } + return rv +} diff --git a/vendor/github.com/blugelabs/bluge/search/aggregations/metric.go b/vendor/github.com/blugelabs/bluge/search/aggregations/metric.go new file mode 100644 index 000000000..7afe55687 --- /dev/null +++ b/vendor/github.com/blugelabs/bluge/search/aggregations/metric.go @@ -0,0 +1,173 @@ +// Copyright (c) 2020 The Bluge Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package aggregations + +import ( + "math" + + "github.com/blugelabs/bluge/search" +) + +type SingleValueMetric struct { + src search.NumericValuesSource + init float64 + compute SingleValueCalculatorFunc +} + +func Sum(src search.NumericValuesSource) *SingleValueMetric { + return &SingleValueMetric{ + src: src, + compute: func(s *SingleValueCalculator, val float64) { + s.val += val + }, + } +} + +func Min(src search.NumericValuesSource) *SingleValueMetric { + return &SingleValueMetric{ + init: math.Inf(1), + src: src, + compute: func(s *SingleValueCalculator, val float64) { + if val < s.val { + s.val = val + } + }, + } +} + +func Max(src search.NumericValuesSource) *SingleValueMetric { + return MaxStartingAt(src, math.Inf(-1)) +} + +func MaxStartingAt(src search.NumericValuesSource, initial float64) *SingleValueMetric { + return &SingleValueMetric{ + init: initial, + src: src, + compute: func(s *SingleValueCalculator, val float64) { + if val > s.val { + s.val = val + } + }, + } +} + +func (s *SingleValueMetric) Fields() []string { + return s.src.Fields() +} + +func (s *SingleValueMetric) Calculator() search.Calculator { + rv := &SingleValueCalculator{ + val: s.init, + src: s.src, + compute: s.compute, + } + return rv +} + +type SingleValueCalculatorFunc func(*SingleValueCalculator, float64) + +type SingleValueCalculator struct { + src search.NumericValuesSource + val float64 + compute SingleValueCalculatorFunc +} + +func (s *SingleValueCalculator) Consume(d *search.DocumentMatch) { + for _, val := range s.src.Numbers(d) { + s.compute(s, val) + } +} + +func (s *SingleValueCalculator) Merge(other search.Calculator) { + if other, ok := other.(*SingleValueCalculator); ok { + s.compute(s, other.val) + } +} + +func (s *SingleValueCalculator) Finish() {} + +func (s *SingleValueCalculator) Value() float64 { + return s.val +} + +type WeightedAvgMetric struct { + src search.NumericValuesSource + weight search.NumericValuesSource +} + +func Avg(src 
search.NumericValuesSource) *WeightedAvgMetric { + return &WeightedAvgMetric{ + src: src, + } +} + +func WeightedAvg(src, weight search.NumericValuesSource) *WeightedAvgMetric { + return &WeightedAvgMetric{ + src: src, + weight: weight, + } +} + +func (a *WeightedAvgMetric) Fields() []string { + rv := a.src.Fields() + if a.weight != nil { + rv = append(rv, a.weight.Fields()...) + } + return rv +} + +func (a *WeightedAvgMetric) Calculator() search.Calculator { + rv := &WeightedAvgCalculator{ + src: a.src, + weight: a.weight, + } + return rv +} + +type WeightedAvgCalculator struct { + src search.NumericValuesSource + weight search.NumericValuesSource + val float64 + weights float64 +} + +func (a *WeightedAvgCalculator) Value() float64 { + return a.val / a.weights +} + +func (a *WeightedAvgCalculator) Consume(d *search.DocumentMatch) { + weight := 1.0 + if a.weight != nil { + weightValues := a.weight.Numbers(d) + if len(weightValues) > 0 { + weight = weightValues[0] + } + } + for _, val := range a.src.Numbers(d) { + a.val += val * weight + a.weights += weight + } +} + +func (a *WeightedAvgCalculator) Merge(other search.Calculator) { + if other, ok := other.(*WeightedAvgCalculator); ok { + a.val += other.val + a.weights += other.weights + } +} + +func (a *WeightedAvgCalculator) Finish() { + +} diff --git a/vendor/github.com/blugelabs/bluge/search/aggregations/percentiles.go b/vendor/github.com/blugelabs/bluge/search/aggregations/percentiles.go new file mode 100644 index 000000000..a1d40c567 --- /dev/null +++ b/vendor/github.com/blugelabs/bluge/search/aggregations/percentiles.go @@ -0,0 +1,82 @@ +// Copyright (c) 2020 The Bluge Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package aggregations + +import ( + "fmt" + + "github.com/blugelabs/bluge/search" + "github.com/caio/go-tdigest" +) + +type QuantilesMetric struct { + src search.NumericValuesSource + compression float64 +} + +func Quantiles(src search.NumericValuesSource) *QuantilesMetric { + return &QuantilesMetric{ + src: src, + compression: 100, + } +} + +func (c *QuantilesMetric) SetCompression(compression float64) error { + if compression < 1 { + return fmt.Errorf("compression must be > 1") + } + c.compression = compression + return nil +} + +func (c *QuantilesMetric) Fields() []string { + return c.src.Fields() +} + +func (c *QuantilesMetric) Calculator() search.Calculator { + rv := &QuantilesCalculator{ + src: c.src, + } + rv.tdigest, _ = tdigest.New(tdigest.Compression(c.compression)) + return rv +} + +type QuantilesCalculator struct { + src search.NumericValuesSource + tdigest *tdigest.TDigest +} + +func (c *QuantilesCalculator) Quantile(percent float64) (float64, error) { + if percent < 0 || percent > 1 { + return 0, fmt.Errorf("percent must be between 0 and 1") + } + return c.tdigest.Quantile(percent), nil +} + +func (c *QuantilesCalculator) Consume(d *search.DocumentMatch) { + for _, val := range c.src.Numbers(d) { + _ = c.tdigest.Add(val) + } +} + +func (c *QuantilesCalculator) Merge(other search.Calculator) { + if other, ok := other.(*QuantilesCalculator); ok { + _ = c.tdigest.Merge(other.tdigest) + } +} + +func (c *QuantilesCalculator) Finish() { + +} diff --git a/vendor/github.com/blugelabs/bluge/search/aggregations/range.go 
b/vendor/github.com/blugelabs/bluge/search/aggregations/range.go new file mode 100644 index 000000000..bf74228f3 --- /dev/null +++ b/vendor/github.com/blugelabs/bluge/search/aggregations/range.go @@ -0,0 +1,125 @@ +// Copyright (c) 2020 The Bluge Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package aggregations + +import ( + "fmt" + + "github.com/blugelabs/bluge/search" +) + +type RangeAggregation struct { + src search.NumericValuesSource + ranges []*NumericRange + aggregations map[string]search.Aggregation +} + +func Ranges(src search.NumericValuesSource) *RangeAggregation { + return &RangeAggregation{ + src: src, + aggregations: map[string]search.Aggregation{ + "count": CountMatches(), + }, + } +} + +func (a *RangeAggregation) Fields() []string { + return a.src.Fields() +} + +func (a *RangeAggregation) AddRange(rang *NumericRange) *RangeAggregation { + a.ranges = append(a.ranges, rang) + return a +} + +func (a *RangeAggregation) AddAggregation(name string, agg search.Aggregation) *RangeAggregation { + a.aggregations[name] = agg + return a +} + +func (a *RangeAggregation) Calculator() search.Calculator { + rv := &RangeCalculator{ + src: a.src, + ranges: a.ranges, + } + + for _, rang := range a.ranges { + bucketName := rang.name + if bucketName == "" { + bucketName = fmt.Sprintf("[%f,%f)", rang.low, rang.high) + } + newBucket := search.NewBucket(bucketName, a.aggregations) + rv.bucketCalculators = append(rv.bucketCalculators, newBucket) + } + 
+ return rv +} + +type RangeCalculator struct { + src search.NumericValuesSource + ranges []*NumericRange + bucketCalculators []*search.Bucket +} + +func (b *RangeCalculator) Consume(d *search.DocumentMatch) { + for _, val := range b.src.Numbers(d) { + for i, rang := range b.ranges { + if val >= rang.low && val < rang.high { + b.bucketCalculators[i].Consume(d) + } + } + } +} + +func (b *RangeCalculator) Merge(other search.Calculator) { + if other, ok := other.(*RangeCalculator); ok { + if len(b.bucketCalculators) == len(other.bucketCalculators) { + for i := range b.bucketCalculators { + b.bucketCalculators[i].Merge(other.bucketCalculators[i]) + } + } + } +} + +func (b *RangeCalculator) Finish() { + for _, rang := range b.bucketCalculators { + rang.Finish() + } +} + +func (b *RangeCalculator) Buckets() []*search.Bucket { + return b.bucketCalculators +} + +type NumericRange struct { + name string + low float64 + high float64 +} + +func Range(low, high float64) *NumericRange { + return &NumericRange{ + low: low, + high: high, + } +} + +func NamedRange(name string, low, high float64) *NumericRange { + return &NumericRange{ + name: name, + low: low, + high: high, + } +} diff --git a/vendor/github.com/blugelabs/bluge/search/aggregations/range_date.go b/vendor/github.com/blugelabs/bluge/search/aggregations/range_date.go new file mode 100644 index 000000000..a731001a7 --- /dev/null +++ b/vendor/github.com/blugelabs/bluge/search/aggregations/range_date.go @@ -0,0 +1,130 @@ +// Copyright (c) 2020 The Bluge Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package aggregations + +import ( + "fmt" + "time" + + "github.com/blugelabs/bluge/search" +) + +type DateRangeAggregation struct { + src search.DateValuesSource + ranges []*DateRange + aggregations map[string]search.Aggregation +} + +func DateRanges(src search.DateValuesSource) *DateRangeAggregation { + return &DateRangeAggregation{ + src: src, + aggregations: map[string]search.Aggregation{ + "count": CountMatches(), + }, + } +} + +func (a *DateRangeAggregation) Fields() []string { + return a.src.Fields() +} + +func (a *DateRangeAggregation) AddRange(rang *DateRange) *DateRangeAggregation { + a.ranges = append(a.ranges, rang) + return a +} + +func (a *DateRangeAggregation) AddAggregation(name string, agg search.Aggregation) *DateRangeAggregation { + a.aggregations[name] = agg + return a +} + +func (a *DateRangeAggregation) Calculator() search.Calculator { + rv := &DateRangeCalculator{ + src: a.src, + ranges: a.ranges, + } + + for _, rang := range a.ranges { + bucketName := rang.name + if bucketName == "" { + bucketName = fmt.Sprintf("[%s,%s)", rang.start.Format(time.RFC3339), rang.end.Format(time.RFC3339)) + } + newBucket := search.NewBucket(bucketName, a.aggregations) + rv.bucketCalculators = append(rv.bucketCalculators, newBucket) + } + + return rv +} + +type DateRangeCalculator struct { + src search.DateValuesSource + ranges []*DateRange + bucketCalculators []*search.Bucket +} + +func (b *DateRangeCalculator) Consume(d *search.DocumentMatch) { + for _, val := range b.src.Dates(d) { + for i, rang := range b.ranges { + if !rang.start.IsZero() && val.Before(rang.start) { + continue + } + if !rang.end.IsZero() && (val.Equal(rang.end) || val.After(rang.end)) { + continue + } + b.bucketCalculators[i].Consume(d) + } + } +} + +func (b *DateRangeCalculator) Merge(other search.Calculator) { + if other, ok := other.(*DateRangeCalculator); ok { + if 
len(b.bucketCalculators) == len(other.bucketCalculators) { + for i := range b.bucketCalculators { + b.bucketCalculators[i].Merge(other.bucketCalculators[i]) + } + } + } +} + +func (b *DateRangeCalculator) Finish() { + for _, rang := range b.bucketCalculators { + rang.Finish() + } +} + +func (b *DateRangeCalculator) Buckets() []*search.Bucket { + return b.bucketCalculators +} + +type DateRange struct { + name string + start time.Time + end time.Time +} + +func NewDateRange(start, end time.Time) *DateRange { + return &DateRange{ + start: start, + end: end, + } +} + +func NewNamedDateRange(name string, start, end time.Time) *DateRange { + return &DateRange{ + name: name, + start: start, + end: end, + } +} diff --git a/vendor/github.com/blugelabs/bluge/search/aggregations/terms.go b/vendor/github.com/blugelabs/bluge/search/aggregations/terms.go new file mode 100644 index 000000000..ecacef10a --- /dev/null +++ b/vendor/github.com/blugelabs/bluge/search/aggregations/terms.go @@ -0,0 +1,168 @@ +// Copyright (c) 2020 The Bluge Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package aggregations + +import ( + "sort" + + "github.com/blugelabs/bluge/search" +) + +type TermsAggregation struct { + src search.TextValuesSource + size int + + aggregations map[string]search.Aggregation + + lessFunc func(a, b *search.Bucket) bool + desc bool + sortFunc func(p sort.Interface) +} + +func NewTermsAggregation(src search.TextValuesSource, size int) *TermsAggregation { + rv := &TermsAggregation{ + src: src, + size: size, + desc: true, + lessFunc: func(a, b *search.Bucket) bool { + return a.Aggregations()["count"].(search.MetricCalculator).Value() < b.Aggregations()["count"].(search.MetricCalculator).Value() + }, + aggregations: make(map[string]search.Aggregation), + sortFunc: sort.Sort, + } + rv.aggregations["count"] = CountMatches() + return rv +} + +func (t *TermsAggregation) Fields() []string { + rv := t.src.Fields() + for _, agg := range t.aggregations { + rv = append(rv, agg.Fields()...) + } + return rv +} + +func (t *TermsAggregation) AddAggregation(name string, aggregation search.Aggregation) { + t.aggregations[name] = aggregation +} + +func (t *TermsAggregation) Calculator() search.Calculator { + return &TermsCalculator{ + src: t.src, + size: t.size, + aggregations: t.aggregations, + desc: t.desc, + lessFunc: t.lessFunc, + sortFunc: t.sortFunc, + bucketsMap: make(map[string]*search.Bucket), + } +} + +type TermsCalculator struct { + src search.TextValuesSource + size int + + aggregations map[string]search.Aggregation + + bucketsList []*search.Bucket + bucketsMap map[string]*search.Bucket + total int + other int + + desc bool + lessFunc func(a, b *search.Bucket) bool + sortFunc func(p sort.Interface) +} + +func (a *TermsCalculator) Consume(d *search.DocumentMatch) { + a.total++ + for _, term := range a.src.Values(d) { + termStr := string(term) + bucket, ok := a.bucketsMap[termStr] + if ok { + bucket.Consume(d) + } else { + newBucket := search.NewBucket(termStr, a.aggregations) + newBucket.Consume(d) + a.bucketsMap[termStr] = newBucket + 
a.bucketsList = append(a.bucketsList, newBucket) + } + } +} + +func (a *TermsCalculator) Merge(other search.Calculator) { + if other, ok := other.(*TermsCalculator); ok { + // first sum to the totals and others + a.total += other.total + // now, walk all of the other buckets + // if we have a local match, merge otherwise append + for i := range other.bucketsList { + var foundLocal bool + for j := range a.bucketsList { + if other.bucketsList[i].Name() == a.bucketsList[j].Name() { + a.bucketsList[j].Merge(other.bucketsList[i]) + foundLocal = true + } + } + if !foundLocal { + a.bucketsList = append(a.bucketsList, other.bucketsList[i]) + } + } + // now re-invoke finish, this should trim to correct size again + // and recalculate other + a.Finish() + } +} + +func (a *TermsCalculator) Finish() { + // sort the buckets + if a.desc { + a.sortFunc(sort.Reverse(a)) + } else { + a.sortFunc(a) + } + + trimTopN := a.size + if trimTopN > len(a.bucketsList) { + trimTopN = len(a.bucketsList) + } + a.bucketsList = a.bucketsList[:trimTopN] + + var notOther int + for _, bucket := range a.bucketsList { + notOther += int(bucket.Aggregations()["count"].(search.MetricCalculator).Value()) + } + a.other = a.total - notOther +} + +func (a *TermsCalculator) Buckets() []*search.Bucket { + return a.bucketsList +} + +func (a *TermsCalculator) Other() int { + return a.other +} + +func (a *TermsCalculator) Len() int { + return len(a.bucketsList) +} + +func (a *TermsCalculator) Less(i, j int) bool { + return a.lessFunc(a.bucketsList[i], a.bucketsList[j]) +} + +func (a *TermsCalculator) Swap(i, j int) { + a.bucketsList[i], a.bucketsList[j] = a.bucketsList[j], a.bucketsList[i] +} diff --git a/vendor/github.com/blugelabs/bluge/search/collector.go b/vendor/github.com/blugelabs/bluge/search/collector.go new file mode 100644 index 000000000..85e2066e7 --- /dev/null +++ b/vendor/github.com/blugelabs/bluge/search/collector.go @@ -0,0 +1,37 @@ +// Copyright (c) 2020 Couchbase, Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package search + +import ( + "context" +) + +type Collector interface { + Collect(context.Context, Aggregations, Collectible) (DocumentMatchIterator, error) + + Size() int + BackingSize() int +} + +type Collectible interface { + Next(ctx *Context) (*DocumentMatch, error) + DocumentMatchPoolSize() int + Close() error +} + +type DocumentMatchIterator interface { + Next() (*DocumentMatch, error) + Aggregations() *Bucket +} diff --git a/vendor/github.com/blugelabs/bluge/search/collector/all.go b/vendor/github.com/blugelabs/bluge/search/collector/all.go new file mode 100644 index 000000000..3cc606514 --- /dev/null +++ b/vendor/github.com/blugelabs/bluge/search/collector/all.go @@ -0,0 +1,107 @@ +// Copyright (c) 2020 The Bluge Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package collector + +import ( + "context" + + "github.com/blugelabs/bluge/search" +) + +type AllCollector struct { +} + +func NewAllCollector() *AllCollector { + return &AllCollector{} +} + +func (a *AllCollector) Collect(ctx context.Context, aggs search.Aggregations, + searcher search.Collectible) (search.DocumentMatchIterator, error) { + return &AllIterator{ + ctx: ctx, + neededFields: aggs.Fields(), + bucket: search.NewBucket("", aggs), + searcher: searcher, + searchContext: search.NewSearchContext(searcher.DocumentMatchPoolSize(), 0), + }, nil +} + +func (a *AllCollector) Size() int { + return 0 +} + +func (a *AllCollector) BackingSize() int { + return 0 +} + +type AllIterator struct { + ctx context.Context + neededFields []string + bucket *search.Bucket + hitNumber int + searcher search.Collectible + searchContext *search.Context + done bool +} + +func (a *AllIterator) doneCleanup() { + a.done = true + _ = a.searcher.Close() +} + +func (a *AllIterator) Next() (next *search.DocumentMatch, err error) { + if a.done { + return nil, nil + } + if a.hitNumber%CheckDoneEvery == 0 { + select { + case <-a.ctx.Done(): + a.doneCleanup() + return nil, a.ctx.Err() + default: + } + } + + next, err = a.searcher.Next(a.searchContext) + if err != nil { + a.doneCleanup() + return nil, err + } + + if next == nil { + a.bucket.Finish() + a.doneCleanup() + return nil, nil + } + + a.hitNumber++ + next.HitNumber = a.hitNumber + + if len(a.neededFields) > 0 { + err = next.LoadDocumentValues(a.searchContext, a.neededFields) + if err != nil { + a.doneCleanup() + return nil, err + } + } + // calculate aggregations + a.bucket.Consume(next) + + return next, nil +} + +func (a *AllIterator) Aggregations() *search.Bucket { + return a.bucket +} diff --git a/vendor/github.com/blevesearch/bleve/v2/search/collector/heap.go b/vendor/github.com/blugelabs/bluge/search/collector/heap.go similarity index 96% rename from vendor/github.com/blevesearch/bleve/v2/search/collector/heap.go rename to 
vendor/github.com/blugelabs/bluge/search/collector/heap.go index 9503f0060..4c4225fa5 100644 --- a/vendor/github.com/blevesearch/bleve/v2/search/collector/heap.go +++ b/vendor/github.com/blugelabs/bluge/search/collector/heap.go @@ -1,4 +1,4 @@ -// Copyright (c) 2014 Couchbase, Inc. +// Copyright (c) 2020 Couchbase, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -17,7 +17,7 @@ package collector import ( "container/heap" - "github.com/blevesearch/bleve/v2/search" + "github.com/blugelabs/bluge/search" ) type collectStoreHeap struct { diff --git a/vendor/github.com/blugelabs/bluge/search/collector/iterator.go b/vendor/github.com/blugelabs/bluge/search/collector/iterator.go new file mode 100644 index 000000000..0e0e418f6 --- /dev/null +++ b/vendor/github.com/blugelabs/bluge/search/collector/iterator.go @@ -0,0 +1,42 @@ +// Copyright (c) 2020 The Bluge Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package collector + +import ( + "github.com/blugelabs/bluge/search" +) + +type TopNIterator struct { + results search.DocumentMatchCollection + bucket *search.Bucket + index int + err error +} + +func (i *TopNIterator) Next() (*search.DocumentMatch, error) { + if i.err != nil { + return nil, i.err + } + if i.index < len(i.results) { + rv := i.results[i.index] + i.index++ + return rv, nil + } + return nil, nil +} + +func (i *TopNIterator) Aggregations() *search.Bucket { + return i.bucket +} diff --git a/vendor/github.com/blevesearch/bleve_index_api/sizes.go b/vendor/github.com/blugelabs/bluge/search/collector/size.go similarity index 77% rename from vendor/github.com/blevesearch/bleve_index_api/sizes.go rename to vendor/github.com/blugelabs/bluge/search/collector/size.go index 65c2dd000..5218121d3 100644 --- a/vendor/github.com/blevesearch/bleve_index_api/sizes.go +++ b/vendor/github.com/blugelabs/bluge/search/collector/size.go @@ -1,4 +1,4 @@ -// Copyright (c) 2020 Couchbase, Inc. +// Copyright (c) 2020 The Bluge Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,24 +12,21 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package index +package collector import ( "reflect" ) func init() { - var m map[int]int - sizeOfMap = int(reflect.TypeOf(m).Size()) var ptr *int sizeOfPtr = int(reflect.TypeOf(ptr).Size()) var str string sizeOfString = int(reflect.TypeOf(str).Size()) - var u64 uint64 - sizeOfUint64 = int(reflect.TypeOf(u64).Size()) + var coll TopNCollector + reflectStaticSizeTopNCollector = int(reflect.TypeOf(coll).Size()) } -var sizeOfMap int var sizeOfPtr int var sizeOfString int -var sizeOfUint64 int +var reflectStaticSizeTopNCollector int diff --git a/vendor/github.com/blevesearch/bleve/v2/search/collector/slice.go b/vendor/github.com/blugelabs/bluge/search/collector/slice.go similarity index 95% rename from vendor/github.com/blevesearch/bleve/v2/search/collector/slice.go rename to vendor/github.com/blugelabs/bluge/search/collector/slice.go index b38d9abc4..1807b6821 100644 --- a/vendor/github.com/blevesearch/bleve/v2/search/collector/slice.go +++ b/vendor/github.com/blugelabs/bluge/search/collector/slice.go @@ -1,4 +1,4 @@ -// Copyright (c) 2014 Couchbase, Inc. +// Copyright (c) 2020 Couchbase, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ package collector -import "github.com/blevesearch/bleve/v2/search" +import "github.com/blugelabs/bluge/search" type collectStoreSlice struct { slice search.DocumentMatchCollection diff --git a/vendor/github.com/blugelabs/bluge/search/collector/topn.go b/vendor/github.com/blugelabs/bluge/search/collector/topn.go new file mode 100644 index 000000000..e94e5fde3 --- /dev/null +++ b/vendor/github.com/blugelabs/bluge/search/collector/topn.go @@ -0,0 +1,264 @@ +// Copyright (c) 2020 Couchbase, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package collector + +import ( + "context" + + "github.com/blugelabs/bluge/search" +) + +type collectorStore interface { + // Add the document, and if the new store size exceeds the provided size + // the last element is removed and returned. If the size has not been + // exceeded, nil is returned. + AddNotExceedingSize(doc *search.DocumentMatch, size int) *search.DocumentMatch + + Final(skip int, fixup collectorFixup) (search.DocumentMatchCollection, error) +} + +// PreAllocSizeSkipCap will cap preallocation to this amount when +// size+skip exceeds this value +var PreAllocSizeSkipCap = 1000 + +type collectorCompare func(i, j *search.DocumentMatch) int + +type collectorFixup func(d *search.DocumentMatch) error + +// TopNCollector collects the top N hits, optionally skipping some results +type TopNCollector struct { + size int + skip int + sort search.SortOrder + results search.DocumentMatchCollection + reverse bool + backingSize int + + store collectorStore + + neededFields []string + + lowestMatchOutsideResults *search.DocumentMatch + searchAfter *search.DocumentMatch +} + +// CheckDoneEvery controls how frequently we check the context deadline +const CheckDoneEvery = 1024 + +// NewTopNCollector builds a collector to find the top 'size' hits +// skipping over the first 'skip' hits +// ordering hits by the provided sort order +func NewTopNCollector(size, skip int, sort search.SortOrder) *TopNCollector { + return newTopNCollector(size, skip, sort, false) +} + +// NewTopNCollector builds a collector to find the top 'size' hits +// skipping over 
the first 'skip' hits +// ordering hits by the provided sort order +func NewTopNCollectorAfter(size int, sort search.SortOrder, after [][]byte, reverse bool) *TopNCollector { + rv := newTopNCollector(size, 0, sort, reverse) + rv.searchAfter = &search.DocumentMatch{ + SortValue: after, + } + + return rv +} + +const switchFromSliceToHeap = 10 + +func newTopNCollector(size, skip int, sort search.SortOrder, reverse bool) *TopNCollector { + hc := &TopNCollector{ + size: size, + skip: skip, + sort: sort, + reverse: reverse, + } + + // pre-allocate space on the store to avoid reslicing + // unless the size + skip is too large, then cap it + // everything should still work, just reslices as necessary + hc.backingSize = size + skip + 1 + if size+skip > PreAllocSizeSkipCap { + hc.backingSize = PreAllocSizeSkipCap + 1 + } + + if size+skip > switchFromSliceToHeap { + hc.store = newStoreHeap(hc.backingSize, func(i, j *search.DocumentMatch) int { + return hc.sort.Compare(i, j) + }) + } else { + hc.store = newStoreSlice(hc.backingSize, func(i, j *search.DocumentMatch) int { + return hc.sort.Compare(i, j) + }) + } + + // these lookups traverse an interface, so do once up-front + hc.neededFields = sort.Fields() + + return hc +} + +func (hc *TopNCollector) Size() int { + sizeInBytes := reflectStaticSizeTopNCollector + sizeOfPtr + + for _, entry := range hc.neededFields { + sizeInBytes += len(entry) + sizeOfString + } + + return sizeInBytes +} + +func (hc *TopNCollector) BackingSize() int { + return hc.backingSize +} + +// Collect goes to the index to find the matching documents +func (hc *TopNCollector) Collect(ctx context.Context, aggs search.Aggregations, + searcher search.Collectible) (search.DocumentMatchIterator, error) { + var err error + var next *search.DocumentMatch + + // ensure that we always close the searcher + defer func() { + _ = searcher.Close() + }() + + searchContext := search.NewSearchContext(hc.backingSize+searcher.DocumentMatchPoolSize(), len(hc.sort)) + + // 
add fields needed by aggregations + hc.neededFields = append(hc.neededFields, aggs.Fields()...) + bucket := search.NewBucket("", aggs) + + var hitNumber int + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + next, err = searcher.Next(searchContext) + } + for err == nil && next != nil { + if hitNumber%CheckDoneEvery == 0 { + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + } + } + + hitNumber++ + next.HitNumber = hitNumber + + err = hc.collectSingle(searchContext, next, bucket) + if err != nil { + return nil, err + } + + next, err = searcher.Next(searchContext) + } + if err != nil { + return nil, err + } + + bucket.Finish() + + // finalize actual results + err = hc.finalizeResults() + if err != nil { + return nil, err + } + + rv := &TopNIterator{ + results: hc.results, + bucket: bucket, + index: 0, + err: nil, + } + return rv, nil +} + +func (hc *TopNCollector) collectSingle(ctx *search.Context, d *search.DocumentMatch, bucket *search.Bucket) error { + var err error + + if len(hc.neededFields) > 0 { + err = d.LoadDocumentValues(ctx, hc.neededFields) + if err != nil { + return err + } + } + + // compute this hits sort value + hc.sort.Compute(d) + + // calculate aggregations + bucket.Consume(d) + + // support search after based pagination, + // if this hit is <= the search after sort key + // we should skip it + if hc.searchAfter != nil { + // exact sort order matches use hit number to break tie + // but we want to allow for exact match, so we pretend + hc.searchAfter.HitNumber = d.HitNumber + if hc.sort.Compare(d, hc.searchAfter) <= 0 { + return nil + } + } + + // optimization, we track lowest sorting hit already removed from heap + // with this one comparison, we can avoid all heap operations if + // this hit would have been added and then immediately removed + if hc.lowestMatchOutsideResults != nil { + cmp := hc.sort.Compare(d, hc.lowestMatchOutsideResults) + if cmp >= 0 { + // this hit can't possibly be in the result set, so 
avoid heap ops + ctx.DocumentMatchPool.Put(d) + return nil + } + } + + removed := hc.store.AddNotExceedingSize(d, hc.size+hc.skip) + if removed != nil { + if hc.lowestMatchOutsideResults == nil { + hc.lowestMatchOutsideResults = removed + } else { + cmp := hc.sort.Compare(removed, hc.lowestMatchOutsideResults) + if cmp < 0 { + tmp := hc.lowestMatchOutsideResults + hc.lowestMatchOutsideResults = removed + ctx.DocumentMatchPool.Put(tmp) + } + } + } + return nil +} + +// finalizeResults starts with the heap containing the final top size+skip +// it now throws away the results to be skipped +// and does final doc id lookup (if necessary) +func (hc *TopNCollector) finalizeResults() error { + var err error + hc.results, err = hc.store.Final(hc.skip, func(doc *search.DocumentMatch) error { + doc.Complete(nil) + return nil + }) + + if hc.reverse { + for i, j := 0, len(hc.results)-1; i < j; i, j = i+1, j-1 { + hc.results[i], hc.results[j] = hc.results[j], hc.results[i] + } + } + + return err +} diff --git a/vendor/github.com/blevesearch/bleve/v2/search/explanation.go b/vendor/github.com/blugelabs/bluge/search/explanation.go similarity index 66% rename from vendor/github.com/blevesearch/bleve/v2/search/explanation.go rename to vendor/github.com/blugelabs/bluge/search/explanation.go index b1ac29aa8..60cb8411c 100644 --- a/vendor/github.com/blevesearch/bleve/v2/search/explanation.go +++ b/vendor/github.com/blugelabs/bluge/search/explanation.go @@ -1,4 +1,4 @@ -// Copyright (c) 2014 Couchbase, Inc. +// Copyright (c) 2020 Couchbase, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -17,37 +17,35 @@ package search import ( "encoding/json" "fmt" - "reflect" - - "github.com/blevesearch/bleve/v2/size" ) -var reflectStaticSizeExplanation int - -func init() { - var e Explanation - reflectStaticSizeExplanation = int(reflect.TypeOf(e).Size()) -} - type Explanation struct { Value float64 `json:"value"` Message string `json:"message"` Children []*Explanation `json:"children,omitempty"` } -func (expl *Explanation) String() string { - js, err := json.MarshalIndent(expl, "", " ") +func NewExplanation(value float64, msg string, children ...*Explanation) *Explanation { + return &Explanation{ + Value: value, + Message: msg, + Children: children, + } +} + +func (e *Explanation) String() string { + js, err := json.MarshalIndent(e, "", " ") if err != nil { return fmt.Sprintf("error serializing explanation to json: %v", err) } return string(js) } -func (expl *Explanation) Size() int { - sizeInBytes := reflectStaticSizeExplanation + size.SizeOfPtr + - len(expl.Message) +func (e *Explanation) Size() int { + sizeInBytes := reflectStaticSizeExplanation + sizeOfPtr + + len(e.Message) - for _, entry := range expl.Children { + for _, entry := range e.Children { sizeInBytes += entry.Size() } diff --git a/vendor/github.com/blevesearch/bleve/v2/search/pool.go b/vendor/github.com/blugelabs/bluge/search/pool.go similarity index 86% rename from vendor/github.com/blevesearch/bleve/v2/search/pool.go rename to vendor/github.com/blugelabs/bluge/search/pool.go index ba8be8fc2..c1371e282 100644 --- a/vendor/github.com/blevesearch/bleve/v2/search/pool.go +++ b/vendor/github.com/blugelabs/bluge/search/pool.go @@ -1,4 +1,4 @@ -// Copyright (c) 2014 Couchbase, Inc. +// Copyright (c) 2020 Couchbase, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -14,17 +14,6 @@ package search -import ( - "reflect" -) - -var reflectStaticSizeDocumentMatchPool int - -func init() { - var dmp DocumentMatchPool - reflectStaticSizeDocumentMatchPool = int(reflect.TypeOf(dmp).Size()) -} - // DocumentMatchPoolTooSmall is a callback function that can be executed // when the DocumentMatchPool does not have sufficient capacity // By default we just perform just-in-time allocation, but you could log @@ -47,18 +36,18 @@ func defaultDocumentMatchPoolTooSmall(p *DocumentMatchPool) *DocumentMatch { // NewDocumentMatchPool will build a DocumentMatchPool with memory // pre-allocated to accommodate the requested number of DocumentMatch // instances -func NewDocumentMatchPool(size, sortsize int) *DocumentMatchPool { +func NewDocumentMatchPool(size, sortSize int) *DocumentMatchPool { avail := make(DocumentMatchCollection, size) // pre-allocate the expected number of instances startBlock := make([]DocumentMatch, size) - startSorts := make([]string, size*sortsize) + startSorts := make([][]byte, size*sortSize) // make these initial instances available i, j := 0, 0 for i < size { avail[i] = &startBlock[i] - avail[i].Sort = startSorts[j:j] - i += 1 - j += sortsize + avail[i].SortValue = startSorts[j:j] + i++ + j += sortSize } return &DocumentMatchPool{ avail: avail, diff --git a/vendor/github.com/blugelabs/bluge/search/search.go b/vendor/github.com/blugelabs/bluge/search/search.go new file mode 100644 index 000000000..3e1de027c --- /dev/null +++ b/vendor/github.com/blugelabs/bluge/search/search.go @@ -0,0 +1,372 @@ +// Copyright (c) 2020 Couchbase, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package search + +import ( + "fmt" + "sort" + + segment "github.com/blugelabs/bluge_segment_api" + + "github.com/blugelabs/bluge/analysis" +) + +type Location struct { + Pos int + Start int + End int +} + +func (l *Location) Size() int { + return reflectStaticSizeLocation +} + +type Locations []*Location + +func (p Locations) Len() int { return len(p) } +func (p Locations) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +func (p Locations) Less(i, j int) bool { + return p[i].Pos < p[j].Pos +} + +func (p Locations) Dedupe() Locations { // destructive! + if len(p) <= 1 { + return p + } + + sort.Sort(p) + + slow := 0 + + for _, pfast := range p { + pslow := p[slow] + if pslow.Pos == pfast.Pos && + pslow.Start == pfast.Start && + pslow.End == pfast.End { + continue // duplicate, so only move fast ahead + } + + slow++ + + p[slow] = pfast + } + + return p[:slow+1] +} + +type TermLocationMap map[string]Locations + +func (t TermLocationMap) AddLocation(term string, location *Location) { + t[term] = append(t[term], location) +} + +type FieldTermLocationMap map[string]TermLocationMap + +type FieldTermLocation struct { + Field string + Term string + Location Location +} + +type FieldFragmentMap map[string][]string + +type DocumentMatch struct { + reader MatchReader + Number uint64 + Score float64 + Explanation *Explanation + Locations FieldTermLocationMap + SortValue [][]byte + + docValues map[string][][]byte + + // used to maintain natural index order + HitNumber int + + // used to temporarily hold field term location information during + // search 
processing in an efficient, recycle-friendly manner, to + // be later incorporated into the Locations map when search + // results are completed + FieldTermLocations []FieldTermLocation +} + +func (dm *DocumentMatch) SetReader(r MatchReader) { + dm.reader = r +} + +func (dm *DocumentMatch) addDocValue(name string, value []byte) { + if dm.docValues == nil { + dm.docValues = make(map[string][][]byte) + } + dm.docValues[name] = append(dm.docValues[name], value) +} + +func (dm *DocumentMatch) LoadDocumentValues(ctx *Context, fields []string) error { + dvReader, err := ctx.DocValueReaderForReader(dm.reader, fields) + if err != nil { + return err + } + + return dvReader.VisitDocumentValues(dm.Number, dm.addDocValue) +} + +func (dm *DocumentMatch) DocValues(field string) [][]byte { + if dm.docValues != nil { + return dm.docValues[field] + } + return nil +} + +func (dm *DocumentMatch) VisitStoredFields(visitor segment.StoredFieldVisitor) error { + return dm.reader.VisitStoredFields(dm.Number, visitor) +} + +// Reset allows an already allocated DocumentMatch to be reused +func (dm *DocumentMatch) Reset() *DocumentMatch { + // remember the [][]byte used for sort + sortValue := dm.SortValue + // remember the FieldTermLocations backing array + ftls := dm.FieldTermLocations + // idiom to copy over from empty DocumentMatch (0 allocations) + *dm = DocumentMatch{} + // reuse the [][]byte already allocated (and reset len to 0) + dm.SortValue = sortValue[:0] + // reuse the FieldTermLocations already allocated (and reset len to 0) + dm.FieldTermLocations = ftls[:0] + return dm +} + +func (dm *DocumentMatch) Size() int { + sizeInBytes := reflectStaticSizeDocumentMatch + sizeOfPtr + + if dm.Explanation != nil { + sizeInBytes += dm.Explanation.Size() + } + + for k, v := range dm.Locations { + sizeInBytes += sizeOfString + len(k) + for k1, v1 := range v { + sizeInBytes += sizeOfString + len(k1) + + sizeOfSlice + for _, entry := range v1 { + sizeInBytes += entry.Size() + } + } + } + + for 
_, entry := range dm.SortValue { + sizeInBytes += sizeOfSlice + len(entry) + } + + return sizeInBytes +} + +// Complete performs final preparation & transformation of the +// DocumentMatch at the end of search processing, also allowing the +// caller to provide an optional preallocated locations slice +func (dm *DocumentMatch) Complete(prealloc []Location) []Location { + // transform the FieldTermLocations slice into the Locations map + nlocs := len(dm.FieldTermLocations) + if nlocs > 0 { + if cap(prealloc) < nlocs { + prealloc = make([]Location, nlocs) + } + prealloc = prealloc[:nlocs] + + var lastField string + var tlm TermLocationMap + var needsDedupe bool + + for i, ftl := range dm.FieldTermLocations { + if lastField != ftl.Field { + lastField = ftl.Field + + if dm.Locations == nil { + dm.Locations = make(FieldTermLocationMap) + } + + tlm = dm.Locations[ftl.Field] + if tlm == nil { + tlm = make(TermLocationMap) + dm.Locations[ftl.Field] = tlm + } + } + + loc := &prealloc[i] + *loc = ftl.Location + + locs := tlm[ftl.Term] + + // if the loc is before or at the last location, then there + // might be duplicates that need to be deduplicated + if !needsDedupe && len(locs) > 0 { + last := locs[len(locs)-1] + if loc.Pos <= last.Pos { + needsDedupe = true + } + } + + tlm[ftl.Term] = append(locs, loc) + + dm.FieldTermLocations[i] = FieldTermLocation{ // recycle + Location: Location{}, + } + } + + if needsDedupe { + for _, tlm := range dm.Locations { + for term, locs := range tlm { + tlm[term] = locs.Dedupe() + } + } + } + } + + dm.FieldTermLocations = dm.FieldTermLocations[:0] // recycle + + return prealloc +} + +func (dm *DocumentMatch) String() string { + return fmt.Sprintf("[%d-%f]", dm.Number, dm.Score) +} + +type DocumentMatchCollection []*DocumentMatch + +func (c DocumentMatchCollection) Len() int { return len(c) } +func (c DocumentMatchCollection) Swap(i, j int) { c[i], c[j] = c[j], c[i] } +func (c DocumentMatchCollection) Less(i, j int) bool { return c[i].Score 
> c[j].Score } + +type Searcher interface { + Next(ctx *Context) (*DocumentMatch, error) + Advance(ctx *Context, number uint64) (*DocumentMatch, error) + Close() error + Count() uint64 + Min() int + Size() int + + DocumentMatchPoolSize() int +} + +type SearcherOptions struct { + SimilarityForField func(field string) Similarity + DefaultSearchField string + DefaultAnalyzer *analysis.Analyzer + Explain bool + IncludeTermVectors bool + Score string +} + +// Context represents the context around a single search +type Context struct { + DocumentMatchPool *DocumentMatchPool + dvReaders map[DocumentValueReadable]segment.DocumentValueReader +} + +func NewSearchContext(size, sortSize int) *Context { + return &Context{ + DocumentMatchPool: NewDocumentMatchPool(size, sortSize), + dvReaders: make(map[DocumentValueReadable]segment.DocumentValueReader), + } +} + +func (sc *Context) DocValueReaderForReader(r DocumentValueReadable, fields []string) (segment.DocumentValueReader, error) { + dvReader := sc.dvReaders[r] + if dvReader == nil { + var err error + dvReader, err = r.DocumentValueReader(fields) + if err != nil { + return nil, err + } + sc.dvReaders[r] = dvReader + } + return dvReader, nil +} + +func (sc *Context) Size() int { + sizeInBytes := reflectStaticSizeSearchContext + sizeOfPtr + + reflectStaticSizeDocumentMatchPool + sizeOfPtr + + if sc.DocumentMatchPool != nil { + for _, entry := range sc.DocumentMatchPool.avail { + if entry != nil { + sizeInBytes += entry.Size() + } + } + } + + return sizeInBytes +} + +type DocumentValueReadable interface { + // DocumentValueReader provides a way to find all of the document + // values stored in the specified fields. The returned + // DocumentValueReader provides a means to visit specific document + // numbers. 
+ DocumentValueReader(fields []string) (segment.DocumentValueReader, error) +} + +type StoredFieldVisitable interface { + // VisitStoredFields will call the visitor for each stored field + // of the specified document number. + VisitStoredFields(number uint64, visitor segment.StoredFieldVisitor) error +} + +type MatchReader interface { + DocumentValueReadable + StoredFieldVisitable +} + +type Reader interface { + DocumentValueReadable + + StoredFieldVisitable + + CollectionStats(field string) (segment.CollectionStats, error) + + // DictionaryLookup provides a way to quickly determine if a term is + // in the dictionary for the specified field. + DictionaryLookup(field string) (segment.DictionaryLookup, error) + + // DictionaryIterator provides a way to explore the terms used in the + // specified field. You can optionally filter these terms + // by the provided Automaton, or start/end terms. + DictionaryIterator(field string, automaton segment.Automaton, start, + end []byte) (segment.DictionaryIterator, error) + + // PostingsIterator provides a way to find information about all documents + // that use the specified term in the specified field. 
+ PostingsIterator(term []byte, field string, includeFreq, includeNorm, + includeTermVectors bool) (segment.PostingsIterator, error) + + // Close releases all resources associated with this Reader + Close() error +} + +type Similarity interface { + ComputeNorm(numTerms int) float32 + Scorer(boost float64, collectionStats segment.CollectionStats, termStats segment.TermStats) Scorer +} + +type Scorer interface { + Score(freq int, norm float64) float64 + Explain(freq int, norm float64) *Explanation +} + +type CompositeScorer interface { + ScoreComposite(constituents []*DocumentMatch) float64 + ExplainComposite(constituents []*DocumentMatch) *Explanation +} diff --git a/vendor/github.com/blevesearch/bleve/v2/search/searcher/ordered_searchers_list.go b/vendor/github.com/blugelabs/bluge/search/searcher/ordered_searchers_list.go similarity index 91% rename from vendor/github.com/blevesearch/bleve/v2/search/searcher/ordered_searchers_list.go rename to vendor/github.com/blugelabs/bluge/search/searcher/ordered_searchers_list.go index f3e646e9d..0107c7f8a 100644 --- a/vendor/github.com/blevesearch/bleve/v2/search/searcher/ordered_searchers_list.go +++ b/vendor/github.com/blugelabs/bluge/search/searcher/ordered_searchers_list.go @@ -1,4 +1,4 @@ -// Copyright (c) 2014 Couchbase, Inc. +// Copyright (c) 2020 Couchbase, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -15,7 +15,7 @@ package searcher import ( - "github.com/blevesearch/bleve/v2/search" + "github.com/blugelabs/bluge/search" ) type OrderedSearcherList []search.Searcher diff --git a/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_boolean.go b/vendor/github.com/blugelabs/bluge/search/searcher/search_boolean.go similarity index 52% rename from vendor/github.com/blevesearch/bleve/v2/search/searcher/search_boolean.go rename to vendor/github.com/blugelabs/bluge/search/searcher/search_boolean.go index ef9093c20..1e33f50be 100644 --- a/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_boolean.go +++ b/vendor/github.com/blugelabs/bluge/search/searcher/search_boolean.go @@ -1,4 +1,4 @@ -// Copyright (c) 2014 Couchbase, Inc. +// Copyright (c) 2020 Couchbase, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,55 +15,40 @@ package searcher import ( - "math" - "reflect" - - "github.com/blevesearch/bleve/v2/search" - "github.com/blevesearch/bleve/v2/search/scorer" - "github.com/blevesearch/bleve/v2/size" - index "github.com/blevesearch/bleve_index_api" + "github.com/blugelabs/bluge/search" ) -var reflectStaticSizeBooleanSearcher int - -func init() { - var bs BooleanSearcher - reflectStaticSizeBooleanSearcher = int(reflect.TypeOf(bs).Size()) -} - type BooleanSearcher struct { - indexReader index.IndexReader mustSearcher search.Searcher shouldSearcher search.Searcher mustNotSearcher search.Searcher - queryNorm float64 currMust *search.DocumentMatch currShould *search.DocumentMatch currMustNot *search.DocumentMatch - currentID index.IndexInternalID - min uint64 - scorer *scorer.ConjunctionQueryScorer + currentMatch *search.DocumentMatch + scorer search.CompositeScorer matches []*search.DocumentMatch initialized bool done bool + options search.SearcherOptions } -func NewBooleanSearcher(indexReader index.IndexReader, mustSearcher search.Searcher, 
shouldSearcher search.Searcher, mustNotSearcher search.Searcher, options search.SearcherOptions) (*BooleanSearcher, error) { +func NewBooleanSearcher(mustSearcher, shouldSearcher, mustNotSearcher search.Searcher, + scorer search.CompositeScorer, options search.SearcherOptions) (*BooleanSearcher, error) { // build our searcher rv := BooleanSearcher{ - indexReader: indexReader, mustSearcher: mustSearcher, shouldSearcher: shouldSearcher, mustNotSearcher: mustNotSearcher, - scorer: scorer.NewConjunctionQueryScorer(options), + scorer: scorer, matches: make([]*search.DocumentMatch, 2), + options: options, } - rv.computeQueryNorm() return &rv, nil } func (s *BooleanSearcher) Size() int { - sizeInBytes := reflectStaticSizeBooleanSearcher + size.SizeOfPtr + sizeInBytes := reflectStaticSizeBooleanSearcher + sizeOfPtr if s.mustSearcher != nil { sizeInBytes += s.mustSearcher.Size() @@ -77,8 +62,6 @@ func (s *BooleanSearcher) Size() int { sizeInBytes += s.mustNotSearcher.Size() } - sizeInBytes += s.scorer.Size() - for _, entry := range s.matches { if entry != nil { sizeInBytes += entry.Size() @@ -88,28 +71,7 @@ func (s *BooleanSearcher) Size() int { return sizeInBytes } -func (s *BooleanSearcher) computeQueryNorm() { - // first calculate sum of squared weights - sumOfSquaredWeights := 0.0 - if s.mustSearcher != nil { - sumOfSquaredWeights += s.mustSearcher.Weight() - } - if s.shouldSearcher != nil { - sumOfSquaredWeights += s.shouldSearcher.Weight() - } - - // now compute query norm from this - s.queryNorm = 1.0 / math.Sqrt(sumOfSquaredWeights) - // finally tell all the downstream searchers the norm - if s.mustSearcher != nil { - s.mustSearcher.SetQueryNorm(s.queryNorm) - } - if s.shouldSearcher != nil { - s.shouldSearcher.SetQueryNorm(s.queryNorm) - } -} - -func (s *BooleanSearcher) initSearchers(ctx *search.SearchContext) error { +func (s *BooleanSearcher) initSearchers(ctx *search.Context) error { var err error // get all searchers pointing at their first match if 
s.mustSearcher != nil { @@ -143,18 +105,18 @@ func (s *BooleanSearcher) initSearchers(ctx *search.SearchContext) error { } if s.mustSearcher != nil && s.currMust != nil { - s.currentID = s.currMust.IndexInternalID + s.currentMatch = s.currMust } else if s.mustSearcher == nil && s.currShould != nil { - s.currentID = s.currShould.IndexInternalID + s.currentMatch = s.currShould } else { - s.currentID = nil + s.currentMatch = nil } s.initialized = true return nil } -func (s *BooleanSearcher) advanceNextMust(ctx *search.SearchContext, skipReturn *search.DocumentMatch) error { +func (s *BooleanSearcher) advanceNextMust(ctx *search.Context, skipReturn *search.DocumentMatch) error { var err error if s.mustSearcher != nil { @@ -176,38 +138,16 @@ func (s *BooleanSearcher) advanceNextMust(ctx *search.SearchContext, skipReturn } if s.mustSearcher != nil && s.currMust != nil { - s.currentID = s.currMust.IndexInternalID + s.currentMatch = s.currMust } else if s.mustSearcher == nil && s.currShould != nil { - s.currentID = s.currShould.IndexInternalID + s.currentMatch = s.currShould } else { - s.currentID = nil + s.currentMatch = nil } return nil } -func (s *BooleanSearcher) Weight() float64 { - var rv float64 - if s.mustSearcher != nil { - rv += s.mustSearcher.Weight() - } - if s.shouldSearcher != nil { - rv += s.shouldSearcher.Weight() - } - - return rv -} - -func (s *BooleanSearcher) SetQueryNorm(qnorm float64) { - if s.mustSearcher != nil { - s.mustSearcher.SetQueryNorm(qnorm) - } - if s.shouldSearcher != nil { - s.shouldSearcher.SetQueryNorm(qnorm) - } -} - -func (s *BooleanSearcher) Next(ctx *search.SearchContext) (*search.DocumentMatch, error) { - +func (s *BooleanSearcher) Next(ctx *search.Context) (*search.DocumentMatch, error) { if s.done { return nil, nil } @@ -219,61 +159,46 @@ func (s *BooleanSearcher) Next(ctx *search.SearchContext) (*search.DocumentMatch } } - var err error - var rv *search.DocumentMatch + rv, err := s.nextInternal(ctx) + if err != nil { + return 
nil, err + } + + if rv == nil { + s.done = true + } + + return rv, nil +} - for s.currentID != nil { +func (s *BooleanSearcher) nextInternal(ctx *search.Context) (rv *search.DocumentMatch, err error) { + for s.currentMatch != nil { if s.currMustNot != nil { - cmp := s.currMustNot.IndexInternalID.Compare(s.currentID) - if cmp < 0 { - ctx.DocumentMatchPool.Put(s.currMustNot) - // advance must not searcher to our candidate entry - s.currMustNot, err = s.mustNotSearcher.Advance(ctx, s.currentID) - if err != nil { - return nil, err - } - if s.currMustNot != nil && s.currMustNot.IndexInternalID.Equals(s.currentID) { - // the candidate is excluded - err = s.advanceNextMust(ctx, nil) - if err != nil { - return nil, err - } - continue - } - } else if cmp == 0 { - // the candidate is excluded - err = s.advanceNextMust(ctx, nil) - if err != nil { - return nil, err - } + var mustNotExcludesCandidate bool + mustNotExcludesCandidate, err = s.doesMustNotExcludeCandidate(ctx) + if err != nil { + return nil, err + } + if mustNotExcludesCandidate { continue } } shouldCmpOrNil := 1 // NOTE: shouldCmp will also be 1 when currShould == nil. 
if s.currShould != nil { - shouldCmpOrNil = s.currShould.IndexInternalID.Compare(s.currentID) + shouldCmpOrNil = docNumberCompare(s.currShould.Number, s.currentMatch.Number) } if shouldCmpOrNil < 0 { ctx.DocumentMatchPool.Put(s.currShould) // advance should searcher to our candidate entry - s.currShould, err = s.shouldSearcher.Advance(ctx, s.currentID) + s.currShould, err = s.shouldSearcher.Advance(ctx, s.currentMatch.Number) if err != nil { return nil, err } - if s.currShould != nil && s.currShould.IndexInternalID.Equals(s.currentID) { + if s.currShould != nil && s.currShould.Number == s.currentMatch.Number { // score bonus matches should - var cons []*search.DocumentMatch - if s.currMust != nil { - cons = s.matches - cons[0] = s.currMust - cons[1] = s.currShould - } else { - cons = s.matches[0:1] - cons[0] = s.currShould - } - rv = s.scorer.Score(ctx, cons) + rv = s.buildDocumentMatch(s.buildConstituents()) err = s.advanceNextMust(ctx, rv) if err != nil { return nil, err @@ -283,7 +208,7 @@ func (s *BooleanSearcher) Next(ctx *search.SearchContext) (*search.DocumentMatch // match is OK anyway cons := s.matches[0:1] cons[0] = s.currMust - rv = s.scorer.Score(ctx, cons) + rv = s.buildDocumentMatch(cons) err = s.advanceNextMust(ctx, rv) if err != nil { return nil, err @@ -292,16 +217,7 @@ func (s *BooleanSearcher) Next(ctx *search.SearchContext) (*search.DocumentMatch } } else if shouldCmpOrNil == 0 { // score bonus matches should - var cons []*search.DocumentMatch - if s.currMust != nil { - cons = s.matches - cons[0] = s.currMust - cons[1] = s.currShould - } else { - cons = s.matches[0:1] - cons[0] = s.currShould - } - rv = s.scorer.Score(ctx, cons) + rv = s.buildDocumentMatch(s.buildConstituents()) err = s.advanceNextMust(ctx, rv) if err != nil { return nil, err @@ -311,7 +227,7 @@ func (s *BooleanSearcher) Next(ctx *search.SearchContext) (*search.DocumentMatch // match is OK anyway cons := s.matches[0:1] cons[0] = s.currMust - rv = s.scorer.Score(ctx, cons) + rv = 
s.buildDocumentMatch(cons) err = s.advanceNextMust(ctx, rv) if err != nil { return nil, err @@ -324,16 +240,51 @@ func (s *BooleanSearcher) Next(ctx *search.SearchContext) (*search.DocumentMatch return nil, err } } + return rv, nil +} - if rv == nil { - s.done = true +func (s *BooleanSearcher) doesMustNotExcludeCandidate(ctx *search.Context) (excluded bool, err error) { + cmp := docNumberCompare(s.currMustNot.Number, s.currentMatch.Number) + if cmp < 0 { + ctx.DocumentMatchPool.Put(s.currMustNot) + // advance must not searcher to our candidate entry + s.currMustNot, err = s.mustNotSearcher.Advance(ctx, s.currentMatch.Number) + if err != nil { + return false, err + } + if s.currMustNot != nil && s.currMustNot.Number == s.currentMatch.Number { + // the candidate is excluded + err = s.advanceNextMust(ctx, nil) + if err != nil { + return false, err + } + return true, nil + } + } else if cmp == 0 { + // the candidate is excluded + err = s.advanceNextMust(ctx, nil) + if err != nil { + return false, err + } + return true, nil } - - return rv, nil + return false, nil } -func (s *BooleanSearcher) Advance(ctx *search.SearchContext, ID index.IndexInternalID) (*search.DocumentMatch, error) { +func (s *BooleanSearcher) buildConstituents() []*search.DocumentMatch { + var cons []*search.DocumentMatch + if s.currMust != nil { + cons = s.matches + cons[0] = s.currMust + cons[1] = s.currShould + } else { + cons = s.matches[0:1] + cons[0] = s.currShould + } + return cons +} +func (s *BooleanSearcher) Advance(ctx *search.Context, number uint64) (*search.DocumentMatch, error) { if s.done { return nil, nil } @@ -346,57 +297,64 @@ func (s *BooleanSearcher) Advance(ctx *search.SearchContext, ID index.IndexInter } // Advance the searcher only if the cursor is trailing the lookup ID - if s.currentID == nil || s.currentID.Compare(ID) < 0 { - var err error - if s.mustSearcher != nil { - if s.currMust != nil { - ctx.DocumentMatchPool.Put(s.currMust) - } - s.currMust, err = 
s.mustSearcher.Advance(ctx, ID) - if err != nil { - return nil, err - } + if s.currentMatch == nil || docNumberCompare(s.currentMatch.Number, number) < 0 { + err := s.advanceIfTrailing(ctx, number) + if err != nil { + return nil, err } + } - if s.shouldSearcher != nil { - if s.currShould != nil { - ctx.DocumentMatchPool.Put(s.currShould) - } - s.currShould, err = s.shouldSearcher.Advance(ctx, ID) - if err != nil { - return nil, err - } + return s.Next(ctx) +} + +func (s *BooleanSearcher) advanceIfTrailing(ctx *search.Context, number uint64) error { + var err error + if s.mustSearcher != nil { + if s.currMust != nil { + ctx.DocumentMatchPool.Put(s.currMust) } + s.currMust, err = s.mustSearcher.Advance(ctx, number) + if err != nil { + return err + } + } - if s.mustNotSearcher != nil { - // Additional check for mustNotSearcher, whose cursor isn't tracked by - // currentID to prevent it from moving when the searcher's tracked - // position is already ahead of or at the requested ID. - if s.currMustNot == nil || s.currMustNot.IndexInternalID.Compare(ID) < 0 { - if s.currMustNot != nil { - ctx.DocumentMatchPool.Put(s.currMustNot) - } - s.currMustNot, err = s.mustNotSearcher.Advance(ctx, ID) - if err != nil { - return nil, err - } - } + if s.shouldSearcher != nil { + if s.currShould != nil { + ctx.DocumentMatchPool.Put(s.currShould) } + s.currShould, err = s.shouldSearcher.Advance(ctx, number) + if err != nil { + return err + } + } - if s.mustSearcher != nil && s.currMust != nil { - s.currentID = s.currMust.IndexInternalID - } else if s.mustSearcher == nil && s.currShould != nil { - s.currentID = s.currShould.IndexInternalID - } else { - s.currentID = nil + if s.mustNotSearcher != nil { + // Additional check for mustNotSearcher, whose cursor isn't tracked by + // currentID to prevent it from moving when the searcher's tracked + // position is already ahead of or at the requested ID. 
+ if s.currMustNot == nil || s.currMustNot.Number < number { + if s.currMustNot != nil { + ctx.DocumentMatchPool.Put(s.currMustNot) + } + s.currMustNot, err = s.mustNotSearcher.Advance(ctx, number) + if err != nil { + return err + } } } - return s.Next(ctx) + if s.mustSearcher != nil && s.currMust != nil { + s.currentMatch = s.currMust + } else if s.mustSearcher == nil && s.currShould != nil { + s.currentMatch = s.currShould + } else { + s.currentMatch = nil + } + return nil } func (s *BooleanSearcher) Count() uint64 { - // for now return a worst case var sum uint64 if s.mustSearcher != nil { @@ -448,3 +406,18 @@ func (s *BooleanSearcher) DocumentMatchPoolSize() int { } return rv } + +func (s *BooleanSearcher) buildDocumentMatch(constituents []*search.DocumentMatch) *search.DocumentMatch { + rv := constituents[0] + if s.options.Explain { + rv.Explanation = s.scorer.ExplainComposite(constituents) + rv.Score = rv.Explanation.Value + } else { + rv.Score = s.scorer.ScoreComposite(constituents) + } + + rv.FieldTermLocations = search.MergeFieldTermLocations( + rv.FieldTermLocations, constituents[1:]) + + return rv +} diff --git a/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_conjunction.go b/vendor/github.com/blugelabs/bluge/search/searcher/search_conjunction.go similarity index 64% rename from vendor/github.com/blevesearch/bleve/v2/search/searcher/search_conjunction.go rename to vendor/github.com/blugelabs/bluge/search/searcher/search_conjunction.go index 5fe59b900..101c7be00 100644 --- a/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_conjunction.go +++ b/vendor/github.com/blugelabs/bluge/search/searcher/search_conjunction.go @@ -1,4 +1,4 @@ -// Copyright (c) 2014 Couchbase, Inc. +// Copyright (c) 2020 Couchbase, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -15,36 +15,22 @@ package searcher import ( - "math" - "reflect" "sort" - "github.com/blevesearch/bleve/v2/search" - "github.com/blevesearch/bleve/v2/search/scorer" - "github.com/blevesearch/bleve/v2/size" - index "github.com/blevesearch/bleve_index_api" + "github.com/blugelabs/bluge/search" ) -var reflectStaticSizeConjunctionSearcher int - -func init() { - var cs ConjunctionSearcher - reflectStaticSizeConjunctionSearcher = int(reflect.TypeOf(cs).Size()) -} - type ConjunctionSearcher struct { - indexReader index.IndexReader searchers OrderedSearcherList - queryNorm float64 currs []*search.DocumentMatch maxIDIdx int - scorer *scorer.ConjunctionQueryScorer initialized bool options search.SearcherOptions + scorer search.CompositeScorer } -func NewConjunctionSearcher(indexReader index.IndexReader, - qsearchers []search.Searcher, options search.SearcherOptions) ( +func NewConjunctionSearcher(indexReader search.Reader, + qsearchers []search.Searcher, scorer search.CompositeScorer, options search.SearcherOptions) ( search.Searcher, error) { // build the sorted downstream searchers searchers := make(OrderedSearcherList, len(qsearchers)) @@ -56,7 +42,7 @@ func NewConjunctionSearcher(indexReader index.IndexReader, // attempt the "unadorned" conjunction optimization only when we // do not need extra information like freq-norm's or term vectors if len(searchers) > 1 && - options.Score == "none" && !options.IncludeTermVectors { + options.Score == optionScoringNone && !options.IncludeTermVectors { rv, err := optimizeCompositeSearcher("conjunction:unadorned", indexReader, searchers, options) if err != nil || rv != nil { @@ -66,13 +52,11 @@ func NewConjunctionSearcher(indexReader index.IndexReader, // build our searcher rv := ConjunctionSearcher{ - indexReader: indexReader, - options: options, - searchers: searchers, - currs: make([]*search.DocumentMatch, len(searchers)), - scorer: scorer.NewConjunctionQueryScorer(options), + options: options, + searchers: searchers, + currs: 
make([]*search.DocumentMatch, len(searchers)), + scorer: scorer, } - rv.computeQueryNorm() // attempt push-down conjunction optimization when there's >1 searchers if len(searchers) > 1 { @@ -87,8 +71,7 @@ func NewConjunctionSearcher(indexReader index.IndexReader, } func (s *ConjunctionSearcher) Size() int { - sizeInBytes := reflectStaticSizeConjunctionSearcher + size.SizeOfPtr + - s.scorer.Size() + sizeInBytes := reflectStaticSizeConjunctionSearcher + sizeOfPtr for _, entry := range s.searchers { sizeInBytes += entry.Size() @@ -103,21 +86,7 @@ func (s *ConjunctionSearcher) Size() int { return sizeInBytes } -func (s *ConjunctionSearcher) computeQueryNorm() { - // first calculate sum of squared weights - sumOfSquaredWeights := 0.0 - for _, searcher := range s.searchers { - sumOfSquaredWeights += searcher.Weight() - } - // now compute query norm from this - s.queryNorm = 1.0 / math.Sqrt(sumOfSquaredWeights) - // finally tell all the downstream searchers the norm - for _, searcher := range s.searchers { - searcher.SetQueryNorm(s.queryNorm) - } -} - -func (s *ConjunctionSearcher) initSearchers(ctx *search.SearchContext) error { +func (s *ConjunctionSearcher) initSearchers(ctx *search.Context) error { var err error // get all searchers pointing at their first match for i, searcher := range s.searchers { @@ -133,21 +102,7 @@ func (s *ConjunctionSearcher) initSearchers(ctx *search.SearchContext) error { return nil } -func (s *ConjunctionSearcher) Weight() float64 { - var rv float64 - for _, searcher := range s.searchers { - rv += searcher.Weight() - } - return rv -} - -func (s *ConjunctionSearcher) SetQueryNorm(qnorm float64) { - for _, searcher := range s.searchers { - searcher.SetQueryNorm(qnorm) - } -} - -func (s *ConjunctionSearcher) Next(ctx *search.SearchContext) (*search.DocumentMatch, error) { +func (s *ConjunctionSearcher) Next(ctx *search.Context) (*search.DocumentMatch, error) { if !s.initialized { err := s.initSearchers(ctx) if err != nil { @@ -158,7 +113,7 @@ 
func (s *ConjunctionSearcher) Next(ctx *search.SearchContext) (*search.DocumentM var err error OUTER: for s.maxIDIdx < len(s.currs) && s.currs[s.maxIDIdx] != nil { - maxID := s.currs[s.maxIDIdx].IndexInternalID + maxID := s.currs[s.maxIDIdx].Number i := 0 for i < len(s.currs) { @@ -171,7 +126,7 @@ OUTER: continue } - cmp := maxID.Compare(s.currs[i].IndexInternalID) + cmp := docNumberCompare(maxID, s.currs[i].Number) if cmp == 0 { i++ continue @@ -183,7 +138,7 @@ OUTER: // advance the positions where [0 <= x < i], since we // know they were equal to the former max entry - maxID = s.currs[s.maxIDIdx].IndexInternalID + maxID = s.currs[s.maxIDIdx].Number for x := 0; x < i; x++ { err = s.advanceChild(ctx, x, maxID) if err != nil { @@ -205,7 +160,7 @@ OUTER: } // if we get here, a doc matched all readers, so score and add it - rv = s.scorer.Score(ctx, s.currs) + rv = s.buildDocumentMatch(s.currs) // we know all the searchers are pointing at the same thing // so they all need to be bumped @@ -222,10 +177,11 @@ OUTER: // don't continue now, wait for the next call to Next() break } + return rv, nil } -func (s *ConjunctionSearcher) Advance(ctx *search.SearchContext, ID index.IndexInternalID) (*search.DocumentMatch, error) { +func (s *ConjunctionSearcher) Advance(ctx *search.Context, number uint64) (*search.DocumentMatch, error) { if !s.initialized { err := s.initSearchers(ctx) if err != nil { @@ -233,10 +189,10 @@ func (s *ConjunctionSearcher) Advance(ctx *search.SearchContext, ID index.IndexI } } for i := range s.searchers { - if s.currs[i] != nil && s.currs[i].IndexInternalID.Compare(ID) >= 0 { + if s.currs[i] != nil && s.currs[i].Number >= number { continue } - err := s.advanceChild(ctx, i, ID) + err := s.advanceChild(ctx, i, number) if err != nil { return nil, err } @@ -244,11 +200,11 @@ func (s *ConjunctionSearcher) Advance(ctx *search.SearchContext, ID index.IndexI return s.Next(ctx) } -func (s *ConjunctionSearcher) advanceChild(ctx *search.SearchContext, i int, ID 
index.IndexInternalID) (err error) { +func (s *ConjunctionSearcher) advanceChild(ctx *search.Context, i int, number uint64) (err error) { if s.currs[i] != nil { ctx.DocumentMatchPool.Put(s.currs[i]) } - s.currs[i], err = s.searchers[i].Advance(ctx, ID) + s.currs[i], err = s.searchers[i].Advance(ctx, number) return err } @@ -282,3 +238,18 @@ func (s *ConjunctionSearcher) DocumentMatchPoolSize() int { } return rv } + +func (s *ConjunctionSearcher) buildDocumentMatch(constituents []*search.DocumentMatch) *search.DocumentMatch { + rv := constituents[0] + if s.options.Explain { + rv.Explanation = s.scorer.ExplainComposite(constituents) + rv.Score = rv.Explanation.Value + } else { + rv.Score = s.scorer.ScoreComposite(constituents) + } + + rv.FieldTermLocations = search.MergeFieldTermLocations( + rv.FieldTermLocations, constituents[1:]) + + return rv +} diff --git a/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_disjunction.go b/vendor/github.com/blugelabs/bluge/search/searcher/search_disjunction.go similarity index 66% rename from vendor/github.com/blevesearch/bleve/v2/search/searcher/search_disjunction.go rename to vendor/github.com/blugelabs/bluge/search/searcher/search_disjunction.go index 4cee46841..18e92a60f 100644 --- a/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_disjunction.go +++ b/vendor/github.com/blugelabs/bluge/search/searcher/search_disjunction.go @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Couchbase, Inc. +// Copyright (c) 2020 Couchbase, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -16,8 +16,12 @@ package searcher import ( "fmt" - "github.com/blevesearch/bleve/v2/search" - index "github.com/blevesearch/bleve_index_api" + + "github.com/blugelabs/bluge/search/similarity" + + segment "github.com/blugelabs/bluge_segment_api" + + "github.com/blugelabs/bluge/search" ) // DisjunctionMaxClauseCount is a compile time setting that applications can @@ -30,19 +34,19 @@ var DisjunctionMaxClauseCount = 0 // slice implementation to a heap implementation. var DisjunctionHeapTakeover = 10 -func NewDisjunctionSearcher(indexReader index.IndexReader, - qsearchers []search.Searcher, min float64, options search.SearcherOptions) ( +func NewDisjunctionSearcher(indexReader search.Reader, + qsearchers []search.Searcher, min int, scorer search.CompositeScorer, options search.SearcherOptions) ( search.Searcher, error) { - return newDisjunctionSearcher(indexReader, qsearchers, min, options, true) + return newDisjunctionSearcher(indexReader, qsearchers, min, scorer, options, true) } func optionsDisjunctionOptimizable(options search.SearcherOptions) bool { - rv := options.Score == "none" && !options.IncludeTermVectors + rv := options.Score == optionScoringNone && !options.IncludeTermVectors return rv } -func newDisjunctionSearcher(indexReader index.IndexReader, - qsearchers []search.Searcher, min float64, options search.SearcherOptions, +func newDisjunctionSearcher(indexReader search.Reader, + qsearchers []search.Searcher, min int, scorer search.CompositeScorer, options search.SearcherOptions, limit bool) (search.Searcher, error) { // attempt the "unadorned" disjunction optimization only when we // do not need extra information like freq-norm's or term vectors @@ -57,20 +61,21 @@ func newDisjunctionSearcher(indexReader index.IndexReader, } if len(qsearchers) > DisjunctionHeapTakeover { - return newDisjunctionHeapSearcher(indexReader, qsearchers, min, options, + return newDisjunctionHeapSearcher(qsearchers, min, scorer, options, limit) } - return 
newDisjunctionSliceSearcher(indexReader, qsearchers, min, options, + return newDisjunctionSliceSearcher(qsearchers, min, scorer, options, limit) } +const optionScoringNone = "none" + func optimizeCompositeSearcher(optimizationKind string, - indexReader index.IndexReader, qsearchers []search.Searcher, + indexReader search.Reader, qsearchers []search.Searcher, options search.SearcherOptions) (search.Searcher, error) { - var octx index.OptimizableContext - + var octx segment.OptimizableContext for _, searcher := range qsearchers { - o, ok := searcher.(index.Optimizable) + o, ok := searcher.(segment.Optimizable) if !ok { return nil, nil } @@ -91,13 +96,13 @@ func optimizeCompositeSearcher(optimizationKind string, return nil, err } - tfr, ok := optimized.(index.TermFieldReader) + tfr, ok := optimized.(segment.PostingsIterator) if !ok { return nil, nil } return newTermSearcherFromReader(indexReader, tfr, - []byte(optimizationKind), "*", 1.0, options) + []byte(optimizationKind), "*", 1.0, similarity.ConstantScorer(1), options) } func tooManyClauses(count int) bool { @@ -108,6 +113,6 @@ func tooManyClauses(count int) bool { } func tooManyClausesErr(field string, count int) error { - return fmt.Errorf("TooManyClauses over field: `%s` [%d > maxClauseCount,"+ - " which is set to %d]", field, count, DisjunctionMaxClauseCount) + return fmt.Errorf("tooManyClauses %d over field: `%s` > maxClauseCount, which is set to %d", + count, field, DisjunctionMaxClauseCount) } diff --git a/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_disjunction_heap.go b/vendor/github.com/blugelabs/bluge/search/searcher/search_disjunction_heap.go similarity index 65% rename from vendor/github.com/blevesearch/bleve/v2/search/searcher/search_disjunction_heap.go rename to vendor/github.com/blugelabs/bluge/search/searcher/search_disjunction_heap.go index bf945976d..4572b4a77 100644 --- a/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_disjunction_heap.go +++ 
b/vendor/github.com/blugelabs/bluge/search/searcher/search_disjunction_heap.go @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Couchbase, Inc. +// Copyright (c) 2020 Couchbase, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,50 +15,32 @@ package searcher import ( - "bytes" "container/heap" - "math" - "reflect" - "github.com/blevesearch/bleve/v2/search" - "github.com/blevesearch/bleve/v2/search/scorer" - "github.com/blevesearch/bleve/v2/size" - index "github.com/blevesearch/bleve_index_api" -) - -var reflectStaticSizeDisjunctionHeapSearcher int -var reflectStaticSizeSearcherCurr int + segment "github.com/blugelabs/bluge_segment_api" -func init() { - var dhs DisjunctionHeapSearcher - reflectStaticSizeDisjunctionHeapSearcher = int(reflect.TypeOf(dhs).Size()) - - var sc SearcherCurr - reflectStaticSizeSearcherCurr = int(reflect.TypeOf(sc).Size()) -} + "github.com/blugelabs/bluge/search" +) -type SearcherCurr struct { +type searcherCurr struct { searcher search.Searcher curr *search.DocumentMatch } type DisjunctionHeapSearcher struct { - indexReader index.IndexReader - numSearchers int - scorer *scorer.DisjunctionQueryScorer + scorer search.CompositeScorer min int - queryNorm float64 initialized bool searchers []search.Searcher - heap []*SearcherCurr + heap []*searcherCurr matching []*search.DocumentMatch - matchingCurrs []*SearcherCurr + matchingCurrs []*searcherCurr + options search.SearcherOptions } -func newDisjunctionHeapSearcher(indexReader index.IndexReader, - searchers []search.Searcher, min float64, options search.SearcherOptions, +func newDisjunctionHeapSearcher(searchers []search.Searcher, min int, scorer search.CompositeScorer, options search.SearcherOptions, limit bool) ( *DisjunctionHeapSearcher, error) { if limit && tooManyClauses(len(searchers)) { @@ -67,22 +49,20 @@ func newDisjunctionHeapSearcher(indexReader index.IndexReader, // build our searcher rv := 
DisjunctionHeapSearcher{ - indexReader: indexReader, searchers: searchers, numSearchers: len(searchers), - scorer: scorer.NewDisjunctionQueryScorer(options), - min: int(min), + scorer: scorer, + min: min, matching: make([]*search.DocumentMatch, len(searchers)), - matchingCurrs: make([]*SearcherCurr, len(searchers)), - heap: make([]*SearcherCurr, 0, len(searchers)), + matchingCurrs: make([]*searcherCurr, len(searchers)), + heap: make([]*searcherCurr, 0, len(searchers)), + options: options, } - rv.computeQueryNorm() return &rv, nil } func (s *DisjunctionHeapSearcher) Size() int { - sizeInBytes := reflectStaticSizeDisjunctionHeapSearcher + size.SizeOfPtr + - s.scorer.Size() + sizeInBytes := reflectStaticSizeDisjunctionHeapSearcher + sizeOfPtr for _, entry := range s.searchers { sizeInBytes += entry.Size() @@ -102,23 +82,9 @@ func (s *DisjunctionHeapSearcher) Size() int { return sizeInBytes } -func (s *DisjunctionHeapSearcher) computeQueryNorm() { - // first calculate sum of squared weights - sumOfSquaredWeights := 0.0 - for _, searcher := range s.searchers { - sumOfSquaredWeights += searcher.Weight() - } - // now compute query norm from this - s.queryNorm = 1.0 / math.Sqrt(sumOfSquaredWeights) - // finally tell all the downstream searchers the norm - for _, searcher := range s.searchers { - searcher.SetQueryNorm(s.queryNorm) - } -} - -func (s *DisjunctionHeapSearcher) initSearchers(ctx *search.SearchContext) error { +func (s *DisjunctionHeapSearcher) initSearchers(ctx *search.Context) error { // alloc a single block of SearcherCurrs - block := make([]SearcherCurr, len(s.searchers)) + block := make([]searcherCurr, len(s.searchers)) // get all searchers pointing at their first match for i, searcher := range s.searchers { @@ -146,15 +112,14 @@ func (s *DisjunctionHeapSearcher) updateMatches() error { matchingCurrs := s.matchingCurrs[:0] if len(s.heap) > 0 { - // top of the heap is our next hit - next := heap.Pop(s).(*SearcherCurr) + next := heap.Pop(s).(*searcherCurr) 
matching = append(matching, next.curr) matchingCurrs = append(matchingCurrs, next) // now as long as top of heap matches, keep popping - for len(s.heap) > 0 && bytes.Compare(next.curr.IndexInternalID, s.heap[0].curr.IndexInternalID) == 0 { - next = heap.Pop(s).(*SearcherCurr) + for len(s.heap) > 0 && next.curr.Number == s.heap[0].curr.Number { + next = heap.Pop(s).(*searcherCurr) matching = append(matching, next.curr) matchingCurrs = append(matchingCurrs, next) } @@ -166,21 +131,7 @@ func (s *DisjunctionHeapSearcher) updateMatches() error { return nil } -func (s *DisjunctionHeapSearcher) Weight() float64 { - var rv float64 - for _, searcher := range s.searchers { - rv += searcher.Weight() - } - return rv -} - -func (s *DisjunctionHeapSearcher) SetQueryNorm(qnorm float64) { - for _, searcher := range s.searchers { - searcher.SetQueryNorm(qnorm) - } -} - -func (s *DisjunctionHeapSearcher) Next(ctx *search.SearchContext) ( +func (s *DisjunctionHeapSearcher) Next(ctx *search.Context) ( *search.DocumentMatch, error) { if !s.initialized { err := s.initSearchers(ctx) @@ -195,7 +146,7 @@ func (s *DisjunctionHeapSearcher) Next(ctx *search.SearchContext) ( if len(s.matching) >= s.min { found = true // score this match - rv = s.scorer.Score(ctx, s.matching, len(s.matching), s.numSearchers) + rv = s.buildDocumentMatch(s.matching) } // invoke next on all the matching searchers @@ -222,8 +173,8 @@ func (s *DisjunctionHeapSearcher) Next(ctx *search.SearchContext) ( return rv, nil } -func (s *DisjunctionHeapSearcher) Advance(ctx *search.SearchContext, - ID index.IndexInternalID) (*search.DocumentMatch, error) { +func (s *DisjunctionHeapSearcher) Advance(ctx *search.Context, + number uint64) (*search.DocumentMatch, error) { if !s.initialized { err := s.initSearchers(ctx) if err != nil { @@ -240,10 +191,10 @@ func (s *DisjunctionHeapSearcher) Advance(ctx *search.SearchContext, // find all searchers that actually need to be advanced // advance them, using s.matchingCurrs as temp 
storage - for len(s.heap) > 0 && bytes.Compare(s.heap[0].curr.IndexInternalID, ID) < 0 { - searcherCurr := heap.Pop(s).(*SearcherCurr) + for len(s.heap) > 0 && docNumberCompare(s.heap[0].curr.Number, number) < 0 { + searcherCurr := heap.Pop(s).(*searcherCurr) ctx.DocumentMatchPool.Put(searcherCurr.curr) - curr, err := searcherCurr.searcher.Advance(ctx, ID) + curr, err := searcherCurr.searcher.Advance(ctx, number) if err != nil { return nil, err } @@ -301,10 +252,10 @@ func (s *DisjunctionHeapSearcher) DocumentMatchPoolSize() int { // a disjunction searcher implements the index.Optimizable interface // but only activates on an edge case where the disjunction is a // wrapper around a single Optimizable child searcher -func (s *DisjunctionHeapSearcher) Optimize(kind string, octx index.OptimizableContext) ( - index.OptimizableContext, error) { +func (s *DisjunctionHeapSearcher) Optimize(kind string, octx segment.OptimizableContext) ( + segment.OptimizableContext, error) { if len(s.searchers) == 1 { - o, ok := s.searchers[0].(index.Optimizable) + o, ok := s.searchers[0].(segment.Optimizable) if ok { return o.Optimize(kind, octx) } @@ -323,7 +274,7 @@ func (s *DisjunctionHeapSearcher) Less(i, j int) bool { } else if s.heap[j].curr == nil { return false } - return bytes.Compare(s.heap[i].curr.IndexInternalID, s.heap[j].curr.IndexInternalID) < 0 + return docNumberCompare(s.heap[i].curr.Number, s.heap[j].curr.Number) < 0 } func (s *DisjunctionHeapSearcher) Swap(i, j int) { @@ -331,7 +282,7 @@ func (s *DisjunctionHeapSearcher) Swap(i, j int) { } func (s *DisjunctionHeapSearcher) Push(x interface{}) { - s.heap = append(s.heap, x.(*SearcherCurr)) + s.heap = append(s.heap, x.(*searcherCurr)) } func (s *DisjunctionHeapSearcher) Pop() interface{} { @@ -341,3 +292,18 @@ func (s *DisjunctionHeapSearcher) Pop() interface{} { s.heap = old[0 : n-1] return x } + +func (s *DisjunctionHeapSearcher) buildDocumentMatch(constituents []*search.DocumentMatch) *search.DocumentMatch { + rv := 
constituents[0] + if s.options.Explain { + rv.Explanation = s.scorer.ExplainComposite(constituents) + rv.Score = rv.Explanation.Value + } else { + rv.Score = s.scorer.ScoreComposite(constituents) + } + + rv.FieldTermLocations = search.MergeFieldTermLocations( + rv.FieldTermLocations, constituents[1:]) + + return rv +} diff --git a/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_disjunction_slice.go b/vendor/github.com/blugelabs/bluge/search/searcher/search_disjunction_slice.go similarity index 67% rename from vendor/github.com/blevesearch/bleve/v2/search/searcher/search_disjunction_slice.go rename to vendor/github.com/blugelabs/bluge/search/searcher/search_disjunction_slice.go index 79fee9f4d..d3a45b7ac 100644 --- a/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_disjunction_slice.go +++ b/vendor/github.com/blugelabs/bluge/search/searcher/search_disjunction_slice.go @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Couchbase, Inc. +// Copyright (c) 2020 Couchbase, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -15,38 +15,26 @@ package searcher import ( - "math" - "reflect" "sort" - "github.com/blevesearch/bleve/v2/search" - "github.com/blevesearch/bleve/v2/search/scorer" - "github.com/blevesearch/bleve/v2/size" - index "github.com/blevesearch/bleve_index_api" -) - -var reflectStaticSizeDisjunctionSliceSearcher int + segment "github.com/blugelabs/bluge_segment_api" -func init() { - var ds DisjunctionSliceSearcher - reflectStaticSizeDisjunctionSliceSearcher = int(reflect.TypeOf(ds).Size()) -} + "github.com/blugelabs/bluge/search" +) type DisjunctionSliceSearcher struct { - indexReader index.IndexReader searchers OrderedSearcherList numSearchers int - queryNorm float64 currs []*search.DocumentMatch - scorer *scorer.DisjunctionQueryScorer + scorer search.CompositeScorer min int matching []*search.DocumentMatch matchingIdxs []int initialized bool + options search.SearcherOptions } -func newDisjunctionSliceSearcher(indexReader index.IndexReader, - qsearchers []search.Searcher, min float64, options search.SearcherOptions, +func newDisjunctionSliceSearcher(qsearchers []search.Searcher, min int, scorer search.CompositeScorer, options search.SearcherOptions, limit bool) ( *DisjunctionSliceSearcher, error) { if limit && tooManyClauses(len(qsearchers)) { @@ -61,22 +49,20 @@ func newDisjunctionSliceSearcher(indexReader index.IndexReader, sort.Sort(sort.Reverse(searchers)) // build our searcher rv := DisjunctionSliceSearcher{ - indexReader: indexReader, searchers: searchers, numSearchers: len(searchers), currs: make([]*search.DocumentMatch, len(searchers)), - scorer: scorer.NewDisjunctionQueryScorer(options), - min: int(min), + scorer: scorer, + min: min, matching: make([]*search.DocumentMatch, len(searchers)), matchingIdxs: make([]int, len(searchers)), + options: options, } - rv.computeQueryNorm() return &rv, nil } func (s *DisjunctionSliceSearcher) Size() int { - sizeInBytes := reflectStaticSizeDisjunctionSliceSearcher + size.SizeOfPtr + - s.scorer.Size() + sizeInBytes := 
reflectStaticSizeDisjunctionSliceSearcher + sizeOfPtr for _, entry := range s.searchers { sizeInBytes += entry.Size() @@ -94,26 +80,12 @@ func (s *DisjunctionSliceSearcher) Size() int { } } - sizeInBytes += len(s.matchingIdxs) * size.SizeOfInt + sizeInBytes += len(s.matchingIdxs) * sizeOfInt return sizeInBytes } -func (s *DisjunctionSliceSearcher) computeQueryNorm() { - // first calculate sum of squared weights - sumOfSquaredWeights := 0.0 - for _, searcher := range s.searchers { - sumOfSquaredWeights += searcher.Weight() - } - // now compute query norm from this - s.queryNorm = 1.0 / math.Sqrt(sumOfSquaredWeights) - // finally tell all the downstream searchers the norm - for _, searcher := range s.searchers { - searcher.SetQueryNorm(s.queryNorm) - } -} - -func (s *DisjunctionSliceSearcher) initSearchers(ctx *search.SearchContext) error { +func (s *DisjunctionSliceSearcher) initSearchers(ctx *search.Context) error { var err error // get all searchers pointing at their first match for i, searcher := range s.searchers { @@ -146,7 +118,7 @@ func (s *DisjunctionSliceSearcher) updateMatches() error { } if len(matching) > 0 { - cmp := curr.IndexInternalID.Compare(matching[0].IndexInternalID) + cmp := docNumberCompare(curr.Number, matching[0].Number) if cmp > 0 { continue } @@ -167,21 +139,7 @@ func (s *DisjunctionSliceSearcher) updateMatches() error { return nil } -func (s *DisjunctionSliceSearcher) Weight() float64 { - var rv float64 - for _, searcher := range s.searchers { - rv += searcher.Weight() - } - return rv -} - -func (s *DisjunctionSliceSearcher) SetQueryNorm(qnorm float64) { - for _, searcher := range s.searchers { - searcher.SetQueryNorm(qnorm) - } -} - -func (s *DisjunctionSliceSearcher) Next(ctx *search.SearchContext) ( +func (s *DisjunctionSliceSearcher) Next(ctx *search.Context) ( *search.DocumentMatch, error) { if !s.initialized { err := s.initSearchers(ctx) @@ -197,7 +155,7 @@ func (s *DisjunctionSliceSearcher) Next(ctx *search.SearchContext) ( if 
len(s.matching) >= s.min { found = true // score this match - rv = s.scorer.Score(ctx, s.matching, len(s.matching), s.numSearchers) + rv = s.buildDocumentMatch(s.matching) } // invoke next on all the matching searchers @@ -217,11 +175,12 @@ func (s *DisjunctionSliceSearcher) Next(ctx *search.SearchContext) ( return nil, err } } + return rv, nil } -func (s *DisjunctionSliceSearcher) Advance(ctx *search.SearchContext, - ID index.IndexInternalID) (*search.DocumentMatch, error) { +func (s *DisjunctionSliceSearcher) Advance(ctx *search.Context, + number uint64) (*search.DocumentMatch, error) { if !s.initialized { err := s.initSearchers(ctx) if err != nil { @@ -232,12 +191,12 @@ func (s *DisjunctionSliceSearcher) Advance(ctx *search.SearchContext, var err error for i, searcher := range s.searchers { if s.currs[i] != nil { - if s.currs[i].IndexInternalID.Compare(ID) >= 0 { + if s.currs[i].Number >= number { continue } ctx.DocumentMatchPool.Put(s.currs[i]) } - s.currs[i], err = searcher.Advance(ctx, ID) + s.currs[i], err = searcher.Advance(ctx, number) if err != nil { return nil, err } @@ -285,10 +244,10 @@ func (s *DisjunctionSliceSearcher) DocumentMatchPoolSize() int { // a disjunction searcher implements the index.Optimizable interface // but only activates on an edge case where the disjunction is a // wrapper around a single Optimizable child searcher -func (s *DisjunctionSliceSearcher) Optimize(kind string, octx index.OptimizableContext) ( - index.OptimizableContext, error) { +func (s *DisjunctionSliceSearcher) Optimize(kind string, octx segment.OptimizableContext) ( + segment.OptimizableContext, error) { if len(s.searchers) == 1 { - o, ok := s.searchers[0].(index.Optimizable) + o, ok := s.searchers[0].(segment.Optimizable) if ok { return o.Optimize(kind, octx) } @@ -296,3 +255,27 @@ func (s *DisjunctionSliceSearcher) Optimize(kind string, octx index.OptimizableC return nil, nil } + +func docNumberCompare(a, b uint64) int { + if a < b { + return -1 + } else if a > b { 
+ return 1 + } + return 0 +} + +func (s *DisjunctionSliceSearcher) buildDocumentMatch(constituents []*search.DocumentMatch) *search.DocumentMatch { + rv := constituents[0] + if s.options.Explain { + rv.Explanation = s.scorer.ExplainComposite(constituents) + rv.Score = rv.Explanation.Value + } else { + rv.Score = s.scorer.ScoreComposite(constituents) + } + + rv.FieldTermLocations = search.MergeFieldTermLocations( + rv.FieldTermLocations, constituents[1:]) + + return rv +} diff --git a/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_filter.go b/vendor/github.com/blugelabs/bluge/search/searcher/search_filter.go similarity index 69% rename from vendor/github.com/blevesearch/bleve/v2/search/searcher/search_filter.go rename to vendor/github.com/blugelabs/bluge/search/searcher/search_filter.go index 9cab0f78b..e4d7e8089 100644 --- a/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_filter.go +++ b/vendor/github.com/blugelabs/bluge/search/searcher/search_filter.go @@ -1,4 +1,4 @@ -// Copyright (c) 2017 Couchbase, Inc. +// Copyright (c) 2020 Couchbase, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -15,20 +15,9 @@ package searcher import ( - "reflect" - - "github.com/blevesearch/bleve/v2/search" - "github.com/blevesearch/bleve/v2/size" - index "github.com/blevesearch/bleve_index_api" + "github.com/blugelabs/bluge/search" ) -var reflectStaticSizeFilteringSearcher int - -func init() { - var fs FilteringSearcher - reflectStaticSizeFilteringSearcher = int(reflect.TypeOf(fs).Size()) -} - // FilterFunc defines a function which can filter documents // returning true means keep the document // returning false means do not keep the document @@ -49,11 +38,11 @@ func NewFilteringSearcher(s search.Searcher, filter FilterFunc) *FilteringSearch } func (f *FilteringSearcher) Size() int { - return reflectStaticSizeFilteringSearcher + size.SizeOfPtr + + return reflectStaticSizeFilteringSearcher + sizeOfPtr + f.child.Size() } -func (f *FilteringSearcher) Next(ctx *search.SearchContext) (*search.DocumentMatch, error) { +func (f *FilteringSearcher) Next(ctx *search.Context) (*search.DocumentMatch, error) { next, err := f.child.Next(ctx) for next != nil && err == nil { if f.accept(next) { @@ -64,8 +53,8 @@ func (f *FilteringSearcher) Next(ctx *search.SearchContext) (*search.DocumentMat return nil, err } -func (f *FilteringSearcher) Advance(ctx *search.SearchContext, ID index.IndexInternalID) (*search.DocumentMatch, error) { - adv, err := f.child.Advance(ctx, ID) +func (f *FilteringSearcher) Advance(ctx *search.Context, number uint64) (*search.DocumentMatch, error) { + adv, err := f.child.Advance(ctx, number) if err != nil { return nil, err } @@ -82,14 +71,6 @@ func (f *FilteringSearcher) Close() error { return f.child.Close() } -func (f *FilteringSearcher) Weight() float64 { - return f.child.Weight() -} - -func (f *FilteringSearcher) SetQueryNorm(n float64) { - f.child.SetQueryNorm(n) -} - func (f *FilteringSearcher) Count() uint64 { return f.child.Count() } diff --git a/vendor/github.com/blugelabs/bluge/search/searcher/search_fuzzy.go 
b/vendor/github.com/blugelabs/bluge/search/searcher/search_fuzzy.go new file mode 100644 index 000000000..c53673f7b --- /dev/null +++ b/vendor/github.com/blugelabs/bluge/search/searcher/search_fuzzy.go @@ -0,0 +1,149 @@ +// Copyright (c) 2020 Couchbase, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package searcher + +import ( + "fmt" + "unicode/utf8" + + segment "github.com/blugelabs/bluge_segment_api" + + "github.com/blevesearch/vellum" + "github.com/blevesearch/vellum/levenshtein" + "github.com/blugelabs/bluge/search" +) + +// reusable, thread-safe levenshtein builders +var levAutomatonBuilders map[int]*levenshtein.LevenshteinAutomatonBuilder + +func init() { + levAutomatonBuilders = map[int]*levenshtein.LevenshteinAutomatonBuilder{} + supportedFuzziness := []int{1, 2} + for _, fuzziness := range supportedFuzziness { + lb, err := levenshtein.NewLevenshteinAutomatonBuilder(uint8(fuzziness), true) + if err != nil { + panic(fmt.Errorf("levenshtein automaton ed1 builder err: %v", err)) + } + levAutomatonBuilders[fuzziness] = lb + } +} + +var MaxFuzziness = 2 + +func NewFuzzySearcher(indexReader search.Reader, term string, + prefix, fuzziness int, field string, boost float64, scorer search.Scorer, + compScorer search.CompositeScorer, options search.SearcherOptions) (search.Searcher, error) { + if fuzziness > MaxFuzziness { + return nil, fmt.Errorf("fuzziness exceeds max (%d)", MaxFuzziness) + } + + if fuzziness < 0 { + return nil, 
fmt.Errorf("invalid fuzziness, negative") + } + + // Note: we don't byte slice the term for a prefix because of runes. + prefixTerm := "" + for i, r := range term { + if i < prefix { + prefixTerm += string(r) + } else { + break + } + } + candidateTerms, termBoosts, err := findFuzzyCandidateTerms(indexReader, term, fuzziness, + field, prefixTerm) + if err != nil { + return nil, err + } + + return NewMultiTermSearcherIndividualBoost(indexReader, candidateTerms, termBoosts, field, + boost, scorer, compScorer, options, true) +} + +func findFuzzyCandidateTerms(indexReader search.Reader, term string, + fuzziness int, field, prefixTerm string) (terms []string, boosts []float64, err error) { + automatons, err := getLevAutomatons(term, fuzziness) + if err != nil { + return nil, nil, err + } + + var prefixBeg, prefixEnd []byte + if prefixTerm != "" { + prefixBeg = []byte(prefixTerm) + prefixEnd = incrementBytes(prefixBeg) + } + + fieldDict, err := indexReader.DictionaryIterator(field, automatons[0], prefixBeg, prefixEnd) + if err != nil { + return nil, nil, err + } + defer func() { + if cerr := fieldDict.Close(); cerr != nil && err == nil { + err = cerr + } + }() + + termLen := utf8.RuneCountInString(term) + + tfd, err := fieldDict.Next() + for err == nil && tfd != nil { + terms = append(terms, tfd.Term()) + if tooManyClauses(len(terms)) { + return nil, nil, tooManyClausesErr(field, len(terms)) + } + // compute actual edit distance for this term + boost := 1.0 + if tfd.Term() != term { + boost = boostFromDistance(fuzziness, automatons, tfd.Term(), termLen) + } + boosts = append(boosts, boost) + tfd, err = fieldDict.Next() + } + return terms, boosts, err +} + +func boostFromDistance(fuzziness int, automatons []segment.Automaton, dictTerm string, searchTermLen int) float64 { + termEditDistance := fuzziness // start assuming it is fuzziness of automaton that found it + for i := 1; i < len(automatons); i++ { + if vellum.AutomatonContains(automatons[i], []byte(dictTerm)) { + 
termEditDistance-- + } + } + minTermLen := searchTermLen + thisTermLen := utf8.RuneCountInString(dictTerm) + if thisTermLen < minTermLen { + minTermLen = thisTermLen + } + return 1.0 - (float64(termEditDistance) / float64(minTermLen)) +} + +func getLevAutomaton(term string, fuzziness int) (segment.Automaton, error) { + if levAutomatonBuilder, ok := levAutomatonBuilders[fuzziness]; ok { + return levAutomatonBuilder.BuildDfa(term, uint8(fuzziness)) + } + return nil, fmt.Errorf("unsupported fuzziness: %d", fuzziness) +} + +func getLevAutomatons(term string, maxFuzziness int) (rv []segment.Automaton, err error) { + for fuzziness := maxFuzziness; fuzziness > 0; fuzziness-- { + var levAutomaton segment.Automaton + levAutomaton, err = getLevAutomaton(term, fuzziness) + if err != nil { + return nil, err + } + rv = append(rv, levAutomaton) + } + return rv, nil +} diff --git a/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_geoboundingbox.go b/vendor/github.com/blugelabs/bluge/search/searcher/search_geoboundingbox.go similarity index 67% rename from vendor/github.com/blevesearch/bleve/v2/search/searcher/search_geoboundingbox.go rename to vendor/github.com/blugelabs/bluge/search/searcher/search_geoboundingbox.go index a23151466..8f978062b 100644 --- a/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_geoboundingbox.go +++ b/vendor/github.com/blugelabs/bluge/search/searcher/search_geoboundingbox.go @@ -1,4 +1,4 @@ -// Copyright (c) 2017 Couchbase, Inc. +// Copyright (c) 2020 Couchbase, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -15,11 +15,11 @@ package searcher import ( - "github.com/blevesearch/bleve/v2/document" - "github.com/blevesearch/bleve/v2/geo" - "github.com/blevesearch/bleve/v2/numeric" - "github.com/blevesearch/bleve/v2/search" - index "github.com/blevesearch/bleve_index_api" + "github.com/blugelabs/bluge/numeric" + "github.com/blugelabs/bluge/numeric/geo" + "github.com/blugelabs/bluge/search" + "github.com/blugelabs/bluge/search/similarity" + segment "github.com/blugelabs/bluge_segment_api" ) type filterFunc func(key []byte) bool @@ -27,11 +27,11 @@ type filterFunc func(key []byte) bool var GeoBitsShift1 = geo.GeoBits << 1 var GeoBitsShift1Minus1 = GeoBitsShift1 - 1 -func NewGeoBoundingBoxSearcher(indexReader index.IndexReader, minLon, minLat, - maxLon, maxLat float64, field string, boost float64, - options search.SearcherOptions, checkBoundaries bool) ( +func NewGeoBoundingBoxSearcher(indexReader search.Reader, minLon, minLat, + maxLon, maxLat float64, field string, boost float64, scorer search.Scorer, + compScorer search.CompositeScorer, options search.SearcherOptions, + checkBoundaries bool, precisionStep uint) ( search.Searcher, error) { - // track list of opened searchers, for cleanup on early exit var openedSearchers []search.Searcher cleanupOpenedSearchers := func() { @@ -42,26 +42,26 @@ func NewGeoBoundingBoxSearcher(indexReader index.IndexReader, minLon, minLat, // do math to produce list of terms needed for this search onBoundaryTerms, notOnBoundaryTerms, err := ComputeGeoRange(0, GeoBitsShift1Minus1, - minLon, minLat, maxLon, maxLat, checkBoundaries, indexReader, field) + minLon, minLat, maxLon, maxLat, checkBoundaries, indexReader, field, precisionStep) if err != nil { return nil, err } var onBoundarySearcher search.Searcher - dvReader, err := indexReader.DocValueReader([]string{field}) + dvReader, err := indexReader.DocumentValueReader([]string{field}) if err != nil { return nil, err } if len(onBoundaryTerms) > 0 { rawOnBoundarySearcher, err := 
NewMultiTermSearcherBytes(indexReader, - onBoundaryTerms, field, boost, options, false) + onBoundaryTerms, field, boost, scorer, compScorer, options, false) if err != nil { return nil, err } // add filter to check points near the boundary onBoundarySearcher = NewFilteringSearcher(rawOnBoundarySearcher, - buildRectFilter(dvReader, field, minLon, minLat, maxLon, maxLat)) + buildRectFilter(dvReader, minLon, minLat, maxLon, maxLat)) openedSearchers = append(openedSearchers, onBoundarySearcher) } @@ -69,7 +69,7 @@ func NewGeoBoundingBoxSearcher(indexReader index.IndexReader, minLon, minLat, if len(notOnBoundaryTerms) > 0 { var err error notOnBoundarySearcher, err = NewMultiTermSearcherBytes(indexReader, - notOnBoundaryTerms, field, boost, options, false) + notOnBoundaryTerms, field, boost, scorer, compScorer, options, false) if err != nil { cleanupOpenedSearchers() return nil, err @@ -83,7 +83,7 @@ func NewGeoBoundingBoxSearcher(indexReader index.IndexReader, minLon, minLat, onBoundarySearcher, notOnBoundarySearcher, }, - 0, options) + 0, similarity.NewCompositeSumScorer(), options) if err != nil { cleanupOpenedSearchers() return nil, err @@ -95,18 +95,15 @@ func NewGeoBoundingBoxSearcher(indexReader index.IndexReader, minLon, minLat, return notOnBoundarySearcher, nil } - return NewMatchNoneSearcher(indexReader) + return NewMatchNoneSearcher(indexReader, options) } -var geoMaxShift = document.GeoPrecisionStep * 4 -var geoDetailLevel = ((geo.GeoBits << 1) - geoMaxShift) / 2 - -type closeFunc func() error - func ComputeGeoRange(term uint64, shift uint, sminLon, sminLat, smaxLon, smaxLat float64, checkBoundaries bool, - indexReader index.IndexReader, field string) ( - onBoundary [][]byte, notOnBoundary [][]byte, err error) { + indexReader search.Reader, field string, precisionStep uint) ( + onBoundary, notOnBoundary [][]byte, err error) { + var geoMaxShift = precisionStep * 4 + var geoDetailLevel = ((geo.GeoBits << 1) - geoMaxShift) / 2 isIndexed, closeF, err := 
buildIsIndexedFunc(indexReader, field) if closeF != nil { @@ -119,14 +116,16 @@ func ComputeGeoRange(term uint64, shift uint, } grc := &geoRangeCompute{ - preallocBytesLen: 32, - preallocBytes: make([]byte, 32), + preAllocBytesLen: 32, + preAllocBytes: make([]byte, 32), sminLon: sminLon, sminLat: sminLat, smaxLon: smaxLon, smaxLat: smaxLat, checkBoundaries: checkBoundaries, isIndexed: isIndexed, + geoDetailLevel: geoDetailLevel, + precisionStep: precisionStep, } grc.computeGeoRange(term, shift) @@ -134,56 +133,12 @@ func ComputeGeoRange(term uint64, shift uint, return grc.onBoundary, grc.notOnBoundary, nil } -func buildIsIndexedFunc(indexReader index.IndexReader, field string) (isIndexed filterFunc, closeF closeFunc, err error) { - if irr, ok := indexReader.(index.IndexReaderContains); ok { - fieldDict, err := irr.FieldDictContains(field) - if err != nil { - return nil, nil, err - } - - isIndexed = func(term []byte) bool { - found, err := fieldDict.Contains(term) - return err == nil && found - } - - closeF = func() error { - if fd, ok := fieldDict.(index.FieldDict); ok { - err := fd.Close() - if err != nil { - return err - } - } - return nil - } - } else if indexReader != nil { - isIndexed = func(term []byte) bool { - reader, err := indexReader.TermFieldReader(term, field, false, false, false) - if err != nil || reader == nil { - return false - } - if reader.Count() == 0 { - _ = reader.Close() - return false - } - _ = reader.Close() - return true - } - - } else { - isIndexed = func([]byte) bool { - return true - } - } - return isIndexed, closeF, err -} - -func buildRectFilter(dvReader index.DocValueReader, field string, - minLon, minLat, maxLon, maxLat float64) FilterFunc { +func buildRectFilter(dvReader segment.DocumentValueReader, minLon, minLat, maxLon, maxLat float64) FilterFunc { return func(d *search.DocumentMatch) bool { // check geo matches against all numeric type terms indexed var lons, lats []float64 var found bool - err := 
dvReader.VisitDocValues(d.IndexInternalID, func(field string, term []byte) { + err := dvReader.VisitDocumentValues(d.Number, func(field string, term []byte) { // only consider the values which are shifted 0 prefixCoded := numeric.PrefixCoded(term) shift, err := prefixCoded.Shift() @@ -209,31 +164,59 @@ func buildRectFilter(dvReader index.DocValueReader, field string, } } +type closeFunc func() error + +func buildIsIndexedFunc(indexReader search.Reader, field string) (isIndexed filterFunc, closeF closeFunc, err error) { + if indexReader != nil { + var dictLookup segment.DictionaryLookup + dictLookup, err = indexReader.DictionaryLookup(field) + if err != nil { + return nil, nil, err + } + + isIndexed = func(term []byte) bool { + found, err2 := dictLookup.Contains(term) + return err2 == nil && found + } + + closeF = dictLookup.Close + } else { + isIndexed = func([]byte) bool { + return true + } + } + return isIndexed, closeF, err +} + +const maxValidShift = 63 + type geoRangeCompute struct { - preallocBytesLen int - preallocBytes []byte + preAllocBytesLen int + preAllocBytes []byte sminLon, sminLat, smaxLon, smaxLat float64 checkBoundaries bool onBoundary, notOnBoundary [][]byte isIndexed func(term []byte) bool + geoDetailLevel uint + precisionStep uint } func (grc *geoRangeCompute) makePrefixCoded(in int64, shift uint) (rv numeric.PrefixCoded) { - if len(grc.preallocBytes) <= 0 { - grc.preallocBytesLen = grc.preallocBytesLen * 2 - grc.preallocBytes = make([]byte, grc.preallocBytesLen) + if len(grc.preAllocBytes) == 0 { + grc.preAllocBytesLen *= 2 + grc.preAllocBytes = make([]byte, grc.preAllocBytesLen) } - rv, grc.preallocBytes, _ = - numeric.NewPrefixCodedInt64Prealloc(in, shift, grc.preallocBytes) + rv, grc.preAllocBytes, _ = + numeric.NewPrefixCodedInt64Prealloc(in, shift, grc.preAllocBytes) return rv } func (grc *geoRangeCompute) computeGeoRange(term uint64, shift uint) { - split := term | uint64(0x1)<> 1 + level := ((geo.GeoBits << 1) - res) >> 1 - within := 
res%document.GeoPrecisionStep == 0 && + within := res%grc.precisionStep == 0 && geo.RectWithin(minLon, minLat, maxLon, maxLat, grc.sminLon, grc.sminLat, grc.smaxLon, grc.smaxLat) - if within || (level == geoDetailLevel && + if within || (level == grc.geoDetailLevel && geo.RectIntersects(minLon, minLat, maxLon, maxLat, grc.sminLon, grc.sminLat, grc.smaxLon, grc.smaxLat)) { codedTerm := grc.makePrefixCoded(int64(start), res) @@ -265,7 +248,7 @@ func (grc *geoRangeCompute) relateAndRecurse(start, end uint64, res uint) { grc.notOnBoundary = append(grc.notOnBoundary, codedTerm) } } - } else if level < geoDetailLevel && + } else if level < grc.geoDetailLevel && geo.RectIntersects(minLon, minLat, maxLon, maxLat, grc.sminLon, grc.sminLat, grc.smaxLon, grc.smaxLat) { grc.computeGeoRange(start, res-1) diff --git a/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_geopointdistance.go b/vendor/github.com/blugelabs/bluge/search/searcher/search_geopointdistance.go similarity index 65% rename from vendor/github.com/blevesearch/bleve/v2/search/searcher/search_geopointdistance.go rename to vendor/github.com/blugelabs/bluge/search/searcher/search_geopointdistance.go index c62a2a5b7..16ba0752d 100644 --- a/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_geopointdistance.go +++ b/vendor/github.com/blugelabs/bluge/search/searcher/search_geopointdistance.go @@ -1,4 +1,4 @@ -// Copyright (c) 2017 Couchbase, Inc. +// Copyright (c) 2020 Couchbase, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -15,15 +15,17 @@ package searcher import ( - "github.com/blevesearch/bleve/v2/geo" - "github.com/blevesearch/bleve/v2/numeric" - "github.com/blevesearch/bleve/v2/search" - index "github.com/blevesearch/bleve_index_api" + "github.com/blugelabs/bluge/numeric" + "github.com/blugelabs/bluge/numeric/geo" + "github.com/blugelabs/bluge/search" + "github.com/blugelabs/bluge/search/similarity" + segment "github.com/blugelabs/bluge_segment_api" ) -func NewGeoPointDistanceSearcher(indexReader index.IndexReader, centerLon, - centerLat, dist float64, field string, boost float64, - options search.SearcherOptions) (search.Searcher, error) { +func NewGeoPointDistanceSearcher(indexReader search.Reader, centerLon, + centerLat, dist float64, field string, boost float64, scorer search.Scorer, + compScorer search.CompositeScorer, options search.SearcherOptions, + precisionStep uint) (search.Searcher, error) { // compute bounding box containing the circle topLeftLon, topLeftLat, bottomRightLon, bottomRightLat, err := geo.RectFromPointDistance(centerLon, centerLat, dist) @@ -34,47 +36,47 @@ func NewGeoPointDistanceSearcher(indexReader index.IndexReader, centerLon, // build a searcher for the box boxSearcher, err := boxSearcher(indexReader, topLeftLon, topLeftLat, bottomRightLon, bottomRightLat, - field, boost, options, false) + field, boost, scorer, compScorer, options, false, precisionStep) if err != nil { return nil, err } - dvReader, err := indexReader.DocValueReader([]string{field}) + dvReader, err := indexReader.DocumentValueReader([]string{field}) if err != nil { return nil, err } // wrap it in a filtering searcher which checks the actual distance return NewFilteringSearcher(boxSearcher, - buildDistFilter(dvReader, field, centerLon, centerLat, dist)), nil + buildDistFilter(dvReader, centerLon, centerLat, dist)), nil } // boxSearcher builds a searcher for the described bounding box // if the desired box crosses the dateline, it is automatically split into // two boxes joined 
through a disjunction searcher -func boxSearcher(indexReader index.IndexReader, +func boxSearcher(indexReader search.Reader, topLeftLon, topLeftLat, bottomRightLon, bottomRightLat float64, - field string, boost float64, options search.SearcherOptions, checkBoundaries bool) ( - search.Searcher, error) { + field string, boost float64, scorer search.Scorer, compScorer search.CompositeScorer, + options search.SearcherOptions, checkBoundaries bool, precisionStep uint) (search.Searcher, error) { if bottomRightLon < topLeftLon { // cross date line, rewrite as two parts leftSearcher, err := NewGeoBoundingBoxSearcher(indexReader, -180, bottomRightLat, bottomRightLon, topLeftLat, - field, boost, options, checkBoundaries) + field, boost, scorer, compScorer, options, checkBoundaries, precisionStep) if err != nil { return nil, err } rightSearcher, err := NewGeoBoundingBoxSearcher(indexReader, - topLeftLon, bottomRightLat, 180, topLeftLat, field, boost, options, - checkBoundaries) + topLeftLon, bottomRightLat, 180, topLeftLat, field, boost, scorer, compScorer, options, + checkBoundaries, precisionStep) if err != nil { _ = leftSearcher.Close() return nil, err } boxSearcher, err := NewDisjunctionSearcher(indexReader, - []search.Searcher{leftSearcher, rightSearcher}, 0, options) + []search.Searcher{leftSearcher, rightSearcher}, 0, similarity.NewCompositeSumScorer(), options) if err != nil { _ = leftSearcher.Close() _ = rightSearcher.Close() @@ -85,22 +87,21 @@ func boxSearcher(indexReader index.IndexReader, // build geoboundingbox searcher for that bounding box boxSearcher, err := NewGeoBoundingBoxSearcher(indexReader, - topLeftLon, bottomRightLat, bottomRightLon, topLeftLat, field, boost, - options, checkBoundaries) + topLeftLon, bottomRightLat, bottomRightLon, topLeftLat, field, boost, scorer, + compScorer, options, checkBoundaries, precisionStep) if err != nil { return nil, err } return boxSearcher, nil } -func buildDistFilter(dvReader index.DocValueReader, field string, - 
centerLon, centerLat, maxDist float64) FilterFunc { +func buildDistFilter(dvReader segment.DocumentValueReader, centerLon, centerLat, maxDist float64) FilterFunc { return func(d *search.DocumentMatch) bool { // check geo matches against all numeric type terms indexed var lons, lats []float64 var found bool - err := dvReader.VisitDocValues(d.IndexInternalID, func(field string, term []byte) { + err := dvReader.VisitDocumentValues(d.Number, func(field string, term []byte) { // only consider the values which are shifted 0 prefixCoded := numeric.PrefixCoded(term) shift, err := prefixCoded.Shift() diff --git a/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_geopolygon.go b/vendor/github.com/blugelabs/bluge/search/searcher/search_geopolygon.go similarity index 74% rename from vendor/github.com/blevesearch/bleve/v2/search/searcher/search_geopolygon.go rename to vendor/github.com/blugelabs/bluge/search/searcher/search_geopolygon.go index 9f7e61d9e..18313e12a 100644 --- a/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_geopolygon.go +++ b/vendor/github.com/blugelabs/bluge/search/searcher/search_geopolygon.go @@ -1,4 +1,4 @@ -// Copyright (c) 2019 Couchbase, Inc. +// Copyright (c) 2020 Couchbase, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -16,19 +16,23 @@ package searcher import ( "fmt" - "github.com/blevesearch/bleve/v2/geo" - "github.com/blevesearch/bleve/v2/numeric" - "github.com/blevesearch/bleve/v2/search" - index "github.com/blevesearch/bleve_index_api" "math" + + segment "github.com/blugelabs/bluge_segment_api" + + "github.com/blugelabs/bluge/numeric" + "github.com/blugelabs/bluge/numeric/geo" + "github.com/blugelabs/bluge/search" ) -func NewGeoBoundedPolygonSearcher(indexReader index.IndexReader, - polygon []geo.Point, field string, boost float64, - options search.SearcherOptions) (search.Searcher, error) { +const minPointsInPolygon = 3 - if len(polygon) < 3 { - return nil, fmt.Errorf("Too few points specified for the polygon boundary") +func NewGeoBoundedPolygonSearcher(indexReader search.Reader, + polygon []geo.Point, field string, boost float64, scorer search.Scorer, + compScorer search.CompositeScorer, options search.SearcherOptions, + precisionStep uint) (search.Searcher, error) { + if len(polygon) < minPointsInPolygon { + return nil, fmt.Errorf("too few points specified for the polygon boundary") } // compute the bounding box enclosing the polygon @@ -41,19 +45,18 @@ func NewGeoBoundedPolygonSearcher(indexReader index.IndexReader, // build a searcher for the bounding box on the polygon boxSearcher, err := boxSearcher(indexReader, topLeftLon, topLeftLat, bottomRightLon, bottomRightLat, - field, boost, options, true) + field, boost, scorer, compScorer, options, true, precisionStep) if err != nil { return nil, err } - dvReader, err := indexReader.DocValueReader([]string{field}) + dvReader, err := indexReader.DocumentValueReader([]string{field}) if err != nil { return nil, err } // wrap it in a filtering searcher that checks for the polygon inclusivity - return NewFilteringSearcher(boxSearcher, - buildPolygonFilter(dvReader, field, polygon)), nil + return NewFilteringSearcher(boxSearcher, buildPolygonFilter(dvReader, polygon)), nil } const float64EqualityThreshold = 1e-6 @@ -65,14 
+68,13 @@ func almostEqual(a, b float64) bool { // buildPolygonFilter returns true if the point lies inside the // polygon. It is based on the ray-casting technique as referred // here: https://wrf.ecse.rpi.edu/nikola/pubdetails/pnpoly.html -func buildPolygonFilter(dvReader index.DocValueReader, field string, - polygon []geo.Point) FilterFunc { +func buildPolygonFilter(dvReader segment.DocumentValueReader, polygon []geo.Point) FilterFunc { return func(d *search.DocumentMatch) bool { // check geo matches against all numeric type terms indexed var lons, lats []float64 var found bool - err := dvReader.VisitDocValues(d.IndexInternalID, func(field string, term []byte) { + err := dvReader.VisitDocumentValues(d.Number, func(field string, term []byte) { // only consider the values which are shifted 0 prefixCoded := numeric.PrefixCoded(term) shift, err := prefixCoded.Shift() @@ -90,7 +92,7 @@ func buildPolygonFilter(dvReader index.DocValueReader, field string, // the polygon. ie it might fail for certain points on the polygon boundaries. if err == nil && found { nVertices := len(polygon) - if len(polygon) < 3 { + if len(polygon) < minPointsInPolygon { return false } rayIntersectsSegment := func(point, a, b geo.Point) bool { diff --git a/vendor/github.com/blugelabs/bluge/search/searcher/search_match_all.go b/vendor/github.com/blugelabs/bluge/search/searcher/search_match_all.go new file mode 100644 index 000000000..84d0495f6 --- /dev/null +++ b/vendor/github.com/blugelabs/bluge/search/searcher/search_match_all.go @@ -0,0 +1,112 @@ +// Copyright (c) 2020 Couchbase, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package searcher + +import ( + "github.com/blugelabs/bluge/search" + segment "github.com/blugelabs/bluge_segment_api" +) + +type MatchAllSearcher struct { + reader segment.PostingsIterator + scorer search.Scorer + indexReader search.Reader + options search.SearcherOptions +} + +func NewMatchAllSearcher(indexReader search.Reader, boost float64, scorer search.Scorer, + options search.SearcherOptions) (*MatchAllSearcher, error) { + reader, err := indexReader.PostingsIterator(nil, "", + false, false, false) + if err != nil { + return nil, err + } + return &MatchAllSearcher{ + indexReader: indexReader, + reader: reader, + scorer: scorer, + options: options, + }, nil +} + +func (s *MatchAllSearcher) Size() int { + return reflectStaticSizeMatchAllSearcher + sizeOfPtr + + s.reader.Size() +} + +func (s *MatchAllSearcher) Count() uint64 { + return s.reader.Count() +} + +func (s *MatchAllSearcher) Next(ctx *search.Context) (*search.DocumentMatch, error) { + tfd, err := s.reader.Next() + if err != nil { + return nil, err + } + + if tfd == nil { + return nil, nil + } + + // score match + docMatch := s.buildDocumentMatch(ctx, tfd) + + // return doc match + return docMatch, nil +} + +func (s *MatchAllSearcher) Advance(ctx *search.Context, number uint64) (*search.DocumentMatch, error) { + tfd, err := s.reader.Advance(number) + if err != nil { + return nil, err + } + + if tfd == nil { + return nil, nil + } + + // score match + docMatch := s.buildDocumentMatch(ctx, tfd) + + // return doc match + return docMatch, nil +} + +func (s *MatchAllSearcher) Close() error 
{ + return s.reader.Close() +} + +func (s *MatchAllSearcher) Min() int { + return 0 +} + +func (s *MatchAllSearcher) DocumentMatchPoolSize() int { + return 1 +} + +func (s *MatchAllSearcher) buildDocumentMatch(ctx *search.Context, termMatch segment.Posting) *search.DocumentMatch { + rv := ctx.DocumentMatchPool.Get() + rv.SetReader(s.indexReader) + rv.Number = termMatch.Number() + + if s.options.Explain { + rv.Explanation = s.scorer.Explain(termMatch.Frequency(), termMatch.Norm()) + rv.Score = rv.Explanation.Value + } else { + rv.Score = s.scorer.Score(termMatch.Frequency(), termMatch.Norm()) + } + + return rv +} diff --git a/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_match_none.go b/vendor/github.com/blugelabs/bluge/search/searcher/search_match_none.go similarity index 51% rename from vendor/github.com/blevesearch/bleve/v2/search/searcher/search_match_none.go rename to vendor/github.com/blugelabs/bluge/search/searcher/search_match_none.go index b7f76941e..76fbe93a9 100644 --- a/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_match_none.go +++ b/vendor/github.com/blugelabs/bluge/search/searcher/search_match_none.go @@ -1,4 +1,4 @@ -// Copyright (c) 2014 Couchbase, Inc. +// Copyright (c) 2020 Couchbase, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -15,32 +15,17 @@ package searcher import ( - "reflect" - - "github.com/blevesearch/bleve/v2/search" - "github.com/blevesearch/bleve/v2/size" - index "github.com/blevesearch/bleve_index_api" + "github.com/blugelabs/bluge/search" ) -var reflectStaticSizeMatchNoneSearcher int - -func init() { - var mns MatchNoneSearcher - reflectStaticSizeMatchNoneSearcher = int(reflect.TypeOf(mns).Size()) -} - -type MatchNoneSearcher struct { - indexReader index.IndexReader -} +type MatchNoneSearcher struct{} -func NewMatchNoneSearcher(indexReader index.IndexReader) (*MatchNoneSearcher, error) { - return &MatchNoneSearcher{ - indexReader: indexReader, - }, nil +func NewMatchNoneSearcher(indexReader search.Reader, options search.SearcherOptions) (*MatchNoneSearcher, error) { + return &MatchNoneSearcher{}, nil } func (s *MatchNoneSearcher) Size() int { - return reflectStaticSizeMatchNoneSearcher + size.SizeOfPtr + return reflectStaticSizeMatchNoneSearcher + sizeOfPtr } func (s *MatchNoneSearcher) Count() uint64 { @@ -48,18 +33,16 @@ func (s *MatchNoneSearcher) Count() uint64 { } func (s *MatchNoneSearcher) Weight() float64 { - return 0.0 + return 0 } -func (s *MatchNoneSearcher) SetQueryNorm(qnorm float64) { - -} +func (s *MatchNoneSearcher) SetQueryNorm(_ float64) {} -func (s *MatchNoneSearcher) Next(ctx *search.SearchContext) (*search.DocumentMatch, error) { +func (s *MatchNoneSearcher) Next(ctx *search.Context) (*search.DocumentMatch, error) { return nil, nil } -func (s *MatchNoneSearcher) Advance(ctx *search.SearchContext, ID index.IndexInternalID) (*search.DocumentMatch, error) { +func (s *MatchNoneSearcher) Advance(ctx *search.Context, number uint64) (*search.DocumentMatch, error) { return nil, nil } diff --git a/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_multi_term.go b/vendor/github.com/blugelabs/bluge/search/searcher/search_multi_term.go similarity index 57% rename from vendor/github.com/blevesearch/bleve/v2/search/searcher/search_multi_term.go rename to 
vendor/github.com/blugelabs/bluge/search/searcher/search_multi_term.go index 523bf4b55..08be51c3e 100644 --- a/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_multi_term.go +++ b/vendor/github.com/blugelabs/bluge/search/searcher/search_multi_term.go @@ -1,4 +1,4 @@ -// Copyright (c) 2017 Couchbase, Inc. +// Copyright (c) 2020 Couchbase, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -16,40 +16,59 @@ package searcher import ( "fmt" - "github.com/blevesearch/bleve/v2/search" - index "github.com/blevesearch/bleve_index_api" + + "github.com/blugelabs/bluge/search" ) -func NewMultiTermSearcher(indexReader index.IndexReader, terms []string, - field string, boost float64, options search.SearcherOptions, limit bool) ( +func NewMultiTermSearcher(indexReader search.Reader, terms []string, + field string, boost float64, scorer search.Scorer, compScorer search.CompositeScorer, + options search.SearcherOptions, limit bool) ( search.Searcher, error) { - if tooManyClauses(len(terms)) { if optionsDisjunctionOptimizable(options) { - return optimizeMultiTermSearcher(indexReader, terms, field, boost, options) + return optimizeMultiTermSearcher(indexReader, terms, nil, field, boost, scorer, options) } if limit { return nil, tooManyClausesErr(field, len(terms)) } } - qsearchers, err := makeBatchSearchers(indexReader, terms, field, boost, options) + qsearchers, err := makeBatchSearchers(indexReader, terms, nil, field, boost, scorer, options) if err != nil { return nil, err } // build disjunction searcher of these ranges - return newMultiTermSearcherInternal(indexReader, qsearchers, field, boost, - options, limit) + return newMultiTermSearcherInternal(indexReader, qsearchers, compScorer, options, limit) } -func NewMultiTermSearcherBytes(indexReader index.IndexReader, terms [][]byte, - field string, boost float64, options search.SearcherOptions, limit bool) ( - search.Searcher, 
error) { +func NewMultiTermSearcherIndividualBoost(indexReader search.Reader, terms []string, termBoosts []float64, + field string, boost float64, scorer search.Scorer, compScorer search.CompositeScorer, + options search.SearcherOptions, limit bool) (search.Searcher, error) { + if tooManyClauses(len(terms)) { + if optionsDisjunctionOptimizable(options) { + return optimizeMultiTermSearcher(indexReader, terms, termBoosts, field, boost, scorer, options) + } + if limit { + return nil, tooManyClausesErr(field, len(terms)) + } + } + qsearchers, err := makeBatchSearchers(indexReader, terms, termBoosts, field, boost, scorer, options) + if err != nil { + return nil, err + } + + // build disjunction searcher of these ranges + return newMultiTermSearcherInternal(indexReader, qsearchers, compScorer, options, limit) +} + +func NewMultiTermSearcherBytes(indexReader search.Reader, terms [][]byte, + field string, boost float64, scorer search.Scorer, compScorer search.CompositeScorer, + options search.SearcherOptions, limit bool) (search.Searcher, error) { if tooManyClauses(len(terms)) { if optionsDisjunctionOptimizable(options) { - return optimizeMultiTermSearcherBytes(indexReader, terms, field, boost, options) + return optimizeMultiTermSearcherBytes(indexReader, terms, field, boost, scorer, options) } if limit { @@ -57,23 +76,21 @@ func NewMultiTermSearcherBytes(indexReader index.IndexReader, terms [][]byte, } } - qsearchers, err := makeBatchSearchersBytes(indexReader, terms, field, boost, options) + qsearchers, err := makeBatchSearchersBytes(indexReader, terms, field, boost, scorer, options) if err != nil { return nil, err } // build disjunction searcher of these ranges - return newMultiTermSearcherInternal(indexReader, qsearchers, field, boost, - options, limit) + return newMultiTermSearcherInternal(indexReader, qsearchers, compScorer, options, limit) } -func newMultiTermSearcherInternal(indexReader index.IndexReader, - searchers []search.Searcher, field string, boost float64, 
+func newMultiTermSearcherInternal(indexReader search.Reader, + searchers []search.Searcher, compScorer search.CompositeScorer, options search.SearcherOptions, limit bool) ( search.Searcher, error) { - // build disjunction searcher of these ranges - searcher, err := newDisjunctionSearcher(indexReader, searchers, 0, options, + searcher, err := newDisjunctionSearcher(indexReader, searchers, 0, compScorer, options, limit) if err != nil { for _, s := range searchers { @@ -85,20 +102,27 @@ func newMultiTermSearcherInternal(indexReader index.IndexReader, return searcher, nil } -func optimizeMultiTermSearcher(indexReader index.IndexReader, terms []string, - field string, boost float64, options search.SearcherOptions) ( +func optimizeMultiTermSearcher(indexReader search.Reader, terms []string, termBoosts []float64, + field string, boost float64, scorer search.Scorer, options search.SearcherOptions) ( search.Searcher, error) { var finalSearcher search.Searcher for len(terms) > 0 { var batchTerms []string + var batchBoosts []float64 if len(terms) > DisjunctionMaxClauseCount { batchTerms = terms[:DisjunctionMaxClauseCount] terms = terms[DisjunctionMaxClauseCount:] + if termBoosts != nil { + batchBoosts = termBoosts[:DisjunctionMaxClauseCount] + termBoosts = termBoosts[DisjunctionMaxClauseCount:] + } } else { batchTerms = terms terms = nil + batchBoosts = termBoosts + termBoosts = nil } - batch, err := makeBatchSearchers(indexReader, batchTerms, field, boost, options) + batch, err := makeBatchSearchers(indexReader, batchTerms, batchBoosts, field, boost, scorer, options) if err != nil { return nil, err } @@ -127,9 +151,8 @@ func optimizeMultiTermSearcher(indexReader index.IndexReader, terms []string, return finalSearcher, nil } -func makeBatchSearchers(indexReader index.IndexReader, terms []string, field string, - boost float64, options search.SearcherOptions) ([]search.Searcher, error) { - +func makeBatchSearchers(indexReader search.Reader, terms []string, termBoosts 
[]float64, field string, + boost float64, scorer search.Scorer, options search.SearcherOptions) ([]search.Searcher, error) { qsearchers := make([]search.Searcher, len(terms)) qsearchersClose := func() { for _, searcher := range qsearchers { @@ -140,7 +163,11 @@ func makeBatchSearchers(indexReader index.IndexReader, terms []string, field str } for i, term := range terms { var err error - qsearchers[i], err = NewTermSearcher(indexReader, term, field, boost, options) + if termBoosts != nil { + qsearchers[i], err = NewTermSearcher(indexReader, term, field, boost*termBoosts[i], scorer, options) + } else { + qsearchers[i], err = NewTermSearcher(indexReader, term, field, boost, scorer, options) + } if err != nil { qsearchersClose() return nil, err @@ -149,10 +176,9 @@ func makeBatchSearchers(indexReader index.IndexReader, terms []string, field str return qsearchers, nil } -func optimizeMultiTermSearcherBytes(indexReader index.IndexReader, terms [][]byte, - field string, boost float64, options search.SearcherOptions) ( +func optimizeMultiTermSearcherBytes(indexReader search.Reader, terms [][]byte, + field string, boost float64, scorer search.Scorer, options search.SearcherOptions) ( search.Searcher, error) { - var finalSearcher search.Searcher for len(terms) > 0 { var batchTerms [][]byte @@ -163,7 +189,7 @@ func optimizeMultiTermSearcherBytes(indexReader index.IndexReader, terms [][]byt batchTerms = terms terms = nil } - batch, err := makeBatchSearchersBytes(indexReader, batchTerms, field, boost, options) + batch, err := makeBatchSearchersBytes(indexReader, batchTerms, field, boost, scorer, options) if err != nil { return nil, err } @@ -192,9 +218,8 @@ func optimizeMultiTermSearcherBytes(indexReader index.IndexReader, terms [][]byt return finalSearcher, nil } -func makeBatchSearchersBytes(indexReader index.IndexReader, terms [][]byte, field string, - boost float64, options search.SearcherOptions) ([]search.Searcher, error) { - +func makeBatchSearchersBytes(indexReader 
search.Reader, terms [][]byte, field string, + boost float64, scorer search.Scorer, options search.SearcherOptions) ([]search.Searcher, error) { qsearchers := make([]search.Searcher, len(terms)) qsearchersClose := func() { for _, searcher := range qsearchers { @@ -205,7 +230,7 @@ func makeBatchSearchersBytes(indexReader index.IndexReader, terms [][]byte, fiel } for i, term := range terms { var err error - qsearchers[i], err = NewTermSearcherBytes(indexReader, term, field, boost, options) + qsearchers[i], err = NewTermSearcherBytes(indexReader, term, field, boost, scorer, options) if err != nil { qsearchersClose() return nil, err diff --git a/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_numeric_range.go b/vendor/github.com/blugelabs/bluge/search/searcher/search_numeric_range.go similarity index 63% rename from vendor/github.com/blevesearch/bleve/v2/search/searcher/search_numeric_range.go rename to vendor/github.com/blugelabs/bluge/search/searcher/search_numeric_range.go index 6ab5147be..07976dacc 100644 --- a/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_numeric_range.go +++ b/vendor/github.com/blugelabs/bluge/search/searcher/search_numeric_range.go @@ -1,4 +1,4 @@ -// Copyright (c) 2014 Couchbase, Inc. +// Copyright (c) 2020 Couchbase, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -17,117 +17,75 @@ package searcher import ( "bytes" "math" - "sort" - "github.com/blevesearch/bleve/v2/numeric" - "github.com/blevesearch/bleve/v2/search" - index "github.com/blevesearch/bleve_index_api" + segment "github.com/blugelabs/bluge_segment_api" + + "github.com/blugelabs/bluge/numeric" + "github.com/blugelabs/bluge/search" ) -func NewNumericRangeSearcher(indexReader index.IndexReader, - min *float64, max *float64, inclusiveMin, inclusiveMax *bool, field string, - boost float64, options search.SearcherOptions) (search.Searcher, error) { - // account for unbounded edges - if min == nil { - negInf := math.Inf(-1) - min = &negInf - } - if max == nil { - Inf := math.Inf(1) - max = &Inf - } - if inclusiveMin == nil { - defaultInclusiveMin := true - inclusiveMin = &defaultInclusiveMin +func NewNumericRangeSearcher(indexReader search.Reader, + min, max float64, inclusiveMin, inclusiveMax bool, field string, + boost float64, scorer search.Scorer, compScorer search.CompositeScorer, + options search.SearcherOptions) (search.Searcher, error) { + var minInt64 int64 + if math.IsInf(min, -1) { + minInt64 = math.MinInt64 + } else { + minInt64 = numeric.Float64ToInt64(min) } - if inclusiveMax == nil { - defaultInclusiveMax := false - inclusiveMax = &defaultInclusiveMax + var maxInt64 int64 + if math.IsInf(max, 1) { + maxInt64 = math.MaxInt64 + } else { + maxInt64 = numeric.Float64ToInt64(max) } + // find all the ranges - minInt64 := numeric.Float64ToInt64(*min) - if !*inclusiveMin && minInt64 != math.MaxInt64 { + if !inclusiveMin && minInt64 != math.MaxInt64 { minInt64++ } - maxInt64 := numeric.Float64ToInt64(*max) - if !*inclusiveMax && maxInt64 != math.MinInt64 { + if !inclusiveMax && maxInt64 != math.MinInt64 { maxInt64-- } - var fieldDict index.FieldDictContains + var fieldDict segment.DictionaryLookup var isIndexed filterFunc var err error - if irr, ok := indexReader.(index.IndexReaderContains); ok { - fieldDict, err = irr.FieldDictContains(field) - if err != nil { 
- return nil, err - } - isIndexed = func(term []byte) bool { - found, err := fieldDict.Contains(term) - return err == nil && found - } + fieldDict, err = indexReader.DictionaryLookup(field) + if err != nil { + return nil, err + } + + isIndexed = func(term []byte) bool { + found, err2 := fieldDict.Contains(term) + return err2 == nil && found } // FIXME hard-coded precision, should match field declaration termRanges := splitInt64Range(minInt64, maxInt64, 4) terms := termRanges.Enumerate(isIndexed) if fieldDict != nil { - if fd, ok := fieldDict.(index.FieldDict); ok { - if err = fd.Close(); err != nil { - return nil, err - } + err = fieldDict.Close() + if err != nil { + return nil, err } } if len(terms) < 1 { // cannot return MatchNoneSearcher because of interaction with // commit f391b991c20f02681bacd197afc6d8aed444e132 - return NewMultiTermSearcherBytes(indexReader, terms, field, boost, options, - true) - } - - // for upside_down - if isIndexed == nil { - terms, err = filterCandidateTerms(indexReader, terms, field) - if err != nil { - return nil, err - } + return NewMultiTermSearcherBytes(indexReader, terms, field, boost, scorer, compScorer, + options, true) } if tooManyClauses(len(terms)) { return nil, tooManyClausesErr(field, len(terms)) } - return NewMultiTermSearcherBytes(indexReader, terms, field, boost, options, - true) -} - -func filterCandidateTerms(indexReader index.IndexReader, - terms [][]byte, field string) (rv [][]byte, err error) { - - fieldDict, err := indexReader.FieldDictRange(field, terms[0], terms[len(terms)-1]) - if err != nil { - return nil, err - } - - // enumerate the terms and check against list of terms - tfd, err := fieldDict.Next() - for err == nil && tfd != nil { - termBytes := []byte(tfd.Term) - i := sort.Search(len(terms), func(i int) bool { return bytes.Compare(terms[i], termBytes) >= 0 }) - if i < len(terms) && bytes.Compare(terms[i], termBytes) == 0 { - rv = append(rv, terms[i]) - } - terms = terms[i:] - tfd, err = fieldDict.Next() - 
} - - if cerr := fieldDict.Close(); cerr != nil && err == nil { - err = cerr - } - - return rv, err + return NewMultiTermSearcherBytes(indexReader, terms, field, boost, scorer, compScorer, + options, true) } type termRange struct { @@ -155,7 +113,7 @@ func incrementBytes(in []byte) []byte { rv := make([]byte, len(in)) copy(rv, in) for i := len(rv) - 1; i >= 0; i-- { - rv[i] = rv[i] + 1 + rv[i]++ if rv[i] != 0 { // didn't overflow, so stop break @@ -182,7 +140,6 @@ func splitInt64Range(minBound, maxBound int64, precisionStep uint) termRanges { } for shift := uint(0); ; shift += precisionStep { - diff := int64(1) << (shift + precisionStep) mask := ((int64(1) << precisionStep) - int64(1)) << shift hasLower := (minBound & mask) != int64(0) diff --git a/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_phrase.go b/vendor/github.com/blugelabs/bluge/search/searcher/search_phrase.go similarity index 72% rename from vendor/github.com/blevesearch/bleve/v2/search/searcher/search_phrase.go rename to vendor/github.com/blugelabs/bluge/search/searcher/search_phrase.go index c262fd914..364b282bd 100644 --- a/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_phrase.go +++ b/vendor/github.com/blugelabs/bluge/search/searcher/search_phrase.go @@ -1,4 +1,4 @@ -// Copyright (c) 2014 Couchbase, Inc. +// Copyright (c) 2020 Couchbase, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -16,34 +16,25 @@ package searcher import ( "fmt" - "math" - "reflect" - "github.com/blevesearch/bleve/v2/search" - "github.com/blevesearch/bleve/v2/size" - index "github.com/blevesearch/bleve_index_api" -) - -var reflectStaticSizePhraseSearcher int + "github.com/blugelabs/bluge/search/similarity" -func init() { - var ps PhraseSearcher - reflectStaticSizePhraseSearcher = int(reflect.TypeOf(ps).Size()) -} + "github.com/blugelabs/bluge/search" +) type PhraseSearcher struct { mustSearcher search.Searcher - queryNorm float64 currMust *search.DocumentMatch terms [][]string path phrasePath paths []phrasePath locations []search.Location initialized bool + slop int } func (s *PhraseSearcher) Size() int { - sizeInBytes := reflectStaticSizePhraseSearcher + size.SizeOfPtr + sizeInBytes := reflectStaticSizePhraseSearcher + sizeOfPtr if s.mustSearcher != nil { sizeInBytes += s.mustSearcher.Size() @@ -54,31 +45,30 @@ func (s *PhraseSearcher) Size() int { } for _, entry := range s.terms { - sizeInBytes += size.SizeOfSlice + sizeInBytes += sizeOfSlice for _, entry1 := range entry { - sizeInBytes += size.SizeOfString + len(entry1) + sizeInBytes += sizeOfString + len(entry1) } } return sizeInBytes } -func NewPhraseSearcher(indexReader index.IndexReader, terms []string, field string, options search.SearcherOptions) (*PhraseSearcher, error) { - // turn flat terms []string into [][]string - mterms := make([][]string, len(terms)) - for i, term := range terms { - mterms[i] = []string{term} - } - return NewMultiPhraseSearcher(indexReader, mterms, field, options) +func NewMultiPhraseSearcher(indexReader search.Reader, terms [][]string, field string, scorer search.Scorer, + options search.SearcherOptions) (*PhraseSearcher, error) { + return NewSloppyMultiPhraseSearcher(indexReader, terms, field, 0, scorer, options) } -func NewMultiPhraseSearcher(indexReader index.IndexReader, terms [][]string, field string, options search.SearcherOptions) (*PhraseSearcher, error) { +// 
NewSloppyMultiPhraseSearcher create a multi-phrase searcher which tolerates a specified "sloppyness" +// the value of the slop parameter restricts the distance between the terms +func NewSloppyMultiPhraseSearcher(indexReader search.Reader, terms [][]string, field string, slop int, + scorer search.Scorer, options search.SearcherOptions) (*PhraseSearcher, error) { options.IncludeTermVectors = true var termPositionSearchers []search.Searcher for _, termPos := range terms { if len(termPos) == 1 && termPos[0] != "" { // single term - ts, err := NewTermSearcher(indexReader, termPos[0], field, 1.0, options) + ts, err := NewTermSearcher(indexReader, termPos[0], field, 1.0, scorer, options) if err != nil { // close any searchers already opened for _, ts := range termPositionSearchers { @@ -94,7 +84,7 @@ func NewMultiPhraseSearcher(indexReader index.IndexReader, terms [][]string, fie if term == "" { continue } - ts, err := NewTermSearcher(indexReader, term, field, 1.0, options) + ts, err := NewTermSearcher(indexReader, term, field, 1.0, scorer, options) if err != nil { // close any searchers already opened for _, ts := range termPositionSearchers { @@ -104,19 +94,22 @@ func NewMultiPhraseSearcher(indexReader index.IndexReader, terms [][]string, fie } termSearchers = append(termSearchers, ts) } - disjunction, err := NewDisjunctionSearcher(indexReader, termSearchers, 1, options) + disjunction, err := NewDisjunctionSearcher(indexReader, termSearchers, 1, + similarity.NewCompositeSumScorer(), options) if err != nil { // close any searchers already opened for _, ts := range termPositionSearchers { _ = ts.Close() } - return nil, fmt.Errorf("phrase searcher error building term position disjunction searcher: %v", err) + return nil, fmt.Errorf("phrase searcher error building term position disjunction searcher: %v", + err) } termPositionSearchers = append(termPositionSearchers, disjunction) } } - mustSearcher, err := NewConjunctionSearcher(indexReader, termPositionSearchers, options) 
+ mustSearcher, err := NewConjunctionSearcher(indexReader, termPositionSearchers, + similarity.NewCompositeSumScorer(), options) if err != nil { // close any searchers already opened for _, ts := range termPositionSearchers { @@ -129,27 +122,13 @@ func NewMultiPhraseSearcher(indexReader index.IndexReader, terms [][]string, fie rv := PhraseSearcher{ mustSearcher: mustSearcher, terms: terms, - } - rv.computeQueryNorm() - return &rv, nil -} - -func (s *PhraseSearcher) computeQueryNorm() { - // first calculate sum of squared weights - sumOfSquaredWeights := 0.0 - if s.mustSearcher != nil { - sumOfSquaredWeights += s.mustSearcher.Weight() + slop: slop, } - // now compute query norm from this - s.queryNorm = 1.0 / math.Sqrt(sumOfSquaredWeights) - // finally tell all the downstream searchers the norm - if s.mustSearcher != nil { - s.mustSearcher.SetQueryNorm(s.queryNorm) - } + return &rv, nil } -func (s *PhraseSearcher) initSearchers(ctx *search.SearchContext) error { +func (s *PhraseSearcher) initSearchers(ctx *search.Context) error { err := s.advanceNextMust(ctx) if err != nil { return err @@ -159,7 +138,7 @@ func (s *PhraseSearcher) initSearchers(ctx *search.SearchContext) error { return nil } -func (s *PhraseSearcher) advanceNextMust(ctx *search.SearchContext) error { +func (s *PhraseSearcher) advanceNextMust(ctx *search.Context) error { var err error if s.mustSearcher != nil { @@ -175,15 +154,7 @@ func (s *PhraseSearcher) advanceNextMust(ctx *search.SearchContext) error { return nil } -func (s *PhraseSearcher) Weight() float64 { - return s.mustSearcher.Weight() -} - -func (s *PhraseSearcher) SetQueryNorm(qnorm float64) { - s.mustSearcher.SetQueryNorm(qnorm) -} - -func (s *PhraseSearcher) Next(ctx *search.SearchContext) (*search.DocumentMatch, error) { +func (s *PhraseSearcher) Next(ctx *search.Context) (*search.DocumentMatch, error) { if !s.initialized { err := s.initSearchers(ctx) if err != nil { @@ -193,7 +164,7 @@ func (s *PhraseSearcher) Next(ctx 
*search.SearchContext) (*search.DocumentMatch, for s.currMust != nil { // check this match against phrase constraints - rv := s.checkCurrMustMatch(ctx) + rv := s.checkCurrMustMatch() // prepare for next iteration (either loop or subsequent call to Next()) err := s.advanceNextMust(ctx) @@ -214,7 +185,7 @@ func (s *PhraseSearcher) Next(ctx *search.SearchContext) (*search.DocumentMatch, // pointed to by s.currMust (which satisifies the pre-condition searcher) // also satisfies the phase constraints. if so, it returns a DocumentMatch // for this document, otherwise nil -func (s *PhraseSearcher) checkCurrMustMatch(ctx *search.SearchContext) *search.DocumentMatch { +func (s *PhraseSearcher) checkCurrMustMatch() *search.DocumentMatch { s.locations = s.currMust.Complete(s.locations) locations := s.currMust.Locations @@ -227,7 +198,7 @@ func (s *PhraseSearcher) checkCurrMustMatch(ctx *search.SearchContext) *search.D // but, we note that phrase constraints can only be satisfied within // a single field, so we can check them each independently for field, tlm := range locations { - ftls = s.checkCurrMustMatchField(ctx, field, tlm, ftls) + ftls = s.checkCurrMustMatchField(field, tlm, ftls) } if len(ftls) > 0 { @@ -246,23 +217,21 @@ func (s *PhraseSearcher) checkCurrMustMatch(ctx *search.SearchContext) *search.D // satisfies the phase constraints (possibly more than once). 
if so, // the matching field term locations are appended to the provided // slice -func (s *PhraseSearcher) checkCurrMustMatchField(ctx *search.SearchContext, - field string, tlm search.TermLocationMap, +func (s *PhraseSearcher) checkCurrMustMatchField(field string, tlm search.TermLocationMap, ftls []search.FieldTermLocation) []search.FieldTermLocation { if s.path == nil { s.path = make(phrasePath, 0, len(s.terms)) } - s.paths = findPhrasePaths(0, nil, s.terms, tlm, s.path[:0], 0, s.paths[:0]) + s.paths = findPhrasePaths(0, s.terms, tlm, s.path[:0], s.slop, s.paths[:0]) for _, p := range s.paths { for _, pp := range p { ftls = append(ftls, search.FieldTermLocation{ Field: field, Term: pp.term, Location: search.Location{ - Pos: pp.loc.Pos, - Start: pp.loc.Start, - End: pp.loc.End, - ArrayPositions: pp.loc.ArrayPositions, + Pos: pp.loc.Pos, + Start: pp.loc.Start, + End: pp.loc.End, }, }) } @@ -304,9 +273,6 @@ func (p phrasePath) String() string { // arguments and return values. // // prevPos - the previous location, 0 on first invocation -// ap - array positions of the first candidate phrase part to -// which further recursive phrase parts must match, -// nil on initial invocation or when there are no array positions // phraseTerms - slice containing the phrase terms, // may contain empty string as placeholder (don't care) // tlm - the Term Location Map containing all relevant term locations @@ -319,7 +285,7 @@ func (p phrasePath) String() string { // rv - the final result being appended to by all the recursive calls // // returns slice of paths, or nil if invocation did not find any successul paths -func findPhrasePaths(prevPos uint64, ap search.ArrayPositions, phraseTerms [][]string, +func findPhrasePaths(prevPos int, phraseTerms [][]string, tlm search.TermLocationMap, p phrasePath, remainingSlop int, rv []phrasePath) []phrasePath { // no more terms if len(phraseTerms) < 1 { @@ -344,7 +310,7 @@ func findPhrasePaths(prevPos uint64, ap search.ArrayPositions, 
phraseTerms [][]s // if prevPos was 0, don't set it to 1 (as thats not a real abs pos) nextPos = 0 // don't advance nextPos if prevPos was 0 } - return findPhrasePaths(nextPos, ap, cdr, tlm, p, remainingSlop, rv) + return findPhrasePaths(nextPos, cdr, tlm, p, remainingSlop, rv) } // locations for this term @@ -352,11 +318,6 @@ func findPhrasePaths(prevPos uint64, ap search.ArrayPositions, phraseTerms [][]s locations := tlm[carTerm] LOCATIONS_LOOP: for _, loc := range locations { - if prevPos != 0 && !loc.ArrayPositions.Equals(ap) { - // if the array positions are wrong, can't match, try next location - continue - } - // compute distance from previous phrase term dist := 0 if prevPos != 0 { @@ -374,22 +335,22 @@ func findPhrasePaths(prevPos uint64, ap search.ArrayPositions, phraseTerms [][]s // this location works, add it to the path (but not for empty term) px := append(p, phrasePart{term: carTerm, loc: loc}) - rv = findPhrasePaths(loc.Pos, loc.ArrayPositions, cdr, tlm, px, remainingSlop-dist, rv) + rv = findPhrasePaths(loc.Pos, cdr, tlm, px, remainingSlop-dist, rv) } } } return rv } -func editDistance(p1, p2 uint64) int { - dist := int(p1 - p2) +func editDistance(p1, p2 int) int { + dist := p1 - p2 if dist < 0 { return -dist } return dist } -func (s *PhraseSearcher) Advance(ctx *search.SearchContext, ID index.IndexInternalID) (*search.DocumentMatch, error) { +func (s *PhraseSearcher) Advance(ctx *search.Context, number uint64) (*search.DocumentMatch, error) { if !s.initialized { err := s.initSearchers(ctx) if err != nil { @@ -397,7 +358,7 @@ func (s *PhraseSearcher) Advance(ctx *search.SearchContext, ID index.IndexIntern } } if s.currMust != nil { - if s.currMust.IndexInternalID.Compare(ID) >= 0 { + if s.currMust.Number >= number { return s.Next(ctx) } ctx.DocumentMatchPool.Put(s.currMust) @@ -406,7 +367,7 @@ func (s *PhraseSearcher) Advance(ctx *search.SearchContext, ID index.IndexIntern return nil, nil } var err error - s.currMust, err = 
s.mustSearcher.Advance(ctx, ID) + s.currMust, err = s.mustSearcher.Advance(ctx, number) if err != nil { return nil, err } diff --git a/vendor/github.com/blevesearch/bleve/v2/index/scorch/regexp.go b/vendor/github.com/blugelabs/bluge/search/searcher/search_regexp.go similarity index 60% rename from vendor/github.com/blevesearch/bleve/v2/index/scorch/regexp.go rename to vendor/github.com/blugelabs/bluge/search/searcher/search_regexp.go index 5a3584f51..36ca5a803 100644 --- a/vendor/github.com/blevesearch/bleve/v2/index/scorch/regexp.go +++ b/vendor/github.com/blugelabs/bluge/search/searcher/search_regexp.go @@ -12,14 +12,50 @@ // See the License for the specific language governing permissions and // limitations under the License. -package scorch +package searcher import ( "regexp/syntax" "github.com/blevesearch/vellum/regexp" + "github.com/blugelabs/bluge/search" ) +// NewRegexpStringSearcher is similar to NewRegexpSearcher, but +// additionally optimizes for index readers that handle regexp's. 
+func NewRegexpStringSearcher(indexReader search.Reader, pattern, field string, + boost float64, scorer search.Scorer, compScorer search.CompositeScorer, + options search.SearcherOptions) (search.Searcher, error) { + a, prefixBeg, prefixEnd, err := parseRegexp(pattern) + if err != nil { + return nil, err + } + + fieldDict, err := indexReader.DictionaryIterator(field, a, prefixBeg, prefixEnd) + if err != nil { + return nil, err + } + defer func() { + if cerr := fieldDict.Close(); cerr != nil && err == nil { + err = cerr + } + }() + + var candidateTerms []string + + tfd, err := fieldDict.Next() + for err == nil && tfd != nil { + candidateTerms = append(candidateTerms, tfd.Term()) + tfd, err = fieldDict.Next() + } + if err != nil { + return nil, err + } + + return NewMultiTermSearcher(indexReader, candidateTerms, field, boost, scorer, + compScorer, options, true) +} + func parseRegexp(pattern string) (a *regexp.Regexp, prefixBeg, prefixEnd []byte, err error) { // TODO: potential optimization where syntax.Regexp supports a Simplify() API? @@ -36,7 +72,7 @@ func parseRegexp(pattern string) (a *regexp.Regexp, prefixBeg, prefixEnd []byte, prefix := literalPrefix(parsed) if prefix != "" { prefixBeg := []byte(prefix) - prefixEnd := calculateExclusiveEndFromPrefix(prefixBeg) + prefixEnd := incrementBytes(prefixBeg) return re, prefixBeg, prefixEnd, nil } diff --git a/vendor/github.com/blugelabs/bluge/search/searcher/search_term.go b/vendor/github.com/blugelabs/bluge/search/searcher/search_term.go new file mode 100644 index 000000000..9fbd3b22f --- /dev/null +++ b/vendor/github.com/blugelabs/bluge/search/searcher/search_term.go @@ -0,0 +1,167 @@ +// Copyright (c) 2020 Couchbase, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package searcher + +import ( + "github.com/blugelabs/bluge/search" + segment "github.com/blugelabs/bluge_segment_api" +) + +type TermSearcher struct { + indexReader search.Reader + reader segment.PostingsIterator + options search.SearcherOptions + scorer search.Scorer + queryTerm string +} + +func NewTermSearcher(indexReader search.Reader, term, field string, boost float64, scorer search.Scorer, + options search.SearcherOptions) (*TermSearcher, error) { + return NewTermSearcherBytes(indexReader, []byte(term), field, boost, scorer, options) +} + +func NewTermSearcherBytes(indexReader search.Reader, term []byte, field string, boost float64, scorer search.Scorer, + options search.SearcherOptions) (*TermSearcher, error) { + needFreqNorm := options.Score != "none" + reader, err := indexReader.PostingsIterator(term, field, needFreqNorm, needFreqNorm, options.IncludeTermVectors) + if err != nil { + return nil, err + } + return newTermSearcherFromReader(indexReader, reader, term, field, boost, scorer, options) +} + +type termStatsWrapper struct { + docFreq uint64 +} + +func (t *termStatsWrapper) DocumentFrequency() uint64 { + return t.docFreq +} + +func newTermSearcherFromReader(indexReader search.Reader, reader segment.PostingsIterator, + term []byte, field string, boost float64, scorer search.Scorer, options search.SearcherOptions) (*TermSearcher, error) { + if scorer == nil { + collStats, err := indexReader.CollectionStats(field) + if err != nil { + return nil, err + } + scorer = options.SimilarityForField(field).Scorer(boost, collStats, 
&termStatsWrapper{docFreq: reader.Count()}) + } + return &TermSearcher{ + indexReader: indexReader, + reader: reader, + scorer: scorer, + options: options, + queryTerm: string(term), + }, nil +} + +func (s *TermSearcher) Size() int { + return reflectStaticSizeTermSearcher + sizeOfPtr + s.reader.Size() +} + +func (s *TermSearcher) Count() uint64 { + return s.reader.Count() +} + +func (s *TermSearcher) Next(ctx *search.Context) (*search.DocumentMatch, error) { + termMatch, err := s.reader.Next() + if err != nil { + return nil, err + } + + if termMatch == nil { + return nil, nil + } + + // score match + docMatch := s.buildDocumentMatch(ctx, termMatch) + + // return doc match + return docMatch, nil +} + +func (s *TermSearcher) Advance(ctx *search.Context, number uint64) (*search.DocumentMatch, error) { + termMatch, err := s.reader.Advance(number) + if err != nil { + return nil, err + } + + if termMatch == nil { + return nil, nil + } + + // score match + docMatch := s.buildDocumentMatch(ctx, termMatch) + + // return doc match + return docMatch, nil +} + +func (s *TermSearcher) Close() error { + return s.reader.Close() +} + +func (s *TermSearcher) Min() int { + return 0 +} + +func (s *TermSearcher) DocumentMatchPoolSize() int { + return 1 +} + +func (s *TermSearcher) Optimize(kind string, octx segment.OptimizableContext) ( + segment.OptimizableContext, error) { + o, ok := s.reader.(segment.Optimizable) + if ok { + return o.Optimize(kind, octx) + } + + return nil, nil +} + +func (s *TermSearcher) buildDocumentMatch(ctx *search.Context, termMatch segment.Posting) *search.DocumentMatch { + rv := ctx.DocumentMatchPool.Get() + rv.SetReader(s.indexReader) + rv.Number = termMatch.Number() + + if s.options.Explain { + rv.Explanation = s.scorer.Explain(termMatch.Frequency(), termMatch.Norm()) + rv.Score = rv.Explanation.Value + } else { + rv.Score = s.scorer.Score(termMatch.Frequency(), termMatch.Norm()) + } + + if len(termMatch.Locations()) > 0 { + if cap(rv.FieldTermLocations) 
< len(termMatch.Locations()) { + rv.FieldTermLocations = make([]search.FieldTermLocation, 0, len(termMatch.Locations())) + } + + for _, v := range termMatch.Locations() { + rv.FieldTermLocations = + append(rv.FieldTermLocations, search.FieldTermLocation{ + Field: v.Field(), + Term: s.queryTerm, + Location: search.Location{ + Pos: v.Pos(), + Start: v.Start(), + End: v.End(), + }, + }) + } + } + + return rv +} diff --git a/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_term_prefix.go b/vendor/github.com/blugelabs/bluge/search/searcher/search_term_prefix.go similarity index 64% rename from vendor/github.com/blevesearch/bleve/v2/search/searcher/search_term_prefix.go rename to vendor/github.com/blugelabs/bluge/search/searcher/search_term_prefix.go index a01b18690..7d39d3187 100644 --- a/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_term_prefix.go +++ b/vendor/github.com/blugelabs/bluge/search/searcher/search_term_prefix.go @@ -1,4 +1,4 @@ -// Copyright (c) 2014 Couchbase, Inc. +// Copyright (c) 2020 Couchbase, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -15,15 +15,16 @@ package searcher import ( - "github.com/blevesearch/bleve/v2/search" - index "github.com/blevesearch/bleve_index_api" + "github.com/blugelabs/bluge/search" ) -func NewTermPrefixSearcher(indexReader index.IndexReader, prefix string, - field string, boost float64, options search.SearcherOptions) ( - search.Searcher, error) { +func NewTermPrefixSearcher(indexReader search.Reader, prefix, field string, + boost float64, scorer search.Scorer, compScorer search.CompositeScorer, + options search.SearcherOptions) (search.Searcher, error) { // find the terms with this prefix - fieldDict, err := indexReader.FieldDictPrefix(field, []byte(prefix)) + kBeg := []byte(prefix) + kEnd := incrementBytes(kBeg) + fieldDict, err := indexReader.DictionaryIterator(field, nil, kBeg, kEnd) if err != nil { return nil, err } @@ -36,7 +37,7 @@ func NewTermPrefixSearcher(indexReader index.IndexReader, prefix string, var terms []string tfd, err := fieldDict.Next() for err == nil && tfd != nil { - terms = append(terms, tfd.Term) + terms = append(terms, tfd.Term()) if tooManyClauses(len(terms)) { return nil, tooManyClausesErr(field, len(terms)) } @@ -46,5 +47,5 @@ func NewTermPrefixSearcher(indexReader index.IndexReader, prefix string, return nil, err } - return NewMultiTermSearcher(indexReader, terms, field, boost, options, true) + return NewMultiTermSearcher(indexReader, terms, field, boost, scorer, compScorer, options, true) } diff --git a/vendor/github.com/blugelabs/bluge/search/searcher/search_term_range.go b/vendor/github.com/blugelabs/bluge/search/searcher/search_term_range.go new file mode 100644 index 000000000..e6d0179b7 --- /dev/null +++ b/vendor/github.com/blugelabs/bluge/search/searcher/search_term_range.go @@ -0,0 +1,67 @@ +// Copyright (c) 2020 Couchbase, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package searcher + +import ( + "github.com/blugelabs/bluge/search" +) + +func NewTermRangeSearcher(indexReader search.Reader, + min, max []byte, inclusiveMin, inclusiveMax bool, field string, + boost float64, scorer search.Scorer, compScorer search.CompositeScorer, + options search.SearcherOptions) (search.Searcher, error) { + if min == nil { + min = []byte{} + } + + if max != nil && inclusiveMax { + max = append(max, 0) + } + + fieldDict, err := indexReader.DictionaryIterator(field, nil, min, max) + if err != nil { + return nil, err + } + + defer func() { + if cerr := fieldDict.Close(); cerr != nil && err == nil { + err = cerr + } + }() + + var terms []string + tfd, err := fieldDict.Next() + for err == nil && tfd != nil { + terms = append(terms, tfd.Term()) + tfd, err = fieldDict.Next() + } + if err != nil { + return nil, err + } + + if len(terms) < 1 { + return NewMatchNoneSearcher(indexReader, options) + } + + if !inclusiveMin && min != nil && string(min) == terms[0] { + terms = terms[1:] + // check again, as we might have removed only entry + if len(terms) < 1 { + return NewMatchNoneSearcher(indexReader, options) + } + } + + return NewMultiTermSearcher(indexReader, terms, field, boost, scorer, compScorer, options, true) +} diff --git a/vendor/github.com/blugelabs/bluge/search/searcher/size.go b/vendor/github.com/blugelabs/bluge/search/searcher/size.go new file mode 100644 index 000000000..6797dea69 --- /dev/null +++ b/vendor/github.com/blugelabs/bluge/search/searcher/size.go @@ -0,0 +1,67 @@ +// Copyright (c) 2020 The Bluge Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package searcher + +import ( + "reflect" +) + +func init() { + var i int + sizeOfInt = int(reflect.TypeOf(i).Size()) + var ptr *int + sizeOfPtr = int(reflect.TypeOf(ptr).Size()) + var slice []int + sizeOfSlice = int(reflect.TypeOf(slice).Size()) + var str string + sizeOfString = int(reflect.TypeOf(str).Size()) + + var bs BooleanSearcher + reflectStaticSizeBooleanSearcher = int(reflect.TypeOf(bs).Size()) + var cs ConjunctionSearcher + reflectStaticSizeConjunctionSearcher = int(reflect.TypeOf(cs).Size()) + var dhs DisjunctionHeapSearcher + reflectStaticSizeDisjunctionHeapSearcher = int(reflect.TypeOf(dhs).Size()) + var sc searcherCurr + reflectStaticSizeSearcherCurr = int(reflect.TypeOf(sc).Size()) + var ds DisjunctionSliceSearcher + reflectStaticSizeDisjunctionSliceSearcher = int(reflect.TypeOf(ds).Size()) + var fs FilteringSearcher + reflectStaticSizeFilteringSearcher = int(reflect.TypeOf(fs).Size()) + var mas MatchAllSearcher + reflectStaticSizeMatchAllSearcher = int(reflect.TypeOf(mas).Size()) + var mns MatchNoneSearcher + reflectStaticSizeMatchNoneSearcher = int(reflect.TypeOf(mns).Size()) + var ps PhraseSearcher + reflectStaticSizePhraseSearcher = int(reflect.TypeOf(ps).Size()) + var ts TermSearcher + reflectStaticSizeTermSearcher = int(reflect.TypeOf(ts).Size()) +} + +var sizeOfInt int +var sizeOfPtr int +var sizeOfSlice int +var sizeOfString int + +var reflectStaticSizeBooleanSearcher int +var 
reflectStaticSizeConjunctionSearcher int +var reflectStaticSizeDisjunctionHeapSearcher int +var reflectStaticSizeSearcherCurr int +var reflectStaticSizeDisjunctionSliceSearcher int +var reflectStaticSizeFilteringSearcher int +var reflectStaticSizeMatchAllSearcher int +var reflectStaticSizeMatchNoneSearcher int +var reflectStaticSizePhraseSearcher int +var reflectStaticSizeTermSearcher int diff --git a/vendor/github.com/blugelabs/bluge/search/similarity/bm25.go b/vendor/github.com/blugelabs/bluge/search/similarity/bm25.go new file mode 100644 index 000000000..adf1737ae --- /dev/null +++ b/vendor/github.com/blugelabs/bluge/search/similarity/bm25.go @@ -0,0 +1,137 @@ +// Copyright (c) 2020 The Bluge Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package similarity + +import ( + "fmt" + "math" + + segment "github.com/blugelabs/bluge_segment_api" + + "github.com/blugelabs/bluge/search" +) + +const defaultB = 0.75 +const defaultK1 = 1.2 + +type BM25Similarity struct { + b float64 + k1 float64 +} + +func NewBM25Similarity() *BM25Similarity { + return NewBM25SimilarityBK1(defaultB, defaultK1) +} + +func NewBM25SimilarityBK1(b, k1 float64) *BM25Similarity { + return &BM25Similarity{ + b: b, + k1: k1, + } +} + +// fixme chec normbits1hit in zap + +func (b *BM25Similarity) ComputeNorm(numTerms int) float32 { + return math.Float32frombits(uint32(numTerms)) +} + +func (b *BM25Similarity) Idf(docFreq, docCount uint64) float64 { + return math.Log(1.0 + float64(docCount-docFreq) + 0.5/(float64(docFreq)+0.5)) +} + +func (b *BM25Similarity) IdfExplainTerm(collectionStats segment.CollectionStats, termStats segment.TermStats) *search.Explanation { + docFreq := termStats.DocumentFrequency() + var docCount uint64 + if collectionStats != nil { + docCount = collectionStats.DocumentCount() + } + idf := b.Idf(docFreq, docCount) + return search.NewExplanation(idf, "idf, computed as log(1 + (N - n + 0.5) / (n + 0.5)) from:", + search.NewExplanation(float64(docFreq), "n, number of documents containing term"), + search.NewExplanation(float64(docCount), "N, total number of documents with field")) +} + +func (b *BM25Similarity) AverageFieldLength(stats segment.CollectionStats) float64 { + if stats != nil { + return float64(stats.SumTotalTermFrequency()) / float64(stats.DocumentCount()) + } + return 0 +} + +func (b *BM25Similarity) Scorer(boost float64, collectionStats segment.CollectionStats, termStats segment.TermStats) search.Scorer { + idf := b.IdfExplainTerm(collectionStats, termStats) + return NewBM25Scorer(boost, b.k1, b.b, b.AverageFieldLength(collectionStats), idf) +} + +type BM25Scorer struct { + boost float64 + k1 float64 + b float64 + avgDocLen float64 + weight float64 + idf *search.Explanation +} + +func 
NewBM25Scorer(boost, k1, b, avgDocLen float64, idf *search.Explanation) *BM25Scorer { + return &BM25Scorer{ + boost: boost, + k1: k1, + b: b, + avgDocLen: avgDocLen, + idf: idf, + weight: boost * idf.Value, + } +} + +func (b *BM25Scorer) Score(freq int, norm float64) float64 { + docLen := math.Float32bits(float32(norm)) + normInverse := 1 / (b.k1 * ((1 - b.b) + b.b*float64(docLen)/b.avgDocLen)) + return b.weight - b.weight/(1+float64(freq)*normInverse) +} + +func (b *BM25Scorer) explainTf(freq int, norm float64) *search.Explanation { + docLen := math.Float32bits(float32(norm)) + normInverse := 1 / (b.k1 * ((1 - b.b) + b.b*float64(docLen)/b.avgDocLen)) + var children []*search.Explanation + children = append(children, + search.NewExplanation(float64(freq), "freq, occurrences of term within document"), + search.NewExplanation(b.k1, "k1, term saturation parameter"), + search.NewExplanation(b.b, "b, length normalization parameter"), + search.NewExplanation(float64(docLen), "dl, length of field"), + search.NewExplanation(b.avgDocLen, "avgdl, average length of field")) + score := 1.0 - 1.0/(1.0+float64(freq)*normInverse) + return search.NewExplanation(score, + "tf, computed as freq / (freq + k1 * (1 - b + b * dl / avgdl)) from:", + children...) +} + +const noBoost = 1.0 + +func (b *BM25Scorer) Explain(freq int, norm float64) *search.Explanation { + var children = []*search.Explanation{ + b.idf, + } + if b.boost != noBoost { + children = append(children, search.NewExplanation(b.boost, "boost")) + } + children = append(children, b.explainTf(freq, norm)) + docLen := math.Float32bits(float32(norm)) + normInverse := 1 / (b.k1 * ((1 - b.b) + b.b*float64(docLen)/b.avgDocLen)) + score := b.weight - b.weight/(1.0+float64(freq)*normInverse) + return search.NewExplanation(score, + fmt.Sprintf("score(freq=%d), computed as boost * idf * tf from:", freq), + children...) 
+} diff --git a/vendor/github.com/blugelabs/bluge/search/similarity/composite.go b/vendor/github.com/blugelabs/bluge/search/similarity/composite.go new file mode 100644 index 000000000..fb7d0b37c --- /dev/null +++ b/vendor/github.com/blugelabs/bluge/search/similarity/composite.go @@ -0,0 +1,43 @@ +// Copyright (c) 2020 The Bluge Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package similarity + +import "github.com/blugelabs/bluge/search" + +type CompositeSumScorer struct{} + +func NewCompositeSumScorer() *CompositeSumScorer { + return &CompositeSumScorer{} +} + +func (c *CompositeSumScorer) ScoreComposite(constituents []*search.DocumentMatch) float64 { + var rv float64 + for _, constituent := range constituents { + rv += constituent.Score + } + return rv +} + +func (c *CompositeSumScorer) ExplainComposite(constituents []*search.DocumentMatch) *search.Explanation { + var sum float64 + var children []*search.Explanation + for _, constituent := range constituents { + sum += constituent.Score + children = append(children, constituent.Explanation) + } + return search.NewExplanation(sum, + "sum of:", + children...) +} diff --git a/vendor/github.com/blugelabs/bluge/search/similarity/constant.go b/vendor/github.com/blugelabs/bluge/search/similarity/constant.go new file mode 100644 index 000000000..36fc55d31 --- /dev/null +++ b/vendor/github.com/blugelabs/bluge/search/similarity/constant.go @@ -0,0 +1,34 @@ +// Copyright (c) 2020 The Bluge Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package similarity + +import "github.com/blugelabs/bluge/search" + +type ConstantScorer float64 + +func (c ConstantScorer) Score(_ int, _ float64) float64 { + return float64(c) +} + +func (c ConstantScorer) Explain(_ int, _ float64) *search.Explanation { + return search.NewExplanation(float64(c), "constant") +} + +func (c ConstantScorer) ScoreComposite(_ []*search.DocumentMatch) float64 { + return float64(c) +} +func (c ConstantScorer) ExplainComposite(_ []*search.DocumentMatch) *search.Explanation { + return search.NewExplanation(float64(c), "constant") +} diff --git a/vendor/github.com/blugelabs/bluge/search/size.go b/vendor/github.com/blugelabs/bluge/search/size.go new file mode 100644 index 000000000..7f4ce89f1 --- /dev/null +++ b/vendor/github.com/blugelabs/bluge/search/size.go @@ -0,0 +1,48 @@ +// Copyright (c) 2020 The Bluge Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package search + +import ( + "reflect" +) + +func init() { + var ptr *int + sizeOfPtr = int(reflect.TypeOf(ptr).Size()) + var str string + sizeOfString = int(reflect.TypeOf(str).Size()) + var slice []int + sizeOfSlice = int(reflect.TypeOf(slice).Size()) + var e Explanation + reflectStaticSizeExplanation = int(reflect.TypeOf(e).Size()) + var dm DocumentMatch + reflectStaticSizeDocumentMatch = int(reflect.TypeOf(dm).Size()) + var sc Context + reflectStaticSizeSearchContext = int(reflect.TypeOf(sc).Size()) + var l Location + reflectStaticSizeLocation = int(reflect.TypeOf(l).Size()) + var dmp DocumentMatchPool + reflectStaticSizeDocumentMatchPool = int(reflect.TypeOf(dmp).Size()) +} + +var sizeOfPtr int +var sizeOfString int +var sizeOfSlice int + +var reflectStaticSizeExplanation int +var reflectStaticSizeDocumentMatch int +var reflectStaticSizeSearchContext int +var reflectStaticSizeLocation int +var reflectStaticSizeDocumentMatchPool int diff --git a/vendor/github.com/blugelabs/bluge/search/sort.go b/vendor/github.com/blugelabs/bluge/search/sort.go new file mode 100644 index 000000000..6a5430dec --- /dev/null +++ b/vendor/github.com/blugelabs/bluge/search/sort.go @@ -0,0 +1,159 @@ +// Copyright (c) 2020 The Bluge Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package search + +import ( + "bytes" + "strings" +) + +type SortOrder []*Sort + +func (o SortOrder) Fields() (fields []string) { + for _, sort := range o { + fields = append(fields, sort.Fields()...) + } + return fields +} + +func (o SortOrder) Copy() SortOrder { + rv := make(SortOrder, len(o)) + copy(rv, o) + return rv +} + +func (o SortOrder) Reverse() { + for _, oi := range o { + oi.desc = !oi.desc + oi.missingFirst = !oi.missingFirst + } +} + +func (o SortOrder) Compute(match *DocumentMatch) { + for _, sort := range o { + match.SortValue = append(match.SortValue, sort.Value(match)) + } +} + +func (o SortOrder) Compare(i, j *DocumentMatch) int { + // compare the documents on all search sorts until a differences is found + for x := range o { + c := 0 + + iVal := i.SortValue[x] + jVal := j.SortValue[x] + c = bytes.Compare(iVal, jVal) + if c == 0 { + continue + } + if o[x].desc { + c = -c + } + return c + } + // if they are the same at this point, impose order based on index natural sort order + if i.HitNumber == j.HitNumber { + return 0 + } else if i.HitNumber > j.HitNumber { + return 1 + } + return -1 +} + +type SortValue [][]byte + +type Sort struct { + source TextValueSource + desc bool + missingFirst bool +} + +func SortBy(source TextValueSource) *Sort { + rv := &Sort{} + + rv.source = MissingTextValue(source, &sortFirstLast{ + desc: &rv.desc, + first: &rv.missingFirst, + }) + + return rv +} + +func (s *Sort) Desc() *Sort { + s.desc = true + return s +} + +func (s *Sort) MissingFirst() *Sort { + s.missingFirst = true + return s +} + +func (s *Sort) Fields() []string { + return s.source.Fields() +} + +func (s *Sort) Value(match *DocumentMatch) []byte { + return s.source.Value(match) +} + +func ParseSearchSortString(input string) *Sort { + descending := false + if strings.HasPrefix(input, "-") { + descending = true + input = input[1:] + } + input = strings.TrimPrefix(input, "+") + if input == "_score" { + return SortBy(&ScoreSource{}).Desc() + } + rv := 
SortBy(Field(input)) + if descending { + rv.Desc() + } + return rv +} + +func ParseSortOrderStrings(in []string) SortOrder { + rv := make(SortOrder, 0, len(in)) + for _, i := range in { + ss := ParseSearchSortString(i) + rv = append(rv, ss) + } + return rv +} + +var highTerm = bytes.Repeat([]byte{0xff}, 10) +var lowTerm []byte = []byte{0x00} + +type sortFirstLast struct { + desc *bool + first *bool +} + +func (c *sortFirstLast) Fields() []string { + return nil +} + +func (c *sortFirstLast) Value(_ *DocumentMatch) []byte { + if c.desc != nil && *c.desc && c.first != nil && *c.first { + return highTerm + } else if c.desc != nil && *c.desc { + return lowTerm + } else if c.first != nil && *c.first { + return lowTerm + } + return highTerm +} diff --git a/vendor/github.com/blugelabs/bluge/search/source.go b/vendor/github.com/blugelabs/bluge/search/source.go new file mode 100644 index 000000000..637807b11 --- /dev/null +++ b/vendor/github.com/blugelabs/bluge/search/source.go @@ -0,0 +1,412 @@ +// Copyright (c) 2020 The Bluge Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package search + +import ( + "math" + "time" + + "github.com/blugelabs/bluge/numeric" + "github.com/blugelabs/bluge/numeric/geo" +) + +type TextValueSource interface { + Fields() []string + Value(match *DocumentMatch) []byte +} + +type TextValuesSource interface { + Fields() []string + Values(match *DocumentMatch) [][]byte +} + +type NumericValueSource interface { + Fields() []string + Number(match *DocumentMatch) float64 +} + +type NumericValuesSource interface { + Fields() []string + Numbers(match *DocumentMatch) []float64 +} + +type DateValueSource interface { + Fields() []string + Date(match *DocumentMatch) time.Time +} + +type DateValuesSource interface { + Fields() []string + Dates(match *DocumentMatch) []time.Time +} + +type GeoPointValueSource interface { + Fields() []string + GeoPoint(match *DocumentMatch) *geo.Point +} + +type GeoPointValuesSource interface { + Fields() []string + GeoPoints(match *DocumentMatch) []*geo.Point +} + +type FieldSource string + +func Field(field string) FieldSource { + return FieldSource(field) +} + +func (f FieldSource) Fields() []string { + return []string{string(f)} +} + +func (f FieldSource) Value(match *DocumentMatch) []byte { + return firstTerm(RemoveNumericPaddedTerms(f.Values(match))) +} + +func (f FieldSource) Values(match *DocumentMatch) [][]byte { + return match.DocValues(string(f)) +} + +func (f FieldSource) Number(match *DocumentMatch) float64 { + return firstNumber(f.Numbers(match)) +} + +func (f FieldSource) Numbers(match *DocumentMatch) []float64 { + var rv []float64 + for _, term := range f.Values(match) { + prefixCoded := numeric.PrefixCoded(term) + shift, err := prefixCoded.Shift() + if err == nil && shift == 0 { + i64, err := prefixCoded.Int64() + if err == nil { + f64 := numeric.Int64ToFloat64(i64) + rv = append(rv, f64) + } + } + } + return rv +} + +func (f FieldSource) Date(match *DocumentMatch) time.Time { + return firstDate(f.Dates(match)) +} + +func (f FieldSource) Dates(match *DocumentMatch) 
[]time.Time { + var rv []time.Time + for _, term := range f.Values(match) { + prefixCoded := numeric.PrefixCoded(term) + shift, err := prefixCoded.Shift() + if err == nil && shift == 0 { + i64, err := prefixCoded.Int64() + if err == nil { + t := time.Unix(0, i64) + rv = append(rv, t) + } + } + } + return rv +} + +func (f FieldSource) GeoPoint(match *DocumentMatch) *geo.Point { + return firstGeoPoint(f.GeoPoints(match)) +} + +func (f FieldSource) GeoPoints(match *DocumentMatch) []*geo.Point { + var rv []*geo.Point + for _, term := range f.Values(match) { + prefixCoded := numeric.PrefixCoded(term) + shift, err := prefixCoded.Shift() + if err == nil && shift == 0 { + i64, err := prefixCoded.Int64() + if err == nil { + rv = append(rv, &geo.Point{ + Lon: geo.MortonUnhashLon(uint64(i64)), + Lat: geo.MortonUnhashLat(uint64(i64)), + }) + } + } + } + return rv +} + +type ScoreSource struct{} + +func DocumentScore() *ScoreSource { + return &ScoreSource{} +} + +func (n *ScoreSource) Fields() []string { + return []string{} +} + +func (n *ScoreSource) Value(d *DocumentMatch) []byte { + return numeric.MustNewPrefixCodedInt64(numeric.Float64ToInt64(d.Score), 0) +} + +func (n *ScoreSource) Values(d *DocumentMatch) [][]byte { + return [][]byte{numeric.MustNewPrefixCodedInt64(numeric.Float64ToInt64(d.Score), 0)} +} + +func (n *ScoreSource) Number(d *DocumentMatch) float64 { + return d.Score +} + +func (n *ScoreSource) Numbers(d *DocumentMatch) []float64 { + return []float64{d.Score} +} + +type MissingTextValueSource struct { + primary, replacement TextValueSource +} + +func MissingTextValue(primary, replacement TextValueSource) *MissingTextValueSource { + return &MissingTextValueSource{ + primary: primary, + replacement: replacement, + } +} + +func (f *MissingTextValueSource) Fields() []string { + return append(f.primary.Fields(), f.replacement.Fields()...) 
+} + +func (f *MissingTextValueSource) Value(match *DocumentMatch) []byte { + primaryValue := f.primary.Value(match) + if primaryValue == nil { + return f.replacement.Value(match) + } + return primaryValue +} + +type MissingNumericSource struct { + primary, replacement NumericValuesSource +} + +func MissingNumeric(primary, replacement NumericValuesSource) *MissingNumericSource { + return &MissingNumericSource{ + primary: primary, + replacement: replacement, + } +} + +func (f *MissingNumericSource) Fields() []string { + var rv []string + rv = append(rv, f.primary.Fields()...) + rv = append(rv, f.replacement.Fields()...) + return rv +} + +func (f *MissingNumericSource) Numbers(match *DocumentMatch) []float64 { + primaryValues := f.primary.Numbers(match) + if len(primaryValues) == 0 { + return f.replacement.Numbers(match) + } + return primaryValues +} + +type MissingDateSource struct { + primary, replacement DateValuesSource +} + +func MissingDate(primary, replacement DateValuesSource) *MissingDateSource { + return &MissingDateSource{ + primary: primary, + replacement: replacement, + } +} + +func (f *MissingDateSource) Fields() []string { + var rv []string + rv = append(rv, f.primary.Fields()...) + rv = append(rv, f.replacement.Fields()...) + return rv +} + +func (f *MissingDateSource) Numbers(match *DocumentMatch) []time.Time { + primaryValues := f.primary.Dates(match) + if len(primaryValues) == 0 { + return f.replacement.Dates(match) + } + return primaryValues +} + +type MissingGeoPointSource struct { + primary, replacement GeoPointValuesSource +} + +func MissingGeoPoints(primary, replacement GeoPointValuesSource) *MissingGeoPointSource { + return &MissingGeoPointSource{ + primary: primary, + replacement: replacement, + } +} + +func (f *MissingGeoPointSource) Fields() []string { + var rv []string + rv = append(rv, f.primary.Fields()...) + rv = append(rv, f.replacement.Fields()...) 
+ return rv +} + +func (f *MissingGeoPointSource) GeoPoints(match *DocumentMatch) []*geo.Point { + primaryValues := f.primary.GeoPoints(match) + if len(primaryValues) == 0 { + return f.replacement.GeoPoints(match) + } + return primaryValues +} + +type FilteringTextSource struct { + source TextValuesSource + filter func([]byte) bool +} + +func FilterText(source TextValuesSource, filter func([]byte) bool) *FilteringTextSource { + return &FilteringTextSource{ + source: source, + filter: filter, + } +} + +func (f *FilteringTextSource) Fields() []string { + return f.source.Fields() +} + +func (f *FilteringTextSource) Values(match *DocumentMatch) [][]byte { + var rv [][]byte + values := f.source.Values(match) + for _, val := range values { + if f.filter(val) { + rv = append(rv, val) + } + } + return rv +} + +func firstTerm(sourceValues [][]byte) []byte { + if len(sourceValues) > 0 { + return sourceValues[0] + } + return nil +} + +func firstNumber(sourceValues []float64) float64 { + if len(sourceValues) > 0 { + return sourceValues[0] + } + return math.NaN() +} + +func firstDate(sourceValues []time.Time) time.Time { + if len(sourceValues) > 0 { + return sourceValues[0] + } + return time.Time{} +} + +func firstGeoPoint(sourceValues []*geo.Point) *geo.Point { + if len(sourceValues) > 0 { + return sourceValues[0] + } + return nil +} + +func RemoveNumericPaddedTerms(sourceValues [][]byte) [][]byte { + var allValidNumeric = true + var zeroPaddedNumeric [][]byte + for _, term := range sourceValues { + prefixCoded := numeric.PrefixCoded(term) + shift, err := prefixCoded.Shift() + if err == nil && shift == 0 { + zeroPaddedNumeric = append(zeroPaddedNumeric, term) + } else { + allValidNumeric = false + break + } + } + // if all terms we saw looked like valid numeric encoded terms + // AND there was at least one zero padded numeric term + // return only the zero padded numeric terms + if allValidNumeric && len(zeroPaddedNumeric) > 0 { + return zeroPaddedNumeric + } + // otherwise 
return all the terms + return sourceValues +} + +type PointDistanceSource struct { + a, b GeoPointValueSource + unit geo.DistanceUnit +} + +func NewGeoPointDistanceSource(a, b GeoPointValueSource, unit geo.DistanceUnit) *PointDistanceSource { + return &PointDistanceSource{ + a: a, + b: b, + unit: unit, + } +} + +func (p PointDistanceSource) Fields() []string { + return append(p.a.Fields(), p.b.Fields()...) +} + +func (p PointDistanceSource) Value(match *DocumentMatch) []byte { + distInt64 := numeric.Float64ToInt64(p.Number(match)) + return numeric.MustNewPrefixCodedInt64(distInt64, 0) +} + +func (p PointDistanceSource) Values(match *DocumentMatch) [][]byte { + return [][]byte{p.Value(match)} +} + +func (p PointDistanceSource) Number(match *DocumentMatch) float64 { + pointA := p.a.GeoPoint(match) + pointB := p.b.GeoPoint(match) + dist := geo.Haversin(pointA.Lon, pointA.Lat, pointB.Lon, pointB.Lat) + // dist is returned in km, convert to desired unit + return geo.Convert(dist, geo.Kilometer, p.unit) +} + +func (p PointDistanceSource) Numbers(match *DocumentMatch) []float64 { + return []float64{p.Number(match)} +} + +type ConstantGeoPointSource geo.Point + +func NewConstantGeoPointSource(p geo.Point) *ConstantGeoPointSource { + rv := ConstantGeoPointSource(p) + return &rv +} + +func (p *ConstantGeoPointSource) Fields() []string { + return nil +} + +func (p *ConstantGeoPointSource) GeoPoint(_ *DocumentMatch) *geo.Point { + var gp = geo.Point(*p) + return &gp +} + +type ConstantTextValueSource []byte + +func (c ConstantTextValueSource) Fields() []string { + return nil +} + +func (c ConstantTextValueSource) Value(_ *DocumentMatch) []byte { + return c +} diff --git a/vendor/github.com/blevesearch/bleve/v2/search/util.go b/vendor/github.com/blugelabs/bluge/search/util.go similarity index 88% rename from vendor/github.com/blevesearch/bleve/v2/search/util.go rename to vendor/github.com/blugelabs/bluge/search/util.go index 19dd5d68b..d311eb951 100644 --- 
a/vendor/github.com/blevesearch/bleve/v2/search/util.go +++ b/vendor/github.com/blugelabs/bluge/search/util.go @@ -1,4 +1,4 @@ -// Copyright (c) 2014 Couchbase, Inc. +// Copyright (c) 2020 Couchbase, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -56,10 +56,9 @@ func MergeFieldTermLocations(dest []FieldTermLocation, matches []*DocumentMatch) Field: ftl.Field, Term: ftl.Term, Location: Location{ - Pos: ftl.Location.Pos, - Start: ftl.Location.Start, - End: ftl.Location.End, - ArrayPositions: append(ArrayPositions(nil), ftl.Location.ArrayPositions...), + Pos: ftl.Location.Pos, + Start: ftl.Location.Start, + End: ftl.Location.End, }, }) } diff --git a/vendor/github.com/blugelabs/bluge/size.go b/vendor/github.com/blugelabs/bluge/size.go new file mode 100644 index 000000000..e42a3feab --- /dev/null +++ b/vendor/github.com/blugelabs/bluge/size.go @@ -0,0 +1,46 @@ +// Copyright (c) 2020 The Bluge Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package bluge + +import ( + "reflect" + + "github.com/blugelabs/bluge/search" +) + +var documentMatchEmptySize int +var searchContextEmptySize int +var reflectStaticSizeBaseField int +var sizeOfSlice int +var sizeOfString int +var sizeOfPtr int +var sizeOfBool int + +func init() { + var dm search.DocumentMatch + documentMatchEmptySize = dm.Size() + var sc search.Context + searchContextEmptySize = sc.Size() + var f TermField + reflectStaticSizeBaseField = int(reflect.TypeOf(f).Size()) + var slice []int + sizeOfSlice = int(reflect.TypeOf(slice).Size()) + var str string + sizeOfString = int(reflect.TypeOf(str).Size()) + var ptr *int + sizeOfPtr = int(reflect.TypeOf(ptr).Size()) + var b bool + sizeOfBool = int(reflect.TypeOf(b).Size()) +} diff --git a/vendor/github.com/blugelabs/bluge/writer.go b/vendor/github.com/blugelabs/bluge/writer.go new file mode 100644 index 000000000..d3330da35 --- /dev/null +++ b/vendor/github.com/blugelabs/bluge/writer.go @@ -0,0 +1,79 @@ +// Copyright (c) 2020 The Bluge Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package bluge + +import ( + "fmt" + + segment "github.com/blugelabs/bluge_segment_api" + + "github.com/blugelabs/bluge/index" +) + +type Writer struct { + config Config + chill *index.Writer +} + +func OpenWriter(config Config) (*Writer, error) { + rv := &Writer{ + config: config, + } + + var err error + rv.chill, err = index.OpenWriter(config.indexConfig) + if err != nil { + return nil, fmt.Errorf("error opening index: %w", err) + } + + return rv, nil +} + +func (w *Writer) Insert(doc segment.Document) error { + b := NewBatch() + b.Insert(doc) + return w.Batch(b) +} + +func (w *Writer) Update(id segment.Term, doc segment.Document) error { + b := NewBatch() + b.Update(id, doc) + return w.Batch(b) +} + +func (w *Writer) Delete(id segment.Term) error { + b := NewBatch() + b.Delete(id) + return w.Batch(b) +} + +func (w *Writer) Batch(batch *index.Batch) error { + return w.chill.Batch(batch) +} + +func (w *Writer) Close() error { + return w.chill.Close() +} + +func (w *Writer) Reader() (*Reader, error) { + r, err := w.chill.Reader() + if err != nil { + return nil, fmt.Errorf("error getting nreal time reader: %w", err) + } + return &Reader{ + config: w.config, + reader: r, + }, nil +} diff --git a/vendor/github.com/blugelabs/bluge/writer_offline.go b/vendor/github.com/blugelabs/bluge/writer_offline.go new file mode 100644 index 000000000..8e9eb2712 --- /dev/null +++ b/vendor/github.com/blugelabs/bluge/writer_offline.go @@ -0,0 +1,72 @@ +// Copyright (c) 2020 The Bluge Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package bluge + +import ( + "fmt" + + segment "github.com/blugelabs/bluge_segment_api" + + "github.com/blugelabs/bluge/index" +) + +type OfflineWriter struct { + writer *index.WriterOffline + + batchSize int + maxSegmentsToMerge int + batch *index.Batch + batchCount int +} + +func OpenOfflineWriter(config Config, batchSize, maxSegmentsToMerge int) (*OfflineWriter, error) { + rv := &OfflineWriter{ + batchSize: batchSize, + maxSegmentsToMerge: maxSegmentsToMerge, + batch: index.NewBatch(), + } + + var err error + rv.writer, err = index.OpenOfflineWriter(config.indexConfig) + if err != nil { + return nil, fmt.Errorf("error opening index: %w", err) + } + + return rv, nil +} + +func (w *OfflineWriter) Insert(doc segment.Document) error { + w.batch.Insert(doc) + w.batchCount++ + if w.batchCount > w.batchSize { + err := w.writer.Batch(w.batch) + if err != nil { + return err + } + w.batch.Reset() + w.batchCount = 0 + } + return nil +} + +func (w *OfflineWriter) Close() error { + if w.batchCount > 0 { + err := w.writer.Batch(w.batch) + if err != nil { + return err + } + } + return w.writer.Close() +} diff --git a/vendor/github.com/blevesearch/zapx/v14/.gitignore b/vendor/github.com/blugelabs/bluge_segment_api/.gitignore similarity index 86% rename from vendor/github.com/blevesearch/zapx/v14/.gitignore rename to vendor/github.com/blugelabs/bluge_segment_api/.gitignore index 46d1cfad5..49e6c2bce 100644 --- a/vendor/github.com/blevesearch/zapx/v14/.gitignore +++ b/vendor/github.com/blugelabs/bluge_segment_api/.gitignore @@ -7,6 +7,5 @@ **/.idea/ **/*.iml .DS_Store -/cmd/zap/zap *.test tags diff --git a/vendor/github.com/blugelabs/bluge_segment_api/.golangci.yml b/vendor/github.com/blugelabs/bluge_segment_api/.golangci.yml new file mode 100644 index 000000000..13e02bd54 --- /dev/null +++ b/vendor/github.com/blugelabs/bluge_segment_api/.golangci.yml @@ -0,0 +1,148 @@ 
+linters-settings: + depguard: + list-type: blacklist + packages: + # logging is allowed only by logutils.Log, logrus + # is allowed to use only in logutils package + - github.com/sirupsen/logrus + packages-with-error-message: + - github.com/sirupsen/logrus: "logging is allowed only by logutils.Log" + dupl: + threshold: 100 + funlen: + lines: 100 + statements: 50 + goconst: + min-len: 2 + min-occurrences: 2 + gocritic: + enabled-tags: + - diagnostic + - experimental + - opinionated + - performance + - style + disabled-checks: + - dupImport # https://github.com/go-critic/go-critic/issues/845 + - ifElseChain + - octalLiteral + - whyNoLint + - wrapperFunc + gocyclo: + min-complexity: 20 # increased from 15 to get us going, but not make things worse + goimports: + local-prefixes: github.com/golangci/golangci-lint + golint: + min-confidence: 0 + gomnd: + settings: + mnd: + # don't include the "operation" and "assign" + checks: argument,case,condition,return + govet: + check-shadowing: true + settings: + printf: + funcs: + - (github.com/golangci/golangci-lint/pkg/logutils.Log).Infof + - (github.com/golangci/golangci-lint/pkg/logutils.Log).Warnf + - (github.com/golangci/golangci-lint/pkg/logutils.Log).Errorf + - (github.com/golangci/golangci-lint/pkg/logutils.Log).Fatalf + lll: + line-length: 140 + maligned: + suggest-new: true + misspell: + locale: US + nolintlint: + allow-leading-space: true # don't require machine-readable nolint directives (i.e. with no leading space) + allow-unused: false # report any unused nolint directives + require-explanation: false # don't require an explanation for nolint directives + require-specific: false # don't require nolint directives to be specific about which linter is being skipped + +linters: + # please, do not use `enable-all`: it's deprecated and will be removed soon. 
+ # inverted configuration with `enable-all` and `disable` is not scalable during updates of golangci-lint + disable-all: true + enable: + - bodyclose + - deadcode + - depguard + - dogsled + - dupl + - errcheck + - funlen + - gochecknoinits + - goconst + - gocritic + - gocyclo + - gofmt + - goimports + - golint + - gomnd + - goprintffuncname + - gosec + - gosimple + - govet + - ineffassign + - interfacer + - lll + - misspell + - nakedret + - nolintlint + - rowserrcheck + - scopelint + - staticcheck + - structcheck + - stylecheck + - typecheck + - unconvert + - unparam + - unused + - varcheck + - whitespace + + # don't enable: + # - asciicheck + # - gochecknoglobals + # - gocognit + # - godot + # - godox + # - goerr113 + # - maligned + # - nestif + # - prealloc + # - testpackage + # - wsl + +issues: + # Excluding configuration per-path, per-linter, per-text and per-source + exclude-rules: + - path: _test\.go + linters: + - gomnd + - path: cmd/ice/cmd + linters: + - gochecknoinits + - path: sizes.go + linters: + - gochecknoinits + + # https://github.com/go-critic/go-critic/issues/926 + - linters: + - gocritic + text: "unnecessaryDefer:" + +run: + skip-dirs: + - test/testdata_etc + - internal/cache + - internal/renameio + - internal/robustio + +# golangci.com configuration +# https://github.com/golangci/golangci/wiki/Configuration +service: + golangci-lint-version: 1.23.x # use the fixed version to not introduce new linters unexpectedly + prepare: + - echo "here I can run custom commands, but no preparation needed for this repo" diff --git a/vendor/github.com/blugelabs/bluge_segment_api/AUTHORS b/vendor/github.com/blugelabs/bluge_segment_api/AUTHORS new file mode 100644 index 000000000..4ee2adc70 --- /dev/null +++ b/vendor/github.com/blugelabs/bluge_segment_api/AUTHORS @@ -0,0 +1,10 @@ +# This is the official list of Bluge authors for copyright purposes. 
+# +# Names should be added to this file as one of +# Organization's name +# Individual's name +# Individual's name +# +# Please keep the list sorted. + +Marty Schoch diff --git a/vendor/github.com/blugelabs/bluge_segment_api/CONTRIBUTING.md b/vendor/github.com/blugelabs/bluge_segment_api/CONTRIBUTING.md new file mode 100644 index 000000000..b95ed0a5a --- /dev/null +++ b/vendor/github.com/blugelabs/bluge_segment_api/CONTRIBUTING.md @@ -0,0 +1,11 @@ +# Contributing to Bluge + +Bluge is an open source project. + +Thank you for your contribution, we appreciate your help! + +## Contributing code + +Portions of existing code are copyright Couchbase, Inc. + +All new contributions should be copyright The Bluge Authors. New contributors should add an appropriate entry to the AUTHORS file at the root of the repository. All contributions must be distributed under the Apache License found in the LICENSE file. diff --git a/vendor/github.com/blevesearch/bleve_index_api/LICENSE b/vendor/github.com/blugelabs/bluge_segment_api/LICENSE similarity index 100% rename from vendor/github.com/blevesearch/bleve_index_api/LICENSE rename to vendor/github.com/blugelabs/bluge_segment_api/LICENSE diff --git a/vendor/github.com/blugelabs/bluge_segment_api/README.md b/vendor/github.com/blugelabs/bluge_segment_api/README.md new file mode 100644 index 000000000..9ef2c4495 --- /dev/null +++ b/vendor/github.com/blugelabs/bluge_segment_api/README.md @@ -0,0 +1,9 @@ +# Bluge Segment API + +[![PkgGoDev](https://pkg.go.dev/badge/github.com/blugelabs/bluge_segment_api)](https://pkg.go.dev/github.com/blugelabs/bluge_segment_api) +[![Tests](https://github.com/blugelabs/bluge_segment_api/workflows/Tests/badge.svg?branch=master&event=push)](https://github.com/blugelabs/bluge_segment_api/actions?query=workflow%3ATests+event%3Apush+branch%3Amaster) 
+[![Lint](https://github.com/blugelabs/bluge_segment_api/workflows/Lint/badge.svg?branch=master&event=push)](https://github.com/blugelabs/bluge_segment_api/actions?query=workflow%3ALint+event%3Apush+branch%3Amaster) + +These interfaces define the relationship between Bluge and it's segment file formats. + +The expectation is that these interfaces can be well-defined, and evolve slowly. By doing so, Bluge and the file formats themselves can evolve more quickly, and yet remain compatible. \ No newline at end of file diff --git a/vendor/github.com/blevesearch/scorch_segment_api/v2/automaton.go b/vendor/github.com/blugelabs/bluge_segment_api/automaton.go similarity index 90% rename from vendor/github.com/blevesearch/scorch_segment_api/v2/automaton.go rename to vendor/github.com/blugelabs/bluge_segment_api/automaton.go index 4577ceb2e..072982d67 100644 --- a/vendor/github.com/blevesearch/scorch_segment_api/v2/automaton.go +++ b/vendor/github.com/blugelabs/bluge_segment_api/automaton.go @@ -1,4 +1,4 @@ -// Copyright (c) 2021 Couchbase, Inc. +// Copyright (c) 2020 The Bluge Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,7 +14,6 @@ package segment -// Automaton represents the general contract of a byte-based finite automaton type Automaton interface { // Start returns the start state diff --git a/vendor/github.com/blugelabs/bluge_segment_api/data.go b/vendor/github.com/blugelabs/bluge_segment_api/data.go new file mode 100644 index 000000000..bb569ce98 --- /dev/null +++ b/vendor/github.com/blugelabs/bluge_segment_api/data.go @@ -0,0 +1,129 @@ +// Copyright (c) 2020 The Bluge Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package segment + +import ( + "io" + "os" +) + +// Data is an opaque representation of some data. +// This data could have been read onto the heap, +// it could be a live memory-mapped region, +// or it could be loaded on demand using traditional +// file I/O. +// +// Micro-benchmarking supported using this concrete structure +// with simple conditional over an interface with multiple +// implementations. +type Data struct { + mem []byte + r io.ReaderAt + sz int +} + +func NewDataBytes(b []byte) *Data { + return &Data{ + mem: b, + } +} + +func NewDataFile(f *os.File) (*Data, error) { + fInfo, err := f.Stat() + if err != nil { + return nil, err + } + return &Data{ + r: f, + sz: int(fInfo.Size()), + }, nil +} + +func (d *Data) Read(start, end int) ([]byte, error) { + if d.mem != nil { + return d.mem[start:end], nil + } + rv := make([]byte, end-start) + _, err := d.r.ReadAt(rv, int64(start)) + if err != nil { + return nil, err + } + return rv, nil +} + +func (d *Data) Len() int { + if d.mem != nil { + return len(d.mem) + } + return d.sz +} + +func (d *Data) Slice(start, end int) *Data { + if d.mem != nil { + return &Data{ + mem: d.mem[start:end], + } + } + return &Data{ + r: io.NewSectionReader(d.r, int64(start), int64(end-start)), + sz: end - start, + } +} + +func (d *Data) Reader() *DataReader { + return &DataReader{ + d: d, + } +} + +func (d *Data) WriteTo(w io.Writer) (int64, error) { + if d.mem != nil { + n, err := w.Write(d.mem) + return int64(n), err + } + dataReader := d.Reader() + return io.Copy(w, dataReader) +} + +func (d *Data) Size() 
int { + if d.mem != nil { + return cap(d.mem) + } + // FIXME not really 0 need size stuff + return 0 +} + +type DataReader struct { + d *Data + n int +} + +func (r *DataReader) Read(p []byte) (n int, err error) { + if r.n >= r.d.Len() { + return 0, io.EOF + } + start := r.n + end := r.n + len(p) + if end > r.d.Len() { + end = r.d.Len() + } + data, err := r.d.Read(start, end) + if err != nil { + return 0, err + } + copy(p, data) + r.n = end + return end - start, nil +} diff --git a/vendor/github.com/blugelabs/bluge_segment_api/document.go b/vendor/github.com/blugelabs/bluge_segment_api/document.go new file mode 100644 index 000000000..d044b8bbb --- /dev/null +++ b/vendor/github.com/blugelabs/bluge_segment_api/document.go @@ -0,0 +1,43 @@ +// Copyright (c) 2020 The Bluge Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package segment + +type Document interface { + Analyze() + EachField(vf VisitField) +} + +type VisitField func(Field) + +type Field interface { + Name() string + Length() int + EachTerm(vt VisitTerm) + Value() []byte + + Index() bool + Store() bool + IndexDocValues() bool +} + +type VisitTerm func(FieldTerm) + +type FieldTerm interface { + Term() []byte + Frequency() int + EachLocation(vl VisitLocation) +} + +type VisitLocation func(Location) diff --git a/vendor/github.com/blevesearch/scorch_segment_api/v2/segment.go b/vendor/github.com/blugelabs/bluge_segment_api/segment.go similarity index 53% rename from vendor/github.com/blevesearch/scorch_segment_api/v2/segment.go rename to vendor/github.com/blugelabs/bluge_segment_api/segment.go index 702214a55..aa77fb14c 100644 --- a/vendor/github.com/blevesearch/scorch_segment_api/v2/segment.go +++ b/vendor/github.com/blugelabs/bluge_segment_api/segment.go @@ -1,4 +1,4 @@ -// Copyright (c) 2017 Couchbase, Inc. +// Copyright (c) 2020 Couchbase, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -16,64 +16,76 @@ package segment import ( "fmt" + "io" "github.com/RoaringBitmap/roaring" - index "github.com/blevesearch/bleve_index_api" ) var ErrClosed = fmt.Errorf("index closed") -// StoredFieldValueVisitor defines a callback to be visited for each +// StoredFieldVisitor defines a callback to be visited for each // stored field value. The return value determines if the visitor // should keep going. Returning true continues visiting, false stops. -type StoredFieldValueVisitor func(field string, typ byte, value []byte, pos []uint64) bool +type StoredFieldVisitor func(field string, value []byte) bool -type Segment interface { - Dictionary(field string) (TermDictionary, error) +// DocumentValueVisitor is the callback function used by the +// DocumentValueReader's VisitDocumentValues method. 
+type DocumentValueVisitor func(field string, term []byte) - VisitStoredFields(num uint64, visitor StoredFieldValueVisitor) error +type Term interface { + Field() string + Term() []byte +} - DocID(num uint64) ([]byte, error) +type Segment interface { + Dictionary(field string) (Dictionary, error) + + VisitStoredFields(num uint64, visitor StoredFieldVisitor) error Count() uint64 - DocNumbers([]string) (*roaring.Bitmap, error) + DocsMatchingTerms([]Term) (*roaring.Bitmap, error) Fields() []string - Close() error + CollectionStats(field string) (CollectionStats, error) Size() int - AddRef() - DecRef() error -} + DocumentValueReader(fields []string) (DocumentValueReader, error) -type UnpersistedSegment interface { - Segment - Persist(path string) error + WriteTo(w io.Writer, closeCh chan struct{}) (int64, error) + + Type() string + Version() uint32 } -type PersistedSegment interface { - Segment - Path() string +type DictionaryLookup interface { + Contains(key []byte) (bool, error) + Close() error } -type TermDictionary interface { +type Dictionary interface { + DictionaryLookup + PostingsList(term []byte, except *roaring.Bitmap, prealloc PostingsList) (PostingsList, error) - AutomatonIterator(a Automaton, + Iterator(a Automaton, startKeyInclusive, endKeyExclusive []byte) DictionaryIterator +} - Contains(key []byte) (bool, error) +type DictionaryEntry interface { + Term() string + Count() uint64 } type DictionaryIterator interface { - Next() (*index.DictEntry, error) + Next() (DictionaryEntry, error) + Close() error } type PostingsList interface { - Iterator(includeFreq, includeNorm, includeLocations bool, prealloc PostingsIterator) PostingsIterator + Iterator(includeFreq, includeNorm, includeLocations bool, prealloc PostingsIterator) (PostingsIterator, error) Size() int @@ -99,6 +111,13 @@ type PostingsIterator interface { Advance(docNum uint64) (Posting, error) Size() int + + // is this postings iterator empty? 
+ Empty() bool + + Count() uint64 + + Close() error } type OptimizablePostingsIterator interface { @@ -109,40 +128,45 @@ type OptimizablePostingsIterator interface { type Posting interface { Number() uint64 - - Frequency() uint64 + SetNumber(uint64) + Frequency() int Norm() float64 - Locations() []Location - Size() int } type Location interface { Field() string - Start() uint64 - End() uint64 - Pos() uint64 - ArrayPositions() []uint64 + Start() int + End() int + Pos() int Size() int } -// DocValueVisitable is implemented by various scorch segment -// implementations with persistence for the un inverting of the -// postings or other indexed values. -type DocValueVisitable interface { - VisitDocValues(localDocNum uint64, fields []string, - visitor index.DocValueVisitor, optional DocVisitState) (DocVisitState, error) +type Merger interface { + WriteTo(w io.Writer, closeCh chan struct{}) (n int64, err error) + DocumentNumbers() [][]uint64 +} - // VisitableDocValueFields implementation should return - // the list of fields which are document value persisted and - // therefore visitable by the above VisitDocValues method. - VisitableDocValueFields() ([]string, error) +type DocumentValueReader interface { + VisitDocumentValues(number uint64, visitor DocumentValueVisitor) error } type DocVisitState interface { } -type StatsReporter interface { - ReportBytesWritten(bytesWritten uint64) +type Optimizable interface { + Optimize(kind string, octx OptimizableContext) (OptimizableContext, error) +} + +type OptimizableContext interface { + // Once all the optimzable resources have been provided the same + // OptimizableContext instance, the optimization preparations are + // finished or completed via the Finish() method. + // + // Depending on the optimization being performed, the Finish() + // method might return a non-nil Optimized instance. For example, + // the Optimized instance might represent an optimized + // PostingsIterator instance. 
+ Finish() (PostingsIterator, error) } diff --git a/vendor/github.com/blugelabs/bluge_segment_api/stats.go b/vendor/github.com/blugelabs/bluge_segment_api/stats.go new file mode 100644 index 000000000..ab8841bb2 --- /dev/null +++ b/vendor/github.com/blugelabs/bluge_segment_api/stats.go @@ -0,0 +1,42 @@ +// Copyright (c) 2020 The Bluge Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package segment + +type CollectionStats interface { + + // TotalDocumentCount returns the number of documents, regardless of whether or not + // they have any terms for this field + TotalDocumentCount() uint64 + + // DocumentCount returns the number of documents with at least one term for this field + DocumentCount() uint64 + + // SumTotalTermFrequency returns to total number of tokens across all documents + SumTotalTermFrequency() uint64 + + // SumDocumentFrequency returns the sum of all posting list entries for this field + // SumDocumentFrequency() int + + Merge(CollectionStats) +} + +type TermStats interface { + + // DocumentFrequency returns the number of documents using this term + DocumentFrequency() uint64 + + // TotalTermFrequency returns the total number of occurrences of this term + // TotalTermFrequency() int +} diff --git a/vendor/github.com/blevesearch/zapx/v11/.gitignore b/vendor/github.com/blugelabs/ice/.gitignore similarity index 86% rename from vendor/github.com/blevesearch/zapx/v11/.gitignore rename to vendor/github.com/blugelabs/ice/.gitignore 
index 46d1cfad5..44e27bf99 100644 --- a/vendor/github.com/blevesearch/zapx/v11/.gitignore +++ b/vendor/github.com/blugelabs/ice/.gitignore @@ -7,6 +7,6 @@ **/.idea/ **/*.iml .DS_Store -/cmd/zap/zap +/cmd/ice/ice *.test tags diff --git a/vendor/github.com/blugelabs/ice/.golangci.yml b/vendor/github.com/blugelabs/ice/.golangci.yml new file mode 100644 index 000000000..13e02bd54 --- /dev/null +++ b/vendor/github.com/blugelabs/ice/.golangci.yml @@ -0,0 +1,148 @@ +linters-settings: + depguard: + list-type: blacklist + packages: + # logging is allowed only by logutils.Log, logrus + # is allowed to use only in logutils package + - github.com/sirupsen/logrus + packages-with-error-message: + - github.com/sirupsen/logrus: "logging is allowed only by logutils.Log" + dupl: + threshold: 100 + funlen: + lines: 100 + statements: 50 + goconst: + min-len: 2 + min-occurrences: 2 + gocritic: + enabled-tags: + - diagnostic + - experimental + - opinionated + - performance + - style + disabled-checks: + - dupImport # https://github.com/go-critic/go-critic/issues/845 + - ifElseChain + - octalLiteral + - whyNoLint + - wrapperFunc + gocyclo: + min-complexity: 20 # increased from 15 to get us going, but not make things worse + goimports: + local-prefixes: github.com/golangci/golangci-lint + golint: + min-confidence: 0 + gomnd: + settings: + mnd: + # don't include the "operation" and "assign" + checks: argument,case,condition,return + govet: + check-shadowing: true + settings: + printf: + funcs: + - (github.com/golangci/golangci-lint/pkg/logutils.Log).Infof + - (github.com/golangci/golangci-lint/pkg/logutils.Log).Warnf + - (github.com/golangci/golangci-lint/pkg/logutils.Log).Errorf + - (github.com/golangci/golangci-lint/pkg/logutils.Log).Fatalf + lll: + line-length: 140 + maligned: + suggest-new: true + misspell: + locale: US + nolintlint: + allow-leading-space: true # don't require machine-readable nolint directives (i.e. 
with no leading space) + allow-unused: false # report any unused nolint directives + require-explanation: false # don't require an explanation for nolint directives + require-specific: false # don't require nolint directives to be specific about which linter is being skipped + +linters: + # please, do not use `enable-all`: it's deprecated and will be removed soon. + # inverted configuration with `enable-all` and `disable` is not scalable during updates of golangci-lint + disable-all: true + enable: + - bodyclose + - deadcode + - depguard + - dogsled + - dupl + - errcheck + - funlen + - gochecknoinits + - goconst + - gocritic + - gocyclo + - gofmt + - goimports + - golint + - gomnd + - goprintffuncname + - gosec + - gosimple + - govet + - ineffassign + - interfacer + - lll + - misspell + - nakedret + - nolintlint + - rowserrcheck + - scopelint + - staticcheck + - structcheck + - stylecheck + - typecheck + - unconvert + - unparam + - unused + - varcheck + - whitespace + + # don't enable: + # - asciicheck + # - gochecknoglobals + # - gocognit + # - godot + # - godox + # - goerr113 + # - maligned + # - nestif + # - prealloc + # - testpackage + # - wsl + +issues: + # Excluding configuration per-path, per-linter, per-text and per-source + exclude-rules: + - path: _test\.go + linters: + - gomnd + - path: cmd/ice/cmd + linters: + - gochecknoinits + - path: sizes.go + linters: + - gochecknoinits + + # https://github.com/go-critic/go-critic/issues/926 + - linters: + - gocritic + text: "unnecessaryDefer:" + +run: + skip-dirs: + - test/testdata_etc + - internal/cache + - internal/renameio + - internal/robustio + +# golangci.com configuration +# https://github.com/golangci/golangci/wiki/Configuration +service: + golangci-lint-version: 1.23.x # use the fixed version to not introduce new linters unexpectedly + prepare: + - echo "here I can run custom commands, but no preparation needed for this repo" diff --git a/vendor/github.com/blugelabs/ice/AUTHORS 
b/vendor/github.com/blugelabs/ice/AUTHORS new file mode 100644 index 000000000..4ee2adc70 --- /dev/null +++ b/vendor/github.com/blugelabs/ice/AUTHORS @@ -0,0 +1,10 @@ +# This is the official list of Bluge authors for copyright purposes. +# +# Names should be added to this file as one of +# Organization's name +# Individual's name +# Individual's name +# +# Please keep the list sorted. + +Marty Schoch diff --git a/vendor/github.com/blugelabs/ice/CONTRIBUTING.md b/vendor/github.com/blugelabs/ice/CONTRIBUTING.md new file mode 100644 index 000000000..b95ed0a5a --- /dev/null +++ b/vendor/github.com/blugelabs/ice/CONTRIBUTING.md @@ -0,0 +1,11 @@ +# Contributing to Bluge + +Bluge is an open source project. + +Thank you for your contribution, we appreciate your help! + +## Contributing code + +Portions of existing code are copyright Couchbase, Inc. + +All new contributions should be copyright The Bluge Authors. New contributors should add an appropriate entry to the AUTHORS file at the root of the repository. All contributions must be distributed under the Apache License found in the LICENSE file. 
diff --git a/vendor/github.com/blevesearch/scorch_segment_api/v2/LICENSE b/vendor/github.com/blugelabs/ice/LICENSE similarity index 100% rename from vendor/github.com/blevesearch/scorch_segment_api/v2/LICENSE rename to vendor/github.com/blugelabs/ice/LICENSE diff --git a/vendor/github.com/blugelabs/ice/README.md b/vendor/github.com/blugelabs/ice/README.md new file mode 100644 index 000000000..651ac6f18 --- /dev/null +++ b/vendor/github.com/blugelabs/ice/README.md @@ -0,0 +1,334 @@ +# ice + +[![PkgGoDev](https://pkg.go.dev/badge/github.com/blugelabs/ice)](https://pkg.go.dev/github.com/blugelabs/ice) +[![Tests](https://github.com/blugelabs/ice/workflows/Tests/badge.svg?branch=master&event=push)](https://github.com/blugelabs/ice/actions?query=workflow%3ATests+event%3Apush+branch%3Amaster) +[![Lint](https://github.com/blugelabs/ice/workflows/Lint/badge.svg?branch=master&event=push)](https://github.com/blugelabs/ice/actions?query=workflow%3ALint+event%3Apush+branch%3Amaster) + +The file is written in the reverse order that we typically access data. This helps us write in one pass since later sections of the file require file offsets of things we've already written. + +Current usage: + +- crc-32 bytes and version are in fixed position at the end of the file +- reading remainder of footer could be version specific +- remainder of footer gives us: + - 3 important offsets (docValue, fields index and stored data index) + - 2 important values (number of docs and chunk factor) +- field data is processed once and memoized onto the heap so that we never have to go back to disk for it +- access to stored data by doc number means first navigating to the stored data index, then accessing a fixed position offset into that slice, which gives us the actual address of the data. the first bytes of that section tell us the size of data so that we know where it ends. 
+- access to all other indexed data follows the following pattern: + - first know the field name -> convert to id + - next navigate to term dictionary for that field + - some operations stop here and do dictionary ops + - next use dictionary to navigate to posting list for a specific term + - walk posting list + - if necessary, walk posting details as we go + - if location info is desired, consult location bitmap to see if it is there + +## stored fields section + +- for each document + - preparation phase: + - produce a slice of metadata bytes and data bytes + - produce these slices in field id order + - field value is appended to the data slice + - metadata slice is varint encoded with the following values for each field value + - field id (uint16) + - field value start offset in uncompressed data slice (uint64) + - field value length (uint64) + - compress the data slice using snappy + - file writing phase: + - remember the start offset for this document + - write out meta data length (varint uint64) + - write out compressed data length (varint uint64) + - write out the metadata bytes + - write out the compressed data bytes + +## stored fields idx + +- for each document + - write start offset (remembered from previous section) of stored data (big endian uint64) + +With this index and a known document number, we have direct access to all the stored field data. 
+ +## posting details (freq/norm) section + +- for each posting list + - produce a slice containing multiple consecutive chunks (each chunk is varint stream) + - produce a slice remembering offsets of where each chunk starts + - preparation phase: + - for each hit in the posting list + - if this hit is in next chunk close out encoding of last chunk and record offset start of next + - encode term frequency (uint64) + - encode norm factor (float32) - similarity specific implementation + - file writing phase: + - remember start position for this posting list details + - write out number of chunks that follow (varint uint64) + - write out length of each chunk (each a varint uint64) + - write out the byte slice containing all the chunk data + +If you know the doc number you're interested in, this format lets you jump to the correct chunk (docNum/chunkFactor) directly and then seek within that chunk until you find it. + +## posting details (location) section + +- for each posting list + - produce a slice containing multiple consecutive chunks (each chunk is varint stream) + - produce a slice remembering offsets of where each chunk starts + - preparation phase: + - for each hit in the posting list + - if this hit is in next chunk close out encoding of last chunk and record offset start of next + - encode field (uint16) + - encode field pos (uint64) + - encode field start (uint64) + - encode field end (uint64) + - file writing phase: + - remember start position for this posting list details + - write out number of chunks that follow (varint uint64) + - write out length of each chunk (each a varint uint64) + - write out the byte slice containing all the chunk data + +If you know the doc number you're interested in, this format lets you jump to the correct chunk (docNum/chunkFactor) directly and then seek within that chunk until you find it. 
+ +## postings list section + +- for each posting list + - preparation phase: + - encode roaring bitmap posting list to bytes (so we know the length) + - file writing phase: + - remember the start position for this posting list + - write freq/norm details offset (remembered from previous, as varint uint64) + - write location details offset (remembered from previous, as varint uint64) + - write length of encoded roaring bitmap + - write the serialized roaring bitmap data + +## dictionary + +- for each field + - preparation phase: + - encode vellum FST with dictionary data pointing to file offset of posting list (remembered from previous) + - file writing phase: + - remember the start position of this persistDictionary + - write length of vellum data (varint uint64) + - write out vellum data + +## fields section + +- for each field + - file writing phase: + - remember start offset for each field + - write dictionary address (remembered from previous) (varint uint64) + - write length of field name (varint uint64) + - write field name bytes + +## fields idx + +- for each field + - file writing phase: + - write big endian uint64 of start offset for each field + +NOTE: currently we don't know or record the length of this fields index. Instead we rely on the fact that we know it immediately precedes a footer of known size. 
+ +## fields DocValue + +- for each field + - preparation phase: + - produce a slice containing multiple consecutive chunks, where each chunk is composed of a meta section followed by compressed columnar field data + - produce a slice remembering the length of each chunk + - file writing phase: + - remember the start position of this first field DocValue offset in the footer + - write out number of chunks that follow (varint uint64) + - write out length of each chunk (each a varint uint64) + - write out the byte slice containing all the chunk data + +NOTE: currently the meta header inside each chunk gives clue to the location offsets and size of the data pertaining to a given docID and any +read operation leverage that meta information to extract the document specific data from the file. + +## footer + +- file writing phase + - write number of docs (big endian uint64) + - write stored field index location (big endian uint64) + - write field index location (big endian uint64) + - write field docValue location (big endian uint64) + - write out chunk factor (big endian uint32) + - write out version (big endian uint32) + - write out file CRC of everything preceding this (big endian uint32) + +--- + +# ice file format diagrams + +## Legend + +### Sections + + |========| + | | section + |========| + +### Fixed-size fields + + |--------| |----| |--| |-| + | | uint64 | | uint32 | | uint16 | | uint8 + |--------| |----| |--| |-| + +### Varints + + |~~~~~~~~| + | | varint(up to uint64) + |~~~~~~~~| + +### Arbitrary-length fields + + |--------...---| + | | arbitrary-length field (string, vellum, roaring bitmap) + |--------...---| + +### Chunked data + + [--------] + [ ] + [--------] + +## Overview + +Footer section describes the configuration of particular ice file. The format of footer is version-dependent, so it is necessary to check `V` field before the parsing. 
+ + |==================================================| + | Stored Fields | + |==================================================| + |-----> | Stored Fields Index | + | |==================================================| + | | Dictionaries + Postings + DocValues | + | |==================================================| + | |---> | DocValues Index | + | | |==================================================| + | | | Fields | + | | |==================================================| + | | |-> | Fields Index | + | | | |========|========|========|========|====|====|====| + | | | | D# | SF | F | FDV | CF | V | CC | (Footer) + | | | |========|====|===|====|===|====|===|====|====|====| + | | | | | | + |-+-+-----------------| | | + | |--------------------------| | + |-------------------------------------| + + D#. Number of Docs. + SF. Stored Fields Index Offset. + F. Field Index Offset. + FDV. Field DocValue Offset. + CF. Chunk Factor. + V. Version. + CC. CRC32. + +## Stored Fields + +Stored Fields Index is `D#` consecutive 64-bit unsigned integers - offsets, where relevant Stored Fields Data records are located. + + 0 [SF] [SF + D# * 8] + | Stored Fields | Stored Fields Index | + |================================|==================================| + | | | + | |--------------------| ||--------|--------|. . .|--------|| + | |-> | Stored Fields Data | || 0 | 1 | | D# - 1 || + | | |--------------------| ||--------|----|---|. . .|--------|| + | | | | | + |===|============================|==============|===================| + | | + |-------------------------------------------| + +Stored Fields Data is an arbitrary size record, which consists of metadata and [Snappy](https://github.com/golang/snappy)-compressed data. + + Stored Fields Data + |~~~~~~~~|~~~~~~~~|~~~~~~~~...~~~~~~~~|~~~~~~~~...~~~~~~~~| + | MDS | CDS | MD | CD | + |~~~~~~~~|~~~~~~~~|~~~~~~~~...~~~~~~~~|~~~~~~~~...~~~~~~~~| + + MDS. Metadata size. + CDS. Compressed data size. + MD. Metadata. + CD. 
Snappy-compressed data. + +## Fields + +Fields Index section located between addresses `F` and `len(file) - len(footer)` and consist of `uint64` values (`F1`, `F2`, ...) which are offsets to records in Fields section. We have `F# = (len(file) - len(footer) - F) / sizeof(uint64)` fields. + + + (...) [F] [F + F#] + | Fields | Fields Index. | + |==================================================|================================| + | | | + | |~~~~~~~~|~~~~~~~~|---...---|~~~~~~~~|~~~~~~~~|||--------|--------|...|--------|| + ||->| Dict | Length | Name | # Docs |Tot Freq||| 0 | 1 | | F# - 1 || + || |~~~~~~~~|~~~~~~~~|---...---|~~~~~~~~|~~~~~~~~|||--------|----|---|...|--------|| + || | | | + ||=================================================|==============|=================| + | | + |----------------------------------------------------------------| + + +## Dictionaries + Postings + +Each of fields has its own dictionary, encoded in [Vellum](https://github.com/couchbase/vellum) format. Dictionary consists of pairs `(term, offset)`, where `offset` indicates the position of postings (list of documents) for this particular term. 
+ + |================================================================|- Dictionaries + + | | Postings + + | | DocValues + | Freq/Norm (chunked) | + | [~~~~~~|~~~~~~~~~~~~~~~~~~~~~~~~~~~~~] | + | |->[ Freq | Norm (float32 under varint) ] | + | | [~~~~~~|~~~~~~~~~~~~~~~~~~~~~~~~~~~~~] | + | | | + | |------------------------------------------------------------| | + | Location Details (chunked) | | + | [~~~~~~|~~~~~|~~~~~~~|~~~~~] | | + | |->[ Size | Pos | Start | End ] | | + | | [~~~~~~|~~~~~|~~~~~~~|~~~~~] | | + | | | | + | |----------------------| | | + | Postings List | | | + | |~~~~~~~~|~~~~~|~~|~~~~~~~~|-----------...--| | | + | |->| F/N | LD | Length | ROARING BITMAP | | | + | | |~~~~~|~~|~~~~~~~~|~~~~~~~~|-----------...--| | | + | | |----------------------------------------------| | + | |--------------------------------------| | + | Dictionary | | + | |~~~~~~~~|--------------------------|-...-| | + | |->| Length | VELLUM DATA : (TERM -> OFFSET) | | + | | |~~~~~~~~|----------------------------...-| | + | | | + |======|=========================================================|- DocValues Index + | | | + |======|=========================================================|- Fields + | | | + | |~~~~|~~~|~~~~~~~~|---...---| | + | | Dict | Length | Name | | + | |~~~~~~~~|~~~~~~~~|---...---| | + | | + |================================================================| + +## DocValues + +DocValues Index is `F#` pairs of varints, one pair per field. Each pair of varints indicates start and end point of DocValues slice. + + |================================================================| + | |------...--| | + | |->| DocValues |<-| | + | | |------...--| | | + |==|=================|===========================================|- DocValues Index + ||~|~~~~~~~~~|~~~~~~~|~~| |~~~~~~~~~~~~~~|~~~~~~~~~~~~|| + || DV1 START | DV1 STOP | . . . . . 
| DV(F#) START | DV(F#) END || + ||~~~~~~~~~~~|~~~~~~~~~~| |~~~~~~~~~~~~~~|~~~~~~~~~~~~|| + |================================================================| + +DocValues is chunked Snappy-compressed values for each document and field. + + [~~~~~~~~~~~~~~~|~~~~~~|~~~~~~~~~|-...-|~~~~~~|~~~~~~~~~|--------------------...-] + [ Doc# in Chunk | Doc1 | Offset1 | ... | DocN | OffsetN | SNAPPY COMPRESSED DATA ] + [~~~~~~~~~~~~~~~|~~~~~~|~~~~~~~~~|-...-|~~~~~~|~~~~~~~~~|--------------------...-] + +Last 16 bytes are description of chunks. + + |~~~~~~~~~~~~...~|----------------|----------------| + | Chunk Sizes | Chunk Size Arr | Chunk# | + |~~~~~~~~~~~~...~|----------------|----------------| diff --git a/vendor/github.com/blevesearch/zapx/v14/chunk.go b/vendor/github.com/blugelabs/ice/chunk.go similarity index 52% rename from vendor/github.com/blevesearch/zapx/v14/chunk.go rename to vendor/github.com/blugelabs/ice/chunk.go index 4307d0ed2..009fd15bb 100644 --- a/vendor/github.com/blevesearch/zapx/v14/chunk.go +++ b/vendor/github.com/blugelabs/ice/chunk.go @@ -1,4 +1,4 @@ -// Copyright (c) 2019 Couchbase, Inc. +// Copyright (c) 2020 Couchbase, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,46 +12,32 @@ // See the License for the specific language governing permissions and // limitations under the License. -package zap +package ice import ( "fmt" ) -// LegacyChunkMode was the original chunk mode (always chunk size 1024) +const maxDocsToScanSequentially = 1024 + +// legacyChunkMode was the original chunk mode (always chunk size 1024) // this mode is still used for chunking doc values. -var LegacyChunkMode uint32 = 1024 +const legacyChunkMode uint32 = 1024 + +const chunkModeV1 uint32 = 1025 -// DefaultChunkMode is the most recent improvement to chunking and should +// defaultChunkMode is the most recent improvement to chunking and should // be used by default. 
-var DefaultChunkMode uint32 = 1026 +const defaultChunkMode uint32 = chunkModeV1 -func getChunkSize(chunkMode uint32, cardinality uint64, maxDocs uint64) (uint64, error) { +func getChunkSize(chunkMode uint32, cardinality, maxDocs uint64) (uint64, error) { switch { // any chunkMode <= 1024 will always chunk with chunkSize=chunkMode - case chunkMode <= 1024: + case chunkMode <= legacyChunkMode: // legacy chunk size return uint64(chunkMode), nil - case chunkMode == 1025: - // attempt at simple improvement - // theory - the point of chunking is to put a bound on the maximum number of - // calls to Next() needed to find a random document. ie, you should be able - // to do one jump to the correct chunk, and then walk through at most - // chunk-size items - // previously 1024 was chosen as the chunk size, but this is particularly - // wasteful for low cardinality terms. the observation is that if there - // are less than 1024 items, why not put them all in one chunk, - // this way you'll still achieve the same goal of visiting at most - // chunk-size items. - // no attempt is made to tweak any other case - if cardinality <= 1024 { - return maxDocs, nil - } - return 1024, nil - - case chunkMode == 1026: - // improve upon the ideas tested in chunkMode 1025 + case chunkMode == chunkModeV1: // the observation that the fewest number of dense chunks is the most // desirable layout, given the built-in assumptions of chunking // (that we want to put an upper-bound on the number of items you must @@ -59,7 +45,7 @@ func getChunkSize(chunkMode uint32, cardinality uint64, maxDocs uint64) (uint64, // // 1. compute the number of chunks needed (max 1024/chunk) // 2. 
convert to chunkSize, dividing into maxDocs - numChunks := (cardinality / 1024) + 1 + numChunks := (cardinality / maxDocsToScanSequentially) + 1 chunkSize := maxDocs / numChunks return chunkSize, nil } diff --git a/vendor/github.com/blevesearch/zapx/v12/contentcoder.go b/vendor/github.com/blugelabs/ice/contentcoder.go similarity index 87% rename from vendor/github.com/blevesearch/zapx/v12/contentcoder.go rename to vendor/github.com/blugelabs/ice/contentcoder.go index c145b5a11..2dc716933 100644 --- a/vendor/github.com/blevesearch/zapx/v12/contentcoder.go +++ b/vendor/github.com/blugelabs/ice/contentcoder.go @@ -1,4 +1,4 @@ -// Copyright (c) 2017 Couchbase, Inc. +// Copyright (c) 2020 Couchbase, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,24 +12,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -package zap +package ice import ( "bytes" "encoding/binary" "io" - "reflect" "github.com/golang/snappy" ) -var reflectStaticSizeMetaData int - -func init() { - var md MetaData - reflectStaticSizeMetaData = int(reflect.TypeOf(md).Size()) -} - var termSeparator byte = 0xff var termSeparatorSplitSlice = []byte{termSeparator} @@ -45,27 +37,27 @@ type chunkedContentCoder struct { chunkMetaBuf bytes.Buffer chunkBuf bytes.Buffer - chunkMeta []MetaData + chunkMeta []metaData compressed []byte // temp buf for snappy compression } -// MetaData represents the data information inside a +// metaData represents the data information inside a // chunk. 
-type MetaData struct { +type metaData struct { DocNum uint64 // docNum of the data inside the chunk DocDvOffset uint64 // offset of data inside the chunk for the given docid } // newChunkedContentCoder returns a new chunk content coder which // packs data into chunks based on the provided chunkSize -func newChunkedContentCoder(chunkSize uint64, maxDocNum uint64, +func newChunkedContentCoder(chunkSize, maxDocNum uint64, w io.Writer, progressiveWrite bool) *chunkedContentCoder { total := maxDocNum/chunkSize + 1 rv := &chunkedContentCoder{ chunkSize: chunkSize, chunkLens: make([]uint64, total), - chunkMeta: make([]MetaData, 0, total), + chunkMeta: make([]metaData, 0, total), w: w, progressiveWrite: progressiveWrite, } @@ -86,7 +78,7 @@ func (c *chunkedContentCoder) Reset() { c.chunkMeta = c.chunkMeta[:0] } -func (c *chunkedContentCoder) SetChunkSize(chunkSize uint64, maxDocNum uint64) { +func (c *chunkedContentCoder) SetChunkSize(chunkSize, maxDocNum uint64) { total := int(maxDocNum/chunkSize + 1) c.chunkSize = chunkSize if cap(c.chunkLens) < total { @@ -95,7 +87,7 @@ func (c *chunkedContentCoder) SetChunkSize(chunkSize uint64, maxDocNum uint64) { c.chunkLens = c.chunkLens[:total] } if cap(c.chunkMeta) < total { - c.chunkMeta = make([]MetaData, 0, total) + c.chunkMeta = make([]metaData, 0, total) } } @@ -116,7 +108,7 @@ func (c *chunkedContentCoder) flushContents() error { // write out the metaData slice for _, meta := range c.chunkMeta { - _, err := writeUvarints(&c.chunkMetaBuf, meta.DocNum, meta.DocDvOffset) + err := writeUvarints(&c.chunkMetaBuf, meta.DocNum, meta.DocDvOffset) if err != nil { return err } @@ -166,7 +158,7 @@ func (c *chunkedContentCoder) Add(docNum uint64, vals []byte) error { return err } - c.chunkMeta = append(c.chunkMeta, MetaData{ + c.chunkMeta = append(c.chunkMeta, metaData{ DocNum: docNum, DocDvOffset: uint64(dvOffset + dvSize), }) @@ -232,10 +224,9 @@ func (c *chunkedContentCoder) Write() (int, error) { return tw, nil } -// 
ReadDocValueBoundary elicits the start, end offsets from a +// readDocValueBoundary elicits the start, end offsets from a // metaData header slice -func ReadDocValueBoundary(chunk int, metaHeaders []MetaData) (uint64, uint64) { - var start uint64 +func readDocValueBoundary(chunk int, metaHeaders []metaData) (start, end uint64) { if chunk > 0 { start = metaHeaders[chunk-1].DocDvOffset } diff --git a/vendor/github.com/blevesearch/zapx/v12/count.go b/vendor/github.com/blugelabs/ice/count.go similarity index 59% rename from vendor/github.com/blevesearch/zapx/v12/count.go rename to vendor/github.com/blugelabs/ice/count.go index b6135359f..cbbb25f8a 100644 --- a/vendor/github.com/blevesearch/zapx/v12/count.go +++ b/vendor/github.com/blugelabs/ice/count.go @@ -1,4 +1,4 @@ -// Copyright (c) 2017 Couchbase, Inc. +// Copyright (c) 2020 Couchbase, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,50 +12,40 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package zap +package ice import ( "hash/crc32" "io" - - segment "github.com/blevesearch/scorch_segment_api/v2" ) -// CountHashWriter is a wrapper around a Writer which counts the number of +// countHashWriter is a wrapper around a Writer which counts the number of // bytes which have been written and computes a crc32 hash -type CountHashWriter struct { +type countHashWriter struct { w io.Writer crc uint32 n int - s segment.StatsReporter -} - -// NewCountHashWriter returns a CountHashWriter which wraps the provided Writer -func NewCountHashWriter(w io.Writer) *CountHashWriter { - return &CountHashWriter{w: w} } -func NewCountHashWriterWithStatsReporter(w io.Writer, s segment.StatsReporter) *CountHashWriter { - return &CountHashWriter{w: w, s: s} +// newCountHashWriter returns a countHashWriter which wraps the provided Writer +func newCountHashWriter(w io.Writer) *countHashWriter { + return &countHashWriter{w: w} } // Write writes the provided bytes to the wrapped writer and counts the bytes -func (c *CountHashWriter) Write(b []byte) (int, error) { +func (c *countHashWriter) Write(b []byte) (int, error) { n, err := c.w.Write(b) c.crc = crc32.Update(c.crc, crc32.IEEETable, b[:n]) c.n += n - if c.s != nil { - c.s.ReportBytesWritten(uint64(n)) - } return n, err } // Count returns the number of bytes written -func (c *CountHashWriter) Count() int { +func (c *countHashWriter) Count() int { return c.n } // Sum32 returns the CRC-32 hash of the content written to this writer -func (c *CountHashWriter) Sum32() uint32 { +func (c *countHashWriter) Sum32() uint32 { return c.crc } diff --git a/vendor/github.com/blevesearch/zapx/v12/dict.go b/vendor/github.com/blugelabs/ice/dict.go similarity index 81% rename from vendor/github.com/blevesearch/zapx/v12/dict.go rename to vendor/github.com/blugelabs/ice/dict.go index e30bf2420..cb4676ac6 100644 --- a/vendor/github.com/blevesearch/zapx/v12/dict.go +++ b/vendor/github.com/blugelabs/ice/dict.go @@ -1,4 +1,4 @@ -// Copyright (c) 2017 
Couchbase, Inc. +// Copyright (c) 2020 Couchbase, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,27 +12,26 @@ // See the License for the specific language governing permissions and // limitations under the License. -package zap +package ice import ( "fmt" "github.com/RoaringBitmap/roaring" - index "github.com/blevesearch/bleve_index_api" - segment "github.com/blevesearch/scorch_segment_api/v2" "github.com/blevesearch/vellum" + segment "github.com/blugelabs/bluge_segment_api" ) -// Dictionary is the zap representation of the term dictionary +// Dictionary is the representation of the term dictionary type Dictionary struct { - sb *SegmentBase + sb *Segment field string fieldID uint16 fst *vellum.FST fstReader *vellum.Reader } -// represents an immutable, empty dictionary +// represents an immutable, empty postings list var emptyDictionary = &Dictionary{} // PostingsList returns the postings list for the specified term @@ -104,9 +103,13 @@ func (d *Dictionary) Contains(key []byte) (bool, error) { return false, nil } -// AutomatonIterator returns an iterator which only visits terms +func (d *Dictionary) Close() error { + return nil +} + +// Iterator returns an iterator which only visits terms // having the the vellum automaton and start/end key range -func (d *Dictionary) AutomatonIterator(a segment.Automaton, +func (d *Dictionary) Iterator(a segment.Automaton, startKeyInclusive, endKeyExclusive []byte) segment.DictionaryIterator { if d.fst != nil { rv := &DictionaryIterator{ @@ -125,34 +128,52 @@ func (d *Dictionary) AutomatonIterator(a segment.Automaton, return emptyDictionaryIterator } +// represents an immutable, empty dictionary iterator +var emptyDictionaryIterator = &DictionaryIterator{} + // DictionaryIterator is an iterator for term dictionary type DictionaryIterator struct { d *Dictionary itr vellum.Iterator err error tmp PostingsList - entry index.DictEntry 
+ entry DictEntry omitCount bool } -var emptyDictionaryIterator = &DictionaryIterator{} - // Next returns the next entry in the dictionary -func (i *DictionaryIterator) Next() (*index.DictEntry, error) { +func (i *DictionaryIterator) Next() (segment.DictionaryEntry, error) { if i.err != nil && i.err != vellum.ErrIteratorDone { return nil, i.err } else if i.itr == nil || i.err == vellum.ErrIteratorDone { return nil, nil } term, postingsOffset := i.itr.Current() - i.entry.Term = string(term) + i.entry.term = string(term) if !i.omitCount { i.err = i.tmp.read(postingsOffset, i.d) if i.err != nil { return nil, i.err } - i.entry.Count = i.tmp.Count() + i.entry.count = i.tmp.Count() } i.err = i.itr.Next() return &i.entry, nil } + +func (i *DictionaryIterator) Close() error { + return nil +} + +type DictEntry struct { + term string + count uint64 +} + +func (d *DictEntry) Term() string { + return d.term +} + +func (d *DictEntry) Count() uint64 { + return d.count +} diff --git a/vendor/github.com/blevesearch/zapx/v12/docvalues.go b/vendor/github.com/blugelabs/ice/docvalues.go similarity index 66% rename from vendor/github.com/blevesearch/zapx/v12/docvalues.go rename to vendor/github.com/blugelabs/ice/docvalues.go index a530aa5ad..382f30d2b 100644 --- a/vendor/github.com/blevesearch/zapx/v12/docvalues.go +++ b/vendor/github.com/blugelabs/ice/docvalues.go @@ -1,4 +1,4 @@ -// Copyright (c) 2017 Couchbase, Inc. +// Copyright (c) 2020 Couchbase, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,33 +12,24 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package zap +package ice import ( "bytes" "encoding/binary" "fmt" "math" - "reflect" "sort" - index "github.com/blevesearch/bleve_index_api" - segment "github.com/blevesearch/scorch_segment_api/v2" + segment "github.com/blugelabs/bluge_segment_api" "github.com/golang/snappy" ) -var reflectStaticSizedocValueReader int - -func init() { - var dvi docValueReader - reflectStaticSizedocValueReader = int(reflect.TypeOf(dvi).Size()) -} - type docNumTermsVisitor func(docNum uint64, terms []byte) error type docVisitState struct { dvrs map[uint16]*docValueReader - segment *SegmentBase + segment *Segment } type docValueReader struct { @@ -46,15 +37,15 @@ type docValueReader struct { curChunkNum uint64 chunkOffsets []uint64 dvDataLoc uint64 - curChunkHeader []MetaData + curChunkHeader []metaData curChunkData []byte // compressed data cache uncompressed []byte // temp buf for snappy decompression } func (di *docValueReader) size() int { - return reflectStaticSizedocValueReader + SizeOfPtr + + return reflectStaticSizedocValueReader + sizeOfPtr + len(di.field) + - len(di.chunkOffsets)*SizeOfUint64 + + len(di.chunkOffsets)*sizeOfUint64 + len(di.curChunkHeader)*reflectStaticSizeMetaData + len(di.curChunkData) } @@ -65,7 +56,7 @@ func (di *docValueReader) cloneInto(rv *docValueReader) *docValueReader { } rv.field = di.field - rv.curChunkNum = math.MaxUint64 + rv.curChunkNum = math.MaxInt64 rv.chunkOffsets = di.chunkOffsets // immutable, so it's sharable rv.dvDataLoc = di.dvDataLoc rv.curChunkHeader = rv.curChunkHeader[:0] @@ -79,7 +70,11 @@ func (di *docValueReader) curChunkNumber() uint64 { return di.curChunkNum } -func (s *SegmentBase) loadFieldDocValueReader(field string, +const fieldDvStartWidth = 8 +const fieldDvEndWidth = 8 +const fieldDvStartEndWidth = fieldDvStartWidth + fieldDvEndWidth + +func (s *Segment) loadFieldDocValueReader(field string, fieldDvLocStart, fieldDvLocEnd uint64) (*docValueReader, error) { // get the docValue offset for the given fields if fieldDvLocStart 
== fieldNotUninverted { @@ -90,10 +85,18 @@ func (s *SegmentBase) loadFieldDocValueReader(field string, // read the number of chunks, and chunk offsets position var numChunks, chunkOffsetsPosition uint64 - if fieldDvLocEnd-fieldDvLocStart > 16 { - numChunks = binary.BigEndian.Uint64(s.mem[fieldDvLocEnd-8 : fieldDvLocEnd]) + if fieldDvLocEnd-fieldDvLocStart > fieldDvStartEndWidth { + numChunksData, err := s.data.Read(int(fieldDvLocEnd-fieldDvEndWidth), int(fieldDvLocEnd)) + if err != nil { + return nil, err + } + numChunks = binary.BigEndian.Uint64(numChunksData) // read the length of chunk offsets - chunkOffsetsLen := binary.BigEndian.Uint64(s.mem[fieldDvLocEnd-16 : fieldDvLocEnd-8]) + chunkOffsetsLenData, err := s.data.Read(int(fieldDvLocEnd-fieldDvStartEndWidth), int(fieldDvLocEnd-fieldDvEndWidth)) + if err != nil { + return nil, err + } + chunkOffsetsLen := binary.BigEndian.Uint64(chunkOffsetsLenData) // acquire position of chunk offsets chunkOffsetsPosition = (fieldDvLocEnd - 16) - chunkOffsetsLen } else { @@ -101,7 +104,7 @@ func (s *SegmentBase) loadFieldDocValueReader(field string, } fdvIter := &docValueReader{ - curChunkNum: math.MaxUint64, + curChunkNum: math.MaxInt64, field: field, chunkOffsets: make([]uint64, int(numChunks)), } @@ -109,7 +112,11 @@ func (s *SegmentBase) loadFieldDocValueReader(field string, // read the chunk offsets var offset uint64 for i := 0; i < int(numChunks); i++ { - loc, read := binary.Uvarint(s.mem[chunkOffsetsPosition+offset : chunkOffsetsPosition+offset+binary.MaxVarintLen64]) + locData, err := s.data.Read(int(chunkOffsetsPosition+offset), int(chunkOffsetsPosition+offset+binary.MaxVarintLen64)) + if err != nil { + return nil, err + } + loc, read := binary.Uvarint(locData) if read <= 0 { return nil, fmt.Errorf("corrupted chunk offset during segment load") } @@ -123,7 +130,7 @@ func (s *SegmentBase) loadFieldDocValueReader(field string, return fdvIter, nil } -func (di *docValueReader) loadDvChunk(chunkNumber uint64, s 
*SegmentBase) error { +func (di *docValueReader) loadDvChunk(chunkNumber uint64, s *Segment) error { // advance to the chunk where the docValues // reside for the given docNum destChunkDataLoc, curChunkEnd := di.dvDataLoc, di.dvDataLoc @@ -140,7 +147,11 @@ func (di *docValueReader) loadDvChunk(chunkNumber uint64, s *SegmentBase) error curChunkEnd += end // read the number of docs reside in the chunk - numDocs, read := binary.Uvarint(s.mem[destChunkDataLoc : destChunkDataLoc+binary.MaxVarintLen64]) + numDocsData, err := s.data.Read(int(destChunkDataLoc), int(destChunkDataLoc+binary.MaxVarintLen64)) + if err != nil { + return err + } + numDocs, read := binary.Uvarint(numDocsData) if read <= 0 { return fmt.Errorf("failed to read the chunk") } @@ -148,26 +159,40 @@ func (di *docValueReader) loadDvChunk(chunkNumber uint64, s *SegmentBase) error offset := uint64(0) if cap(di.curChunkHeader) < int(numDocs) { - di.curChunkHeader = make([]MetaData, int(numDocs)) + di.curChunkHeader = make([]metaData, int(numDocs)) } else { di.curChunkHeader = di.curChunkHeader[:int(numDocs)] } for i := 0; i < int(numDocs); i++ { - di.curChunkHeader[i].DocNum, read = binary.Uvarint(s.mem[chunkMetaLoc+offset : chunkMetaLoc+offset+binary.MaxVarintLen64]) + var docNumData []byte + docNumData, err = s.data.Read(int(chunkMetaLoc+offset), int(chunkMetaLoc+offset+binary.MaxVarintLen64)) + if err != nil { + return err + } + di.curChunkHeader[i].DocNum, read = binary.Uvarint(docNumData) offset += uint64(read) - di.curChunkHeader[i].DocDvOffset, read = binary.Uvarint(s.mem[chunkMetaLoc+offset : chunkMetaLoc+offset+binary.MaxVarintLen64]) + var docDvOffsetData []byte + docDvOffsetData, err = s.data.Read(int(chunkMetaLoc+offset), int(chunkMetaLoc+offset+binary.MaxVarintLen64)) + if err != nil { + return err + } + di.curChunkHeader[i].DocDvOffset, read = binary.Uvarint(docDvOffsetData) offset += uint64(read) } compressedDataLoc := chunkMetaLoc + offset dataLength := curChunkEnd - compressedDataLoc - 
di.curChunkData = s.mem[compressedDataLoc : compressedDataLoc+dataLength] + curChunkData, err := s.data.Read(int(compressedDataLoc), int(compressedDataLoc+dataLength)) + if err != nil { + return err + } + di.curChunkData = curChunkData di.curChunkNum = chunkNumber di.uncompressed = di.uncompressed[:0] return nil } -func (di *docValueReader) iterateAllDocValues(s *SegmentBase, visitor docNumTermsVisitor) error { +func (di *docValueReader) iterateAllDocValues(s *Segment, visitor docNumTermsVisitor) error { for i := 0; i < len(di.chunkOffsets); i++ { err := di.loadDvChunk(uint64(i), s) if err != nil { @@ -199,7 +224,7 @@ func (di *docValueReader) iterateAllDocValues(s *SegmentBase, visitor docNumTerm } func (di *docValueReader) visitDocValues(docNum uint64, - visitor index.DocValueVisitor) error { + visitor segment.DocumentValueVisitor) error { // binary search the term locations for the docNum start, end := di.getDocValueLocs(docNum) if start == math.MaxUint64 || end == math.MaxUint64 || start == end { @@ -235,33 +260,31 @@ func (di *docValueReader) visitDocValues(docNum uint64, return nil } -func (di *docValueReader) getDocValueLocs(docNum uint64) (uint64, uint64) { +func (di *docValueReader) getDocValueLocs(docNum uint64) (start, end uint64) { i := sort.Search(len(di.curChunkHeader), func(i int) bool { return di.curChunkHeader[i].DocNum >= docNum }) if i < len(di.curChunkHeader) && di.curChunkHeader[i].DocNum == docNum { - return ReadDocValueBoundary(i, di.curChunkHeader) + return readDocValueBoundary(i, di.curChunkHeader) } return math.MaxUint64, math.MaxUint64 } -// VisitDocValues is an implementation of the -// DocValueVisitable interface -func (s *SegmentBase) VisitDocValues(localDocNum uint64, fields []string, - visitor index.DocValueVisitor, dvsIn segment.DocVisitState) ( - segment.DocVisitState, error) { - dvs, ok := dvsIn.(*docVisitState) - if !ok || dvs == nil { +// VisitDocumentFieldTerms is an implementation of the +// DocumentFieldTermVisitable 
interface +func (s *Segment) visitDocumentFieldTerms(localDocNum uint64, fields []string, + visitor segment.DocumentValueVisitor, dvs *docVisitState) ( + *docVisitState, error) { + if dvs == nil { dvs = &docVisitState{} - } else { - if dvs.segment != s { - dvs.segment = s - dvs.dvrs = nil - } + } else if dvs.segment != s { + dvs.segment = s + dvs.dvrs = nil } - var fieldIDPlus1 uint16 if dvs.dvrs == nil { + var ok bool + var fieldIDPlus1 uint16 dvs.dvrs = make(map[uint16]*docValueReader, len(fields)) for _, field := range fields { if fieldIDPlus1, ok = s.fieldsMap[field]; !ok { @@ -277,13 +300,15 @@ func (s *SegmentBase) VisitDocValues(localDocNum uint64, fields []string, // find the chunkNumber where the docValues are stored // NOTE: doc values continue to use legacy chunk mode - chunkFactor, err := getChunkSize(LegacyChunkMode, 0, 0) + chunkFactor, err := getChunkSize(legacyChunkMode, 0, 0) if err != nil { return nil, err } docInChunk := localDocNum / chunkFactor var dvr *docValueReader for _, field := range fields { + var ok bool + var fieldIDPlus1 uint16 if fieldIDPlus1, ok = s.fieldsMap[field]; !ok { continue } @@ -303,9 +328,24 @@ func (s *SegmentBase) VisitDocValues(localDocNum uint64, fields []string, return dvs, nil } -// VisitableDocValueFields returns the list of fields with -// persisted doc value terms ready to be visitable using the -// VisitDocumentFieldTerms method. 
-func (s *SegmentBase) VisitableDocValueFields() ([]string, error) { - return s.fieldDvNames, nil +type DocumentValueReader struct { + fields []string + state *docVisitState + segment *Segment +} + +func (d *DocumentValueReader) VisitDocumentValues(number uint64, visitor segment.DocumentValueVisitor) error { + state, err := d.segment.visitDocumentFieldTerms(number, d.fields, visitor, d.state) + if err != nil { + return err + } + d.state = state + return nil +} + +func (s *Segment) DocumentValueReader(fields []string) (segment.DocumentValueReader, error) { + return &DocumentValueReader{ + fields: fields, + segment: s, + }, nil } diff --git a/vendor/github.com/blevesearch/zapx/v12/enumerator.go b/vendor/github.com/blugelabs/ice/enumerator.go similarity index 91% rename from vendor/github.com/blevesearch/zapx/v12/enumerator.go rename to vendor/github.com/blugelabs/ice/enumerator.go index 972a22416..56e5bc8f5 100644 --- a/vendor/github.com/blevesearch/zapx/v12/enumerator.go +++ b/vendor/github.com/blugelabs/ice/enumerator.go @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Couchbase, Inc. +// Copyright (c) 2020 Couchbase, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package zap +package ice import ( "bytes" @@ -80,22 +80,20 @@ func (m *enumerator) updateMatches(skipEmptyKey bool) { // Current returns the enumerator's current key, iterator-index, and // value. If the enumerator is not pointing at a valid value (because // Next returned an error previously), Current will return nil,0,0. 
-func (m *enumerator) Current() ([]byte, int, uint64) { - var i int - var v uint64 +func (m *enumerator) Current() (key []byte, index int, val uint64) { if m.lowCurr < len(m.lowIdxs) { - i = m.lowIdxs[m.lowCurr] - v = m.currVs[i] + index = m.lowIdxs[m.lowCurr] + val = m.currVs[index] } - return m.lowK, i, v + return m.lowK, index, val } // GetLowIdxsAndValues will return all of the iterator indices // which point to the current key, and their corresponding // values. This can be used by advanced caller which may need // to peek into these other sets of data before processing. -func (m *enumerator) GetLowIdxsAndValues() ([]int, []uint64) { - values := make([]uint64, 0, len(m.lowIdxs)) +func (m *enumerator) GetLowIdxsAndValues() (lowIdxs []int, values []uint64) { + values = make([]uint64, 0, len(m.lowIdxs)) for _, idx := range m.lowIdxs { values = append(values, m.currVs[idx]) } @@ -105,7 +103,7 @@ func (m *enumerator) GetLowIdxsAndValues() ([]int, []uint64) { // Next advances the enumerator to the next key/iterator/value result, // else vellum.ErrIteratorDone is returned. func (m *enumerator) Next() error { - m.lowCurr += 1 + m.lowCurr++ if m.lowCurr >= len(m.lowIdxs) { // move all the current low iterators forwards for _, vi := range m.lowIdxs { diff --git a/vendor/github.com/blugelabs/ice/footer.go b/vendor/github.com/blugelabs/ice/footer.go new file mode 100644 index 000000000..3018065f5 --- /dev/null +++ b/vendor/github.com/blugelabs/ice/footer.go @@ -0,0 +1,119 @@ +// Copyright (c) 2020 The Bluge Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package ice + +import ( + "encoding/binary" + "fmt" + + segment "github.com/blugelabs/bluge_segment_api" +) + +// Ice footer +// +// |========|========|========|========|====|====|====| +// | D# | SF | F | FDV | CM | V | CC | +// |========|====|===|====|===|====|===|====|====|====| +// +// D# - number of docs +// SF - stored fields index offset +// F - field index offset +// FDV - field doc values offset +// CM - chunk Mode +// V - version +// CC - crc32 + +type footer struct { + storedIndexOffset uint64 + docValueOffset uint64 + fieldsIndexOffset uint64 + numDocs uint64 + crc uint32 + version uint32 + chunkMode uint32 +} + +const ( + crcWidth = 4 + verWidth = 4 + chunkWidth = 4 + fdvOffsetWidth = 8 + fieldsOffsetWidth = 8 + storedOffsetWidth = 8 + numDocsWidth = 8 + footerLen = crcWidth + verWidth + chunkWidth + fdvOffsetWidth + + fieldsOffsetWidth + storedOffsetWidth + numDocsWidth +) + +func parseFooter(data *segment.Data) (*footer, error) { + if data.Len() < footerLen { + return nil, fmt.Errorf("data len %d less than footer len %d", data.Len(), + footerLen) + } + + rv := &footer{} + crcOffset := data.Len() - crcWidth + crcData, err := data.Read(crcOffset, crcOffset+crcWidth) + if err != nil { + return nil, err + } + rv.crc = binary.BigEndian.Uint32(crcData) + + verOffset := crcOffset - verWidth + verData, err := data.Read(verOffset, verOffset+verWidth) + if err != nil { + return nil, err + } + rv.version = binary.BigEndian.Uint32(verData) + if rv.version != Version { + return nil, fmt.Errorf("unsupported version %d", rv.version) + } + + chunkOffset := verOffset - chunkWidth + chunkData, err := data.Read(chunkOffset, chunkOffset+chunkWidth) + if err != nil { + return nil, err + } + rv.chunkMode = binary.BigEndian.Uint32(chunkData) + + docValueOffset := chunkOffset - fdvOffsetWidth + docValueData, err := data.Read(docValueOffset, 
docValueOffset+fdvOffsetWidth) + if err != nil { + return nil, err + } + rv.docValueOffset = binary.BigEndian.Uint64(docValueData) + + fieldsIndexOffset := docValueOffset - fieldsOffsetWidth + fieldsData, err := data.Read(fieldsIndexOffset, fieldsIndexOffset+fieldsOffsetWidth) + if err != nil { + return nil, err + } + rv.fieldsIndexOffset = binary.BigEndian.Uint64(fieldsData) + + storedIndexOffset := fieldsIndexOffset - storedOffsetWidth + storedData, err := data.Read(storedIndexOffset, storedIndexOffset+storedOffsetWidth) + if err != nil { + return nil, err + } + rv.storedIndexOffset = binary.BigEndian.Uint64(storedData) + + numDocsOffset := storedIndexOffset - numDocsWidth + numDocsData, err := data.Read(numDocsOffset, numDocsOffset+numDocsWidth) + if err != nil { + return nil, err + } + rv.numDocs = binary.BigEndian.Uint64(numDocsData) + return rv, nil +} diff --git a/vendor/github.com/blevesearch/bleve/v2/search/query/boost.go b/vendor/github.com/blugelabs/ice/freq.go similarity index 62% rename from vendor/github.com/blevesearch/bleve/v2/search/query/boost.go rename to vendor/github.com/blugelabs/ice/freq.go index 13659945d..b61c68687 100644 --- a/vendor/github.com/blevesearch/bleve/v2/search/query/boost.go +++ b/vendor/github.com/blugelabs/ice/freq.go @@ -1,4 +1,4 @@ -// Copyright (c) 2014 Couchbase, Inc. +// Copyright (c) 2020 The Bluge Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,22 +12,23 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package query +package ice -import "fmt" - -type Boost float64 +type tokenLocation struct { + FieldVal string + StartVal int + EndVal int + PositionVal int +} -func (b *Boost) Value() float64 { - if b == nil { - return 1.0 - } - return float64(*b) +type tokenFreq struct { + TermVal []byte + Locations []*tokenLocation + frequency int } -func (b *Boost) GoString() string { - if b == nil { - return "boost unspecified" - } - return fmt.Sprintf("%f", *b) +func (tf *tokenFreq) Frequency() int { + return tf.frequency } + +type tokenFrequencies map[string]*tokenFreq diff --git a/vendor/github.com/blevesearch/zapx/v15/intcoder.go b/vendor/github.com/blugelabs/ice/intcoder.go similarity index 85% rename from vendor/github.com/blevesearch/zapx/v15/intcoder.go rename to vendor/github.com/blugelabs/ice/intcoder.go index c3c488fb7..28749b145 100644 --- a/vendor/github.com/blevesearch/zapx/v15/intcoder.go +++ b/vendor/github.com/blugelabs/ice/intcoder.go @@ -1,4 +1,4 @@ -// Copyright (c) 2017 Couchbase, Inc. +// Copyright (c) 2020 Couchbase, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package zap +package ice import ( "bytes" @@ -39,7 +39,7 @@ type chunkedIntCoder struct { // newChunkedIntCoder returns a new chunk int coder which packs data into // chunks based on the provided chunkSize and supports up to the specified // maxDocNum -func newChunkedIntCoder(chunkSize uint64, maxDocNum uint64) *chunkedIntCoder { +func newChunkedIntCoder(chunkSize, maxDocNum uint64) *chunkedIntCoder { total := maxDocNum/chunkSize + 1 rv := &chunkedIntCoder{ chunkSize: chunkSize, @@ -63,7 +63,7 @@ func (c *chunkedIntCoder) Reset() { // SetChunkSize changes the chunk size. 
It is only valid to do so // with a new chunkedIntCoder, or immediately after calling Reset() -func (c *chunkedIntCoder) SetChunkSize(chunkSize uint64, maxDocNum uint64) { +func (c *chunkedIntCoder) SetChunkSize(chunkSize, maxDocNum uint64) { total := int(maxDocNum/chunkSize + 1) c.chunkSize = chunkSize if cap(c.chunkLens) < total { @@ -99,19 +99,6 @@ func (c *chunkedIntCoder) Add(docNum uint64, vals ...uint64) error { return nil } -func (c *chunkedIntCoder) AddBytes(docNum uint64, buf []byte) error { - chunk := docNum / c.chunkSize - if chunk != c.currChunk { - // starting a new chunk - c.Close() - c.chunkBuf.Reset() - c.currChunk = chunk - } - - _, err := c.chunkBuf.Write(buf) - return err -} - // Close indicates you are done calling Add() this allows the final chunk // to be encoded. func (c *chunkedIntCoder) Close() { @@ -154,18 +141,18 @@ func (c *chunkedIntCoder) Write(w io.Writer) (int, error) { // writeAt commits all the encoded chunked integers to the provided writer // and returns the starting offset, total bytes written and an error -func (c *chunkedIntCoder) writeAt(w io.Writer) (uint64, int, error) { - startOffset := uint64(termNotEncoded) - if len(c.final) <= 0 { - return startOffset, 0, nil +func (c *chunkedIntCoder) writeAt(w io.Writer) (startOffset uint64, err error) { + startOffset = uint64(termNotEncoded) + if len(c.final) == 0 { + return startOffset, nil } - if chw := w.(*CountHashWriter); chw != nil { + if chw := w.(*countHashWriter); chw != nil { startOffset = uint64(chw.Count()) } - tw, err := c.Write(w) - return startOffset, tw, err + _, err = c.Write(w) + return startOffset, err } func (c *chunkedIntCoder) FinalSize() int { @@ -197,8 +184,7 @@ func modifyLengthsToEndOffsets(lengths []uint64) []uint64 { return lengths } -func readChunkBoundary(chunk int, offsets []uint64) (uint64, uint64) { - var start uint64 +func readChunkBoundary(chunk int, offsets []uint64) (start, end uint64) { if chunk > 0 { start = offsets[chunk-1] } diff --git 
a/vendor/github.com/blevesearch/zapx/v15/intDecoder.go b/vendor/github.com/blugelabs/ice/intdecoder.go similarity index 72% rename from vendor/github.com/blevesearch/zapx/v15/intDecoder.go rename to vendor/github.com/blugelabs/ice/intdecoder.go index 2f777fc98..b9a057594 100644 --- a/vendor/github.com/blevesearch/zapx/v15/intDecoder.go +++ b/vendor/github.com/blugelabs/ice/intdecoder.go @@ -1,4 +1,4 @@ -// Copyright (c) 2019 Couchbase, Inc. +// Copyright (c) 2020 Couchbase, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,11 +12,13 @@ // See the License for the specific language governing permissions and // limitations under the License. -package zap +package ice import ( "encoding/binary" "fmt" + + segment "github.com/blugelabs/bluge_segment_api" ) type chunkedIntDecoder struct { @@ -24,25 +26,27 @@ type chunkedIntDecoder struct { dataStartOffset uint64 chunkOffsets []uint64 curChunkBytes []byte - data []byte + data *segment.Data r *memUvarintReader } -// newChunkedIntDecoder expects an optional or reset chunkedIntDecoder for better reuse. 
-func newChunkedIntDecoder(buf []byte, offset uint64, rv *chunkedIntDecoder) *chunkedIntDecoder { +func newChunkedIntDecoder(data *segment.Data, offset uint64, rv *chunkedIntDecoder) (*chunkedIntDecoder, error) { if rv == nil { - rv = &chunkedIntDecoder{startOffset: offset, data: buf} + rv = &chunkedIntDecoder{startOffset: offset, data: data} } else { rv.startOffset = offset - rv.data = buf + rv.data = data } - var n, numChunks uint64 var read int if offset == termNotEncoded { numChunks = 0 } else { - numChunks, read = binary.Uvarint(buf[offset+n : offset+n+binary.MaxVarintLen64]) + numChunksData, err := data.Read(int(offset+n), int(offset+n+binary.MaxVarintLen64)) + if err != nil { + return nil, err + } + numChunks, read = binary.Uvarint(numChunksData) } n += uint64(read) @@ -52,11 +56,15 @@ func newChunkedIntDecoder(buf []byte, offset uint64, rv *chunkedIntDecoder) *chu rv.chunkOffsets = make([]uint64, int(numChunks)) } for i := 0; i < int(numChunks); i++ { - rv.chunkOffsets[i], read = binary.Uvarint(buf[offset+n : offset+n+binary.MaxVarintLen64]) + chunkOffsetData, err := data.Read(int(offset+n), int(offset+n+binary.MaxVarintLen64)) + if err != nil { + return nil, err + } + rv.chunkOffsets[i], read = binary.Uvarint(chunkOffsetData) n += uint64(read) } rv.dataStartOffset = offset + n - return rv + return rv, nil } func (d *chunkedIntDecoder) loadChunk(chunk int) error { @@ -74,7 +82,11 @@ func (d *chunkedIntDecoder) loadChunk(chunk int) error { s, e := readChunkBoundary(chunk, d.chunkOffsets) start += s end += e - d.curChunkBytes = d.data[start:end] + curChunkBytesData, err := d.data.Read(int(start), int(end)) + if err != nil { + return err + } + d.curChunkBytes = curChunkBytesData if d.r == nil { d.r = newMemUvarintReader(d.curChunkBytes) } else { @@ -89,7 +101,10 @@ func (d *chunkedIntDecoder) reset() { d.dataStartOffset = 0 d.chunkOffsets = d.chunkOffsets[:0] d.curChunkBytes = d.curChunkBytes[:0] - d.data = d.data[:0] + + // FIXME what? 
+ // d.data = d.data[:0] + d.data = nil if d.r != nil { d.r.Reset([]byte(nil)) } @@ -103,10 +118,6 @@ func (d *chunkedIntDecoder) readUvarint() (uint64, error) { return d.r.ReadUvarint() } -func (d *chunkedIntDecoder) readBytes(start, end int) []byte { - return d.curChunkBytes[start:end] -} - func (d *chunkedIntDecoder) SkipUvarint() { d.r.SkipUvarint() } @@ -118,7 +129,3 @@ func (d *chunkedIntDecoder) SkipBytes(count int) { func (d *chunkedIntDecoder) Len() int { return d.r.Len() } - -func (d *chunkedIntDecoder) remainingLen() int { - return len(d.curChunkBytes) - d.r.Len() -} diff --git a/vendor/github.com/blugelabs/ice/load.go b/vendor/github.com/blugelabs/ice/load.go new file mode 100644 index 000000000..97b041f91 --- /dev/null +++ b/vendor/github.com/blugelabs/ice/load.go @@ -0,0 +1,130 @@ +// Copyright (c) 2020 The Bluge Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package ice + +import ( + "encoding/binary" + "fmt" + + "github.com/blevesearch/vellum" + segment "github.com/blugelabs/bluge_segment_api" +) + +// Open returns an impl of a segment +func Load(data *segment.Data) (segment.Segment, error) { + return load(data) +} + +func load(data *segment.Data) (*Segment, error) { + footer, err := parseFooter(data) + if err != nil { + return nil, fmt.Errorf("error parsing footer: %w", err) + } + rv := &Segment{ + data: data.Slice(0, data.Len()-footerLen), + footer: footer, + fieldsMap: make(map[string]uint16), + fieldDvReaders: make(map[uint16]*docValueReader), + fieldFSTs: make(map[uint16]*vellum.FST), + fieldDocs: make(map[uint16]uint64), + fieldFreqs: make(map[uint16]uint64), + } + + // FIXME temporarily map to existing footer fields + // rv.memCRC = footer.crc + // rv.chunkMode = footer.chunkMode + // rv.numDocs = footer.numDocs + // rv.storedIndexOffset = footer.storedIndexOffset + // rv.fieldsIndexOffset = footer.fieldsIndexOffset + // rv.docValueOffset = footer.docValueOffset + + err = rv.loadFields() + if err != nil { + return nil, err + } + + err = rv.loadDvReaders() + if err != nil { + return nil, err + } + + rv.updateSize() + + return rv, nil +} + +const fileAddrWidth = 8 + +func (s *Segment) loadFields() error { + // NOTE for now we assume the fields index immediately precedes + // the footer, and if this changes, need to adjust accordingly (or + // store explicit length), where s.mem was sliced from s.mm in Open(). 
+ fieldsIndexEnd := uint64(s.data.Len()) + + // iterate through fields index + var fieldID uint64 + for s.footer.fieldsIndexOffset+(fileAddrWidth*fieldID) < fieldsIndexEnd { + addrData, err := s.data.Read(int(s.footer.fieldsIndexOffset+(fileAddrWidth*fieldID)), + int(s.footer.fieldsIndexOffset+(fileAddrWidth*fieldID)+fileAddrWidth)) + if err != nil { + return err + } + addr := binary.BigEndian.Uint64(addrData) + + dictLocData, err := s.data.Read(int(addr), int(fieldsIndexEnd)) + if err != nil { + return err + } + dictLoc, read := binary.Uvarint(dictLocData) + n := uint64(read) + s.dictLocs = append(s.dictLocs, dictLoc) + + var nameLen uint64 + nameLenData, err := s.data.Read(int(addr+n), int(fieldsIndexEnd)) + if err != nil { + return err + } + nameLen, read = binary.Uvarint(nameLenData) + n += uint64(read) + + nameData, err := s.data.Read(int(addr+n), int(addr+n+nameLen)) + if err != nil { + return err + } + n += nameLen + + fieldDocData, err := s.data.Read(int(addr+n), int(fieldsIndexEnd)) + if err != nil { + return err + } + fieldDocVal, read := binary.Uvarint(fieldDocData) + n += uint64(read) + + fieldFreqData, err := s.data.Read(int(addr+n), int(fieldsIndexEnd)) + if err != nil { + return err + } + fieldFreqVal, _ := binary.Uvarint(fieldFreqData) + + name := string(nameData) + s.fieldsInv = append(s.fieldsInv, name) + s.fieldsMap[name] = uint16(fieldID + 1) + s.fieldDocs[uint16(fieldID)] = fieldDocVal + s.fieldFreqs[uint16(fieldID)] = fieldFreqVal + + fieldID++ + } + return nil +} diff --git a/vendor/github.com/blevesearch/zapx/v14/memuvarint.go b/vendor/github.com/blugelabs/ice/memuvarint.go similarity index 66% rename from vendor/github.com/blevesearch/zapx/v14/memuvarint.go rename to vendor/github.com/blugelabs/ice/memuvarint.go index 0c10c83a4..67b349b0c 100644 --- a/vendor/github.com/blevesearch/zapx/v14/memuvarint.go +++ b/vendor/github.com/blugelabs/ice/memuvarint.go @@ -1,4 +1,4 @@ -// Copyright (c) 2020 Couchbase, Inc. 
+// Copyright (c) 2020 The Bluge Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,11 +12,11 @@ // See the License for the specific language governing permissions and // limitations under the License. -package zap +package ice -import ( - "fmt" -) +import "fmt" + +// ------------------------------------------------------------ type memUvarintReader struct { C int // index of next byte to read from S @@ -36,6 +36,22 @@ func (r *memUvarintReader) Len() int { return n } +// why 63? The original code had an 'i += 1' loop var and +// checked for i > 9 || i == 9 ...; but, we no longer +// check for the i var, but instead check here for s, +// which is incremented by 7. So, 7*9 == 63. +const sevenTimesNine = 63 + +// lastByte has the most significant bit set +// indicating there are more bytes in the stream +// any value less than this is a terminal byte +const lastByte = 0x80 + +// significantBits masks the significant bits +// the highest order bit is used to indicate +// the presence of more data +const significantBits = 0x7f + // ReadUvarint reads an encoded uint64. The original code this was // based on is at encoding/binary/ReadUvarint(). func (r *memUvarintReader) ReadUvarint() (uint64, error) { @@ -48,25 +64,20 @@ func (r *memUvarintReader) ReadUvarint() (uint64, error) { b := S[C] C++ - if b < 0x80 { + if b < lastByte { r.C = C - // why 63? The original code had an 'i += 1' loop var and - // checked for i > 9 || i == 9 ...; but, we no longer - // check for the i var, but instead check here for s, - // which is incremented by 7. So, 7*9 == 63. - // // why the "extra" >= check? The normal case is that s < // 63, so we check this single >= guard first so that we // hit the normal, nil-error return pathway sooner. 
- if s >= 63 && (s > 63 || s == 63 && b > 1) { + if s >= sevenTimesNine && (s > sevenTimesNine || s == sevenTimesNine && b > 1) { return 0, fmt.Errorf("memUvarintReader overflow") } return x | uint64(b)< 0 { + storedIndexOffset, newDocNums, err = mergeStoredAndRemap(segments, drops, + fieldsMap, fieldsInv, fieldsSame, numDocs, cr, closeCh) + if err != nil { + return nil, nil, err + } + + dictLocs, fieldDocs, fieldFreqs, docValueOffset, err = persistMergedRest(segments, drops, + fieldsInv, fieldsMap, + newDocNums, numDocs, chunkMode, cr, closeCh) + if err != nil { + return nil, nil, err + } + } else { + dictLocs = make([]uint64, len(fieldsInv)) + } + + var fieldsIndexOffset uint64 + fieldsIndexOffset, err = persistFields(fieldsInv, fieldDocs, fieldFreqs, cr, dictLocs) + if err != nil { + return nil, nil, err + } + + return newDocNums, &footer{ + numDocs: numDocs, + storedIndexOffset: storedIndexOffset, + fieldsIndexOffset: fieldsIndexOffset, + docValueOffset: docValueOffset, + }, nil +} + +// mapFields takes the fieldsInv list and returns a map of fieldName +// to fieldID+1 +func mapFields(fields []string) map[string]uint16 { + rv := make(map[string]uint16, len(fields)) + for i, fieldName := range fields { + rv[fieldName] = uint16(i) + 1 + } + return rv +} + +// computeNewDocCount determines how many documents will be in the newly +// merged segment when obsoleted docs are dropped +func computeNewDocCount(segments []*Segment, drops []*roaring.Bitmap) uint64 { + var newDocCount uint64 + for segI, seg := range segments { + newDocCount += seg.footer.numDocs + if drops[segI] != nil { + newDocCount -= drops[segI].GetCardinality() + } + } + return newDocCount +} + +func persistMergedRest(segments []*Segment, dropsIn []*roaring.Bitmap, + fieldsInv []string, fieldsMap map[string]uint16, + newDocNumsIn [][]uint64, newSegDocCount uint64, chunkMode uint32, + w *countHashWriter, closeCh chan struct{}) (dictLocs []uint64, fieldDocs, + fieldFreqs map[uint16]uint64, docValueOffset 
uint64, err error) { + var bufMaxVarintLen64 = make([]byte, binary.MaxVarintLen64) + + dictLocs = make([]uint64, len(fieldsInv)) + fieldDvLocsStart := make([]uint64, len(fieldsInv)) + fieldDvLocsEnd := make([]uint64, len(fieldsInv)) + + // these int coders are initialized with chunk size 1024 + // however this will be reset to the correct chunk size + // while processing each individual field-term section + tfEncoder := newChunkedIntCoder(uint64(legacyChunkMode), newSegDocCount-1) + locEncoder := newChunkedIntCoder(uint64(legacyChunkMode), newSegDocCount-1) + + var vellumBuf bytes.Buffer + newVellum, err := vellum.New(&vellumBuf, nil) + if err != nil { + return nil, nil, nil, 0, err + } + + newRoaring := roaring.NewBitmap() + + fieldDocs = map[uint16]uint64{} + fieldDocTracking := roaring.NewBitmap() + fieldFreqs = map[uint16]uint64{} + + // for each field + for fieldID, fieldName := range fieldsInv { + err = persistMergedRestField(segments, dropsIn, fieldsMap, newDocNumsIn, newSegDocCount, chunkMode, w, + closeCh, fieldName, newRoaring, fieldDocTracking, tfEncoder, locEncoder, newVellum, &vellumBuf, + bufMaxVarintLen64, fieldFreqs, fieldID, dictLocs, fieldDvLocsStart, fieldDvLocsEnd) + if err != nil { + return nil, nil, nil, 0, err + } + + // reset vellum buffer and vellum builder + vellumBuf.Reset() + err = newVellum.Reset(&vellumBuf) + if err != nil { + return nil, nil, nil, 0, err + } + + fieldDocs[uint16(fieldID)] += fieldDocTracking.GetCardinality() + } + + docValueOffset, err = writeDvLocs(w, bufMaxVarintLen64, fieldDvLocsStart, fieldDvLocsEnd) + if err != nil { + return nil, nil, nil, 0, err + } + + return dictLocs, fieldDocs, fieldFreqs, docValueOffset, nil +} + +func persistMergedRestField(segments []*Segment, dropsIn []*roaring.Bitmap, fieldsMap map[string]uint16, + newDocNumsIn [][]uint64, newSegDocCount uint64, chunkMode uint32, w *countHashWriter, closeCh chan struct{}, + fieldName string, newRoaring, fieldDocTracking *roaring.Bitmap, tfEncoder, 
locEncoder *chunkedIntCoder, + newVellum *vellum.Builder, vellumBuf *bytes.Buffer, bufMaxVarintLen64 []byte, fieldFreqs map[uint16]uint64, + fieldID int, dictLocs, fieldDvLocsStart, fieldDvLocsEnd []uint64) error { + var postings *PostingsList + var postItr *PostingsIterator + var bufLoc []uint64 + + // collect FST iterators from all active segments for this field + newDocNums, drops, dicts, itrs, segmentsInFocus, err := + setupActiveForField(segments, dropsIn, newDocNumsIn, closeCh, fieldName) + if err != nil { + return err + } + + var prevTerm []byte + + newRoaring.Clear() + fieldDocTracking.Clear() + + var lastDocNum uint64 + var lastFreq, lastNorm uint64 + + enumerator, err := newEnumerator(itrs) + + for err == nil { + term, itrI, postingsOffset := enumerator.Current() + + if !bytes.Equal(prevTerm, term) { + // check for the closure in meantime + if isClosed(closeCh) { + return segment.ErrClosed + } + + // if the term changed, write out the info collected for the previous term + err = finishTerm(w, newRoaring, tfEncoder, locEncoder, newVellum, bufMaxVarintLen64, prevTerm, &lastDocNum, + &lastFreq, &lastNorm) + if err != nil { + return err + } + } + + if !bytes.Equal(prevTerm, term) || prevTerm == nil { + err = prepareNewTerm(newSegDocCount, chunkMode, tfEncoder, locEncoder, fieldFreqs, fieldID, enumerator, + dicts, drops) + if err != nil { + return err + } + } + + postings, err = dicts[itrI].postingsListFromOffset( + postingsOffset, drops[itrI], postings) + if err != nil { + return err + } + + postItr, err = postings.iterator(true, true, true, postItr) + if err != nil { + return err + } + + // can no longer optimize by copying, since chunk factor could have changed + lastDocNum, lastFreq, lastNorm, bufLoc, err = mergeTermFreqNormLocs( + fieldsMap, postItr, newDocNums[itrI], newRoaring, + tfEncoder, locEncoder, bufLoc, fieldDocTracking) + + if err != nil { + return err + } + + prevTerm = prevTerm[:0] // copy to prevTerm in case Next() reuses term mem + prevTerm 
= append(prevTerm, term...) + + err = enumerator.Next() + } + if err != vellum.ErrIteratorDone { + return err + } + + err = finishTerm(w, newRoaring, tfEncoder, locEncoder, newVellum, bufMaxVarintLen64, prevTerm, &lastDocNum, + &lastFreq, &lastNorm) + if err != nil { + return err + } + + err = writeMergedDict(w, newVellum, vellumBuf, bufMaxVarintLen64, fieldID, dictLocs) + if err != nil { + return err + } + + err = buildMergedDocVals(newSegDocCount, w, closeCh, fieldName, fieldID, fieldDvLocsStart, fieldDvLocsEnd, + segmentsInFocus, newDocNums) + if err != nil { + return err + } + return nil +} + +func writeMergedDict(w *countHashWriter, newVellum io.Closer, vellumBuf *bytes.Buffer, + bufMaxVarintLen64 []byte, fieldID int, dictLocs []uint64) error { + dictOffset := uint64(w.Count()) + + err := newVellum.Close() + if err != nil { + return err + } + vellumData := vellumBuf.Bytes() + + // write out the length of the vellum data + n := binary.PutUvarint(bufMaxVarintLen64, uint64(len(vellumData))) + _, err = w.Write(bufMaxVarintLen64[:n]) + if err != nil { + return err + } + + // write this vellum to disk + _, err = w.Write(vellumData) + if err != nil { + return err + } + + dictLocs[fieldID] = dictOffset + return nil +} + +func buildMergedDocVals(newSegDocCount uint64, w *countHashWriter, closeCh chan struct{}, fieldName string, fieldID int, + fieldDvLocsStart, fieldDvLocsEnd []uint64, segmentsInFocus []*Segment, newDocNums [][]uint64) error { + // get the field doc value offset (start) + fieldDvLocsStart[fieldID] = uint64(w.Count()) + + // update the field doc values + // NOTE: doc values continue to use legacy chunk mode + chunkSize, err := getChunkSize(legacyChunkMode, 0, 0) + if err != nil { + return err + } + fdvEncoder := newChunkedContentCoder(chunkSize, newSegDocCount-1, w, true) + + fdvReadersAvailable := false + var dvIterClone *docValueReader + for segmentI, seg := range segmentsInFocus { + segmentI := segmentI + // check for the closure in meantime + if 
isClosed(closeCh) { + return segment.ErrClosed + } + + fieldIDPlus1 := seg.fieldsMap[fieldName] + if dvIter, exists := seg.fieldDvReaders[fieldIDPlus1-1]; exists && + dvIter != nil { + fdvReadersAvailable = true + dvIterClone = dvIter.cloneInto(dvIterClone) + err = dvIterClone.iterateAllDocValues(seg, func(docNum uint64, terms []byte) error { + if newDocNums[segmentI][docNum] == docDropped { + return nil + } + err2 := fdvEncoder.Add(newDocNums[segmentI][docNum], terms) + if err2 != nil { + return err2 + } + return nil + }) + if err != nil { + return err + } + } + } + + if fdvReadersAvailable { + err = fdvEncoder.Close() + if err != nil { + return err + } + + // persist the doc value details for this field + _, err = fdvEncoder.Write() + if err != nil { + return err + } + + // get the field doc value offset (end) + fieldDvLocsEnd[fieldID] = uint64(w.Count()) + } else { + fieldDvLocsStart[fieldID] = fieldNotUninverted + fieldDvLocsEnd[fieldID] = fieldNotUninverted + } + return nil +} + +func prepareNewTerm(newSegDocCount uint64, chunkMode uint32, tfEncoder, locEncoder *chunkedIntCoder, + fieldFreqs map[uint16]uint64, fieldID int, enumerator *enumerator, dicts []*Dictionary, + drops []*roaring.Bitmap) error { + var err error + + // compute cardinality of field-term in new seg + var newCard uint64 + lowItrIdxs, lowItrVals := enumerator.GetLowIdxsAndValues() + for i, idx := range lowItrIdxs { + var pl *PostingsList + pl, err = dicts[idx].postingsListFromOffset(lowItrVals[i], drops[idx], nil) + if err != nil { + return err + } + newCard += pl.Count() + fieldFreqs[uint16(fieldID)] += newCard + } + // compute correct chunk size with this + var chunkSize uint64 + chunkSize, err = getChunkSize(chunkMode, newCard, newSegDocCount) + if err != nil { + return err + } + // update encoders chunk + tfEncoder.SetChunkSize(chunkSize, newSegDocCount-1) + locEncoder.SetChunkSize(chunkSize, newSegDocCount-1) + return nil +} + +func finishTerm(w *countHashWriter, newRoaring 
*roaring.Bitmap, tfEncoder, locEncoder *chunkedIntCoder, + newVellum *vellum.Builder, bufMaxVarintLen64, term []byte, lastDocNum, lastFreq, lastNorm *uint64) error { + tfEncoder.Close() + locEncoder.Close() + + // determines whether to use "1-hit" encoding optimization + // when a term appears in only 1 doc, with no loc info, + // has freq of 1, and the docNum fits into 31-bits + use1HitEncoding := func(termCardinality uint64) (bool, uint64, uint64) { + if termCardinality == uint64(1) && locEncoder.FinalSize() <= 0 { + docNum := uint64(newRoaring.Minimum()) + if under32Bits(docNum) && docNum == *lastDocNum && *lastFreq == 1 { + return true, docNum, *lastNorm + } + } + return false, 0, 0 + } + + postingsOffset, err := writePostings(newRoaring, + tfEncoder, locEncoder, use1HitEncoding, w, bufMaxVarintLen64) + if err != nil { + return err + } + + if postingsOffset > 0 { + err = newVellum.Insert(term, postingsOffset) + if err != nil { + return err + } + } + + newRoaring.Clear() + + tfEncoder.Reset() + locEncoder.Reset() + + *lastDocNum = 0 + *lastFreq = 0 + *lastNorm = 0 + + return nil +} + +func writeDvLocs(w *countHashWriter, bufMaxVarintLen64 []byte, fieldDvLocsStart, fieldDvLocsEnd []uint64) (uint64, error) { + fieldDvLocsOffset := uint64(w.Count()) + + buf := bufMaxVarintLen64 + for i := 0; i < len(fieldDvLocsStart); i++ { + n := binary.PutUvarint(buf, fieldDvLocsStart[i]) + _, err := w.Write(buf[:n]) + if err != nil { + return 0, err + } + n = binary.PutUvarint(buf, fieldDvLocsEnd[i]) + _, err = w.Write(buf[:n]) + if err != nil { + return 0, err + } + } + return fieldDvLocsOffset, nil +} + +func setupActiveForField(segments []*Segment, dropsIn []*roaring.Bitmap, newDocNumsIn [][]uint64, closeCh chan struct{}, + fieldName string) (newDocNums [][]uint64, drops []*roaring.Bitmap, dicts []*Dictionary, itrs []vellum.Iterator, + segmentsInFocus []*Segment, err error) { + for segmentI, seg := range segments { + // check for the closure in meantime + if isClosed(closeCh) 
{ + return nil, nil, nil, nil, nil, segment.ErrClosed + } + + var dict *Dictionary + dict, err = seg.dictionary(fieldName) + if err != nil { + return nil, nil, nil, nil, nil, err + } + if dict != nil && dict.fst != nil { + var itr *vellum.FSTIterator + itr, err = dict.fst.Iterator(nil, nil) + if err != nil && err != vellum.ErrIteratorDone { + return nil, nil, nil, nil, nil, err + } + if itr != nil { + newDocNums = append(newDocNums, newDocNumsIn[segmentI]) + if dropsIn[segmentI] != nil && !dropsIn[segmentI].IsEmpty() { + drops = append(drops, dropsIn[segmentI]) + } else { + drops = append(drops, nil) + } + dicts = append(dicts, dict) + itrs = append(itrs, itr) + segmentsInFocus = append(segmentsInFocus, seg) + } + } + } + return newDocNums, drops, dicts, itrs, segmentsInFocus, nil +} + +const numUintsLocation = 4 + +func mergeTermFreqNormLocs(fieldsMap map[string]uint16, postItr *PostingsIterator, + newDocNums []uint64, newRoaring *roaring.Bitmap, + tfEncoder, locEncoder *chunkedIntCoder, bufLoc []uint64, docTracking *roaring.Bitmap) ( + lastDocNum, lastFreq, lastNorm uint64, bufLocOut []uint64, err error) { + next, err := postItr.Next() + for next != nil && err == nil { + hitNewDocNum := newDocNums[next.Number()] + if hitNewDocNum == docDropped { + return 0, 0, 0, nil, fmt.Errorf("see hit with dropped docNum") + } + + newRoaring.Add(uint32(hitNewDocNum)) + docTracking.Add(uint32(hitNewDocNum)) + + nextFreq := next.Frequency() + nextNorm := uint64(math.Float32bits(float32(next.Norm()))) + + locs := next.Locations() + + err = tfEncoder.Add(hitNewDocNum, + encodeFreqHasLocs(uint64(nextFreq), len(locs) > 0), nextNorm) + if err != nil { + return 0, 0, 0, nil, err + } + + if len(locs) > 0 { + numBytesLocs := 0 + for _, loc := range locs { + numBytesLocs += totalUvarintBytes(uint64(fieldsMap[loc.Field()]-1), + uint64(loc.Pos()), uint64(loc.Start()), uint64(loc.End())) + } + + err = locEncoder.Add(hitNewDocNum, uint64(numBytesLocs)) + if err != nil { + return 0, 0, 0, 
nil, err + } + + for _, loc := range locs { + if cap(bufLoc) < numUintsLocation { + bufLoc = make([]uint64, 0, numUintsLocation) + } + args := bufLoc[0:4] + args[0] = uint64(fieldsMap[loc.Field()] - 1) + args[1] = uint64(loc.Pos()) + args[2] = uint64(loc.Start()) + args[3] = uint64(loc.End()) + err = locEncoder.Add(hitNewDocNum, args...) + if err != nil { + return 0, 0, 0, nil, err + } + } + } + + lastDocNum = hitNewDocNum + lastFreq = uint64(nextFreq) + lastNorm = nextNorm + + next, err = postItr.Next() + } + + return lastDocNum, lastFreq, lastNorm, bufLoc, err +} + +func mergeStoredAndRemap(segments []*Segment, drops []*roaring.Bitmap, + fieldsMap map[string]uint16, fieldsInv []string, fieldsSame bool, newSegDocCount uint64, + w *countHashWriter, closeCh chan struct{}) (storedIndexOffset uint64, newDocNums [][]uint64, err error) { + var newDocNum uint64 + + var data, compressed []byte + var metaBuf bytes.Buffer + varBuf := make([]byte, binary.MaxVarintLen64) + metaEncode := func(val uint64) (int, error) { + wb := binary.PutUvarint(varBuf, val) + return metaBuf.Write(varBuf[:wb]) + } + + vals := make([][][]byte, len(fieldsInv)) + + docNumOffsets := make([]uint64, newSegDocCount) + + vdc := visitDocumentCtxPool.Get().(*visitDocumentCtx) + defer visitDocumentCtxPool.Put(vdc) + + // for each segment + for segI, seg := range segments { + // check for the closure in meantime + if isClosed(closeCh) { + return 0, nil, segment.ErrClosed + } + + segNewDocNums := make([]uint64, seg.footer.numDocs) + + dropsI := drops[segI] + + // optimize when the field mapping is the same across all + // segments and there are no deletions, via byte-copying + // of stored docs bytes directly to the writer + if fieldsSame && (dropsI == nil || dropsI.GetCardinality() == 0) { + err := seg.copyStoredDocs(newDocNum, docNumOffsets, w) + if err != nil { + return 0, nil, err + } + + for i := uint64(0); i < seg.footer.numDocs; i++ { + segNewDocNums[i] = newDocNum + newDocNum++ + } + newDocNums = 
append(newDocNums, segNewDocNums) + + continue + } + + var err2 error + newDocNum, err2 = mergeStoredAndRemapSegment(seg, dropsI, segNewDocNums, newDocNum, &metaBuf, data, + fieldsInv, vals, vdc, fieldsMap, metaEncode, compressed, docNumOffsets, w) + if err2 != nil { + return 0, nil, err2 + } + + newDocNums = append(newDocNums, segNewDocNums) + } + + // return value is the start of the stored index + storedIndexOffset = uint64(w.Count()) + + // now write out the stored doc index + for _, docNumOffset := range docNumOffsets { + err := binary.Write(w, binary.BigEndian, docNumOffset) + if err != nil { + return 0, nil, err + } + } + + return storedIndexOffset, newDocNums, nil +} + +func mergeStoredAndRemapSegment(seg *Segment, dropsI *roaring.Bitmap, segNewDocNums []uint64, newDocNum uint64, + metaBuf *bytes.Buffer, data []byte, fieldsInv []string, vals [][][]byte, vdc *visitDocumentCtx, + fieldsMap map[string]uint16, metaEncode func(val uint64) (int, error), compressed []byte, docNumOffsets []uint64, + w *countHashWriter) (uint64, error) { + // for each doc num + for docNum := uint64(0); docNum < seg.footer.numDocs; docNum++ { + // TODO: roaring's API limits docNums to 32-bits? 
+ if dropsI != nil && dropsI.Contains(uint32(docNum)) { + segNewDocNums[docNum] = docDropped + continue + } + + segNewDocNums[docNum] = newDocNum + + curr := 0 + metaBuf.Reset() + data = data[:0] + + // collect all the data + for i := 0; i < len(fieldsInv); i++ { + vals[i] = vals[i][:0] + } + err := seg.visitDocument(vdc, docNum, func(field string, value []byte) bool { + fieldID := int(fieldsMap[field]) - 1 + vals[fieldID] = append(vals[fieldID], value) + return true + }) + if err != nil { + return 0, err + } + + // now walk the fields in order + for fieldID := 0; fieldID < len(fieldsInv); fieldID++ { + storedFieldValues := vals[fieldID] + + var err2 error + curr, data, err2 = encodeStoredFieldValues(fieldID, + storedFieldValues, curr, metaEncode, data) + if err2 != nil { + return 0, err2 + } + } + + metaBytes := metaBuf.Bytes() + + compressed = snappy.Encode(compressed[:cap(compressed)], data) + + // record where we're about to start writing + docNumOffsets[newDocNum] = uint64(w.Count()) + + // write out the meta len and compressed data len + err = writeUvarints(w, + uint64(len(metaBytes)), + uint64(len(compressed))) + if err != nil { + return 0, err + } + // now write the meta + _, err = w.Write(metaBytes) + if err != nil { + return 0, err + } + // now write the compressed data + _, err = w.Write(compressed) + if err != nil { + return 0, err + } + + newDocNum++ + } + return newDocNum, nil +} + +// copyStoredDocs writes out a segment's stored doc info, optimized by +// using a single Write() call for the entire set of bytes. The +// newDocNumOffsets is filled with the new offsets for each doc. 
+func (s *Segment) copyStoredDocs(newDocNum uint64, newDocNumOffsets []uint64, + w *countHashWriter) error { + if s.footer.numDocs <= 0 { + return nil + } + + indexOffset0, storedOffset0, err := s.getDocStoredOffsetsOnly(0) // the segment's first doc + if err != nil { + return err + } + + indexOffsetN, storedOffsetN, readN, metaLenN, dataLenN, err := + s.getDocStoredOffsets(s.footer.numDocs - 1) // the segment's last doc + if err != nil { + return err + } + + storedOffset0New := uint64(w.Count()) + + storedBytesData, err := s.data.Read(int(storedOffset0), int(storedOffsetN+readN+metaLenN+dataLenN)) + if err != nil { + return err + } + storedBytes := storedBytesData + _, err = w.Write(storedBytes) + if err != nil { + return err + } + + // remap the storedOffset's for the docs into new offsets relative + // to storedOffset0New, filling the given docNumOffsetsOut array + for indexOffset := indexOffset0; indexOffset <= indexOffsetN; indexOffset += fileAddrWidth { + storedOffsetData, err := s.data.Read(int(indexOffset), int(indexOffset+fileAddrWidth)) + if err != nil { + return err + } + storedOffset := binary.BigEndian.Uint64(storedOffsetData) + storedOffsetNew := storedOffset - storedOffset0 + storedOffset0New + newDocNumOffsets[newDocNum] = storedOffsetNew // PANIC + newDocNum++ + } + + return nil +} + +// mergeFields builds a unified list of fields used across all the +// input segments, and computes whether the fields are the same across +// segments (which depends on fields to be sorted in the same way +// across segments) +func mergeFields(segments []*Segment) (same bool, fields []string) { + same = true + + var segment0Fields []string + if len(segments) > 0 { + segment0Fields = segments[0].Fields() + } + + fieldsExist := map[string]struct{}{} + for _, seg := range segments { + fields = seg.Fields() + for fieldi, field := range fields { + fieldsExist[field] = struct{}{} + if len(segment0Fields) != len(fields) || segment0Fields[fieldi] != field { + same = false + 
} + } + } + + fields = make([]string, 0, len(fieldsExist)) + // ensure _id stays first + fields = append(fields, _idFieldName) + for k := range fieldsExist { + if k != _idFieldName { + fields = append(fields, k) + } + } + + sort.Strings(fields[1:]) // leave _id as first + + return same, fields +} + +func isClosed(closeCh chan struct{}) bool { + select { + case <-closeCh: + return true + default: + return false + } +} diff --git a/vendor/github.com/blevesearch/zapx/v13/new.go b/vendor/github.com/blugelabs/ice/new.go similarity index 54% rename from vendor/github.com/blevesearch/zapx/v13/new.go rename to vendor/github.com/blugelabs/ice/new.go index b4e0d0341..5af2b24e7 100644 --- a/vendor/github.com/blevesearch/zapx/v13/new.go +++ b/vendor/github.com/blugelabs/ice/new.go @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Couchbase, Inc. +// Copyright (c) 2020 Couchbase, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package zap +package ice import ( "bytes" @@ -22,60 +22,58 @@ import ( "sync" "github.com/RoaringBitmap/roaring" - index "github.com/blevesearch/bleve_index_api" - segment "github.com/blevesearch/scorch_segment_api/v2" "github.com/blevesearch/vellum" + segment "github.com/blugelabs/bluge_segment_api" "github.com/golang/snappy" ) -var NewSegmentBufferNumResultsBump int = 100 -var NewSegmentBufferNumResultsFactor float64 = 1.0 -var NewSegmentBufferAvgBytesPerDocFactor float64 = 1.0 +var newSegmentBufferNumResultsBump int = 100 +var newSegmentBufferNumResultsFactor float64 = 1.0 +var newSegmentBufferAvgBytesPerDocFactor float64 = 1.0 -// ValidateDocFields can be set by applications to perform additional checks -// on fields in a document being added to a new segment, by default it does -// nothing. 
-// This API is experimental and may be removed at any time. -var ValidateDocFields = func(field index.Field) error { - return nil -} - -// New creates an in-memory zap-encoded SegmentBase from a set of Documents -func (z *ZapPlugin) New(results []index.Document) ( +// New creates an in-memory implementation +// of a segment for the source documents +func New(results []segment.Document, normCalc func(string, int) float32) ( segment.Segment, uint64, error) { - return z.newWithChunkMode(results, DefaultChunkMode) + return newWithChunkMode(results, normCalc, defaultChunkMode) } -func (*ZapPlugin) newWithChunkMode(results []index.Document, +func newWithChunkMode(results []segment.Document, normCalc func(string, int) float32, chunkMode uint32) (segment.Segment, uint64, error) { s := interimPool.Get().(*interim) + s.normCalc = normCalc + var br bytes.Buffer if s.lastNumDocs > 0 { // use previous results to initialize the buf with an estimate // size, but note that the interim instance comes from a - // global interimPool, so multiple scorch instances indexing + // global interimPool, so multiple index instances indexing // different docs can lead to low quality estimates estimateAvgBytesPerDoc := int(float64(s.lastOutSize/s.lastNumDocs) * - NewSegmentBufferNumResultsFactor) - estimateNumResults := int(float64(len(results)+NewSegmentBufferNumResultsBump) * - NewSegmentBufferAvgBytesPerDocFactor) + newSegmentBufferNumResultsFactor) + estimateNumResults := int(float64(len(results)+newSegmentBufferNumResultsBump) * + newSegmentBufferAvgBytesPerDocFactor) br.Grow(estimateAvgBytesPerDoc * estimateNumResults) } s.results = results s.chunkMode = chunkMode - s.w = NewCountHashWriter(&br) + s.w = newCountHashWriter(&br) - storedIndexOffset, fieldsIndexOffset, fdvIndexOffset, dictOffsets, - err := s.convert() + var footer *footer + footer, dictOffsets, err := s.convert() if err != nil { return nil, uint64(0), err } + footer.crc = s.w.Sum32() + footer.chunkMode = chunkMode + 
footer.numDocs = uint64(len(results)) - sb, err := InitSegmentBase(br.Bytes(), s.w.Sum32(), chunkMode, - s.FieldsMap, s.FieldsInv, uint64(len(results)), - storedIndexOffset, fieldsIndexOffset, fdvIndexOffset, dictOffsets) + sb, err := initSegmentBase(br.Bytes(), footer, + s.FieldsMap, s.FieldsInv, + s.FieldDocs, s.FieldFreqs, + dictOffsets) if err == nil && s.reset() == nil { s.lastNumDocs = len(results) @@ -86,16 +84,41 @@ func (*ZapPlugin) newWithChunkMode(results []index.Document, return sb, uint64(len(br.Bytes())), err } +func initSegmentBase(mem []byte, footer *footer, + fieldsMap map[string]uint16, fieldsInv []string, + fieldsDocs, fieldsFreqs map[uint16]uint64, + dictLocs []uint64) (*Segment, error) { + sb := &Segment{ + data: segment.NewDataBytes(mem), + footer: footer, + fieldsMap: fieldsMap, + fieldsInv: fieldsInv, + fieldDocs: fieldsDocs, + fieldFreqs: fieldsFreqs, + dictLocs: dictLocs, + fieldDvReaders: make(map[uint16]*docValueReader), + fieldFSTs: make(map[uint16]*vellum.FST), + } + sb.updateSize() + + err := sb.loadDvReaders() + if err != nil { + return nil, err + } + + return sb, nil +} + var interimPool = sync.Pool{New: func() interface{} { return &interim{} }} // interim holds temporary working data used while converting from -// analysis results to a zap-encoded segment +// the source operations to an encoded segment type interim struct { - results []index.Document + results []segment.Document chunkMode uint32 - w *CountHashWriter + w *countHashWriter // FieldsMap adds 1 to field id to avoid zero value issues // name -> field id + 1 @@ -105,6 +128,14 @@ type interim struct { // field id -> name FieldsInv []string + // FieldDocs tracks how many documents have at least one value + // for each field + FieldDocs map[uint16]uint64 + + // FieldFreqs tracks how many total tokens there are in a field + // across all documents + FieldFreqs map[uint16]uint64 + // Term dictionaries for each field // field id -> term -> postings list id + 1 Dicts 
[]map[string]uint64 @@ -141,6 +172,8 @@ type interim struct { lastNumDocs int lastOutSize int + + normCalc func(string, int) float32 } func (s *interim) reset() (err error) { @@ -200,9 +233,7 @@ func (s *interim) grabBuf(size int) []byte { } type interimStoredField struct { - vals [][]byte - typs []byte - arrayposs [][]uint64 // array positions + vals [][]byte } type interimFreqNorm struct { @@ -212,23 +243,23 @@ type interimFreqNorm struct { } type interimLoc struct { - fieldID uint16 - pos uint64 - start uint64 - end uint64 - arrayposs []uint64 + fieldID uint16 + pos uint64 + start uint64 + end uint64 } -func (s *interim) convert() (uint64, uint64, uint64, []uint64, error) { +func (s *interim) convert() (*footer, []uint64, error) { s.FieldsMap = map[string]uint16{} + s.FieldDocs = map[uint16]uint64{} + s.FieldFreqs = map[uint16]uint64{} - s.getOrDefineField("_id") // _id field is fieldID 0 + // FIXME review if this is still necessary + // YES, integration tests fail when removed + s.getOrDefineField(_idFieldName) // _id field is fieldID 0 for _, result := range s.results { - result.VisitComposite(func(field index.CompositeField) { - s.getOrDefineField(field.Name()) - }) - result.VisitFields(func(field index.Field) { + result.EachField(func(field segment.Field) { s.getOrDefineField(field.Name()) }) } @@ -255,7 +286,7 @@ func (s *interim) convert() (uint64, uint64, uint64, []uint64, error) { storedIndexOffset, err := s.writeStoredFields() if err != nil { - return 0, 0, 0, nil, err + return nil, nil, err } var fdvIndexOffset uint64 @@ -264,18 +295,23 @@ func (s *interim) convert() (uint64, uint64, uint64, []uint64, error) { if len(s.results) > 0 { fdvIndexOffset, dictOffsets, err = s.writeDicts() if err != nil { - return 0, 0, 0, nil, err + return nil, nil, err } } else { dictOffsets = make([]uint64, len(s.FieldsInv)) } - fieldsIndexOffset, err := persistFields(s.FieldsInv, s.w, dictOffsets) + fieldsIndexOffset, err := persistFields(s.FieldsInv, s.FieldDocs, 
s.FieldFreqs, s.w, dictOffsets) if err != nil { - return 0, 0, 0, nil, err + return nil, nil, err } - return storedIndexOffset, fieldsIndexOffset, fdvIndexOffset, dictOffsets, nil + return &footer{ + storedIndexOffset: storedIndexOffset, + fieldsIndexOffset: fieldsIndexOffset, + docValueOffset: fdvIndexOffset, + version: Version, + }, dictOffsets, nil } func (s *interim) getOrDefineField(fieldName string) int { @@ -306,47 +342,8 @@ func (s *interim) prepareDicts() { var totTFs int var totLocs int - visitField := func(field index.Field) { - fieldID := uint16(s.getOrDefineField(field.Name())) - - dict := s.Dicts[fieldID] - dictKeys := s.DictKeys[fieldID] - - tfs := field.AnalyzedTokenFrequencies() - for term, tf := range tfs { - pidPlus1, exists := dict[term] - if !exists { - pidNext++ - pidPlus1 = uint64(pidNext) - - dict[term] = pidPlus1 - dictKeys = append(dictKeys, term) - - s.numTermsPerPostingsList = append(s.numTermsPerPostingsList, 0) - s.numLocsPerPostingsList = append(s.numLocsPerPostingsList, 0) - } - - pid := pidPlus1 - 1 - - s.numTermsPerPostingsList[pid] += 1 - s.numLocsPerPostingsList[pid] += len(tf.Locations) - - totLocs += len(tf.Locations) - } - - totTFs += len(tfs) - - s.DictKeys[fieldID] = dictKeys - } - for _, result := range s.results { - // walk each composite field - result.VisitComposite(func(field index.CompositeField) { - visitField(field) - }) - - // walk each field - result.VisitFields(visitField) + pidNext, totLocs, totTFs = s.prepareDictsForDocument(result, pidNext, totLocs, totTFs) } numPostingsLists := pidNext @@ -401,10 +398,62 @@ func (s *interim) prepareDicts() { } } +func (s *interim) prepareDictsForDocument(result segment.Document, pidNext, totLocs, totTFs int) ( + pidNextOut, totLocsOut, totTFsOut int) { + fieldsSeen := map[uint16]struct{}{} + result.EachField(func(field segment.Field) { + fieldID := uint16(s.getOrDefineField(field.Name())) + + fieldsSeen[fieldID] = struct{}{} + s.FieldFreqs[fieldID] += uint64(field.Length()) + 
+ dict := s.Dicts[fieldID] + dictKeys := s.DictKeys[fieldID] + + var numTerms int + field.EachTerm(func(term segment.FieldTerm) { + numTerms++ + termStr := string(term.Term()) + pidPlus1, exists := dict[termStr] + if !exists { + pidNext++ + pidPlus1 = uint64(pidNext) + + dict[termStr] = pidPlus1 + dictKeys = append(dictKeys, termStr) + + s.numTermsPerPostingsList = append(s.numTermsPerPostingsList, 0) + s.numLocsPerPostingsList = append(s.numLocsPerPostingsList, 0) + } + + pid := pidPlus1 - 1 + + s.numTermsPerPostingsList[pid]++ + + var numLocations int + term.EachLocation(func(_ segment.Location) { + numLocations++ + }) + s.numLocsPerPostingsList[pid] += numLocations + + totLocs += numLocations + }) + + totTFs += numTerms + + s.DictKeys[fieldID] = dictKeys + }) + // record fields seen by this doc + for k := range fieldsSeen { + s.FieldDocs[k]++ + } + return pidNext, totLocs, totTFs +} + func (s *interim) processDocuments() { numFields := len(s.FieldsInv) reuseFieldLens := make([]int, numFields) - reuseFieldTFs := make([]index.TokenFrequencies, numFields) + reuseFieldTFs := make([]tokenFrequencies, numFields) for docNum, result := range s.results { for i := 0; i < numFields; i++ { // clear these for reuse @@ -418,32 +467,56 @@ func (s *interim) processDocuments() { } func (s *interim) processDocument(docNum uint64, - result index.Document, - fieldLens []int, fieldTFs []index.TokenFrequencies) { - visitField := func(field index.Field) { + result segment.Document, + fieldLens []int, fieldTFs []tokenFrequencies) { + visitField := func(field segment.Field) { fieldID := uint16(s.getOrDefineField(field.Name())) - fieldLens[fieldID] += field.AnalyzedLength() + fieldLens[fieldID] += field.Length() - existingFreqs := fieldTFs[fieldID] - if existingFreqs != nil { - existingFreqs.MergeAll(field.Name(), field.AnalyzedTokenFrequencies()) - } else { - fieldTFs[fieldID] = field.AnalyzedTokenFrequencies() + if existingFreqs := fieldTFs[fieldID]; existingFreqs == nil { + 
fieldTFs[fieldID] = make(map[string]*tokenFreq) } - } - // walk each composite field - result.VisitComposite(func(field index.CompositeField) { - visitField(field) - }) + existingFreqs := fieldTFs[fieldID] + field.EachTerm(func(term segment.FieldTerm) { + tfk := string(term.Term()) + existingTf, exists := existingFreqs[tfk] + if exists { + term.EachLocation(func(location segment.Location) { + existingTf.Locations = append(existingTf.Locations, + &tokenLocation{ + FieldVal: field.Name(), + StartVal: location.Start(), + EndVal: location.End(), + PositionVal: location.Pos(), + }) + }) + existingTf.frequency += term.Frequency() + } else { + newTf := &tokenFreq{ + TermVal: term.Term(), + frequency: term.Frequency(), + } + term.EachLocation(func(location segment.Location) { + newTf.Locations = append(newTf.Locations, + &tokenLocation{ + FieldVal: location.Field(), + StartVal: location.Start(), + EndVal: location.End(), + PositionVal: location.Pos(), + }) + }) + existingFreqs[tfk] = newTf + } + }) + } - // walk each field - result.VisitFields(visitField) + result.EachField(visitField) // now that it's been rolled up into fieldTFs, walk that for fieldID, tfs := range fieldTFs { dict := s.Dicts[fieldID] - norm := float32(1.0 / math.Sqrt(float64(fieldLens[fieldID]))) + norm := s.normCalc(s.FieldsInv[fieldID], fieldLens[fieldID]) for term, tf := range tfs { pid := dict[term] - 1 @@ -462,19 +535,14 @@ func (s *interim) processDocument(docNum uint64, for _, loc := range tf.Locations { var locf = uint16(fieldID) - if loc.Field != "" { - locf = uint16(s.getOrDefineField(loc.Field)) - } - var arrayposs []uint64 - if len(loc.ArrayPositions) > 0 { - arrayposs = loc.ArrayPositions + if loc.FieldVal != "" { + locf = uint16(s.getOrDefineField(loc.FieldVal)) } locs = append(locs, interimLoc{ - fieldID: locf, - pos: uint64(loc.Position), - start: uint64(loc.Start), - end: uint64(loc.End), - arrayposs: arrayposs, + fieldID: locf, + pos: uint64(loc.PositionVal), + start: 
uint64(loc.StartVal), + end: uint64(loc.EndVal), }) } @@ -506,49 +574,31 @@ func (s *interim) writeStoredFields() ( delete(docStoredFields, fieldID) } - var validationErr error - result.VisitFields(func(field index.Field) { + result.EachField(func(field segment.Field) { fieldID := uint16(s.getOrDefineField(field.Name())) - if field.Options().IsStored() { + if field.Store() { isf := docStoredFields[fieldID] isf.vals = append(isf.vals, field.Value()) - isf.typs = append(isf.typs, field.EncodedFieldType()) - isf.arrayposs = append(isf.arrayposs, field.ArrayPositions()) docStoredFields[fieldID] = isf } - if field.Options().IncludeDocValues() { + if field.IndexDocValues() { s.IncludeDocValues[fieldID] = true } - - err := ValidateDocFields(field) - if err != nil && validationErr == nil { - validationErr = err - } }) - if validationErr != nil { - return 0, validationErr - } var curr int s.metaBuf.Reset() data = data[:0] - // _id field special case optimizes ExternalID() lookups - idFieldVal := docStoredFields[uint16(0)].vals[0] - _, err = metaEncode(uint64(len(idFieldVal))) - if err != nil { - return 0, err - } - - // handle non-"_id" fields - for fieldID := 1; fieldID < len(s.FieldsInv); fieldID++ { + // handle fields + for fieldID := 0; fieldID < len(s.FieldsInv); fieldID++ { isf, exists := docStoredFields[uint16(fieldID)] if exists { - curr, data, err = persistStoredFieldValues( - fieldID, isf.vals, isf.typs, isf.arrayposs, + curr, data, err = encodeStoredFieldValues( + fieldID, isf.vals, curr, metaEncode, data) if err != nil { return 0, err @@ -562,9 +612,9 @@ func (s *interim) writeStoredFields() ( docStoredOffsets[docNum] = uint64(s.w.Count()) - _, err := writeUvarints(s.w, + err = writeUvarints(s.w, uint64(len(metaBytes)), - uint64(len(idFieldVal)+len(compressed))) + uint64(len(compressed))) if err != nil { return 0, err } @@ -574,11 +624,6 @@ func (s *interim) writeStoredFields() ( return 0, err } - _, err = s.w.Write(idFieldVal) - if err != nil { - return 0, err 
- } - _, err = s.w.Write(compressed) if err != nil { return 0, err @@ -608,8 +653,8 @@ func (s *interim) writeDicts() (fdvIndexOffset uint64, dictOffsets []uint64, err // these int coders are initialized with chunk size 1024 // however this will be reset to the correct chunk size // while processing each individual field-term section - tfEncoder := newChunkedIntCoder(1024, uint64(len(s.results)-1)) - locEncoder := newChunkedIntCoder(1024, uint64(len(s.results)-1)) + tfEncoder := newChunkedIntCoder(uint64(legacyChunkMode), uint64(len(s.results)-1)) + locEncoder := newChunkedIntCoder(uint64(legacyChunkMode), uint64(len(s.results)-1)) var docTermMap [][]byte @@ -621,210 +666,199 @@ func (s *interim) writeDicts() (fdvIndexOffset uint64, dictOffsets []uint64, err } for fieldID, terms := range s.DictKeys { - if cap(docTermMap) < len(s.results) { - docTermMap = make([][]byte, len(s.results)) - } else { - docTermMap = docTermMap[0:len(s.results)] - for docNum := range docTermMap { // reset the docTermMap - docTermMap[docNum] = docTermMap[docNum][:0] - } + err2 := s.writeDictsField(docTermMap, fieldID, terms, tfEncoder, locEncoder, buf, dictOffsets, fdvOffsetsStart, fdvOffsetsEnd) + if err2 != nil { + return 0, nil, err2 } + } - dict := s.Dicts[fieldID] - - for _, term := range terms { // terms are already sorted - pid := dict[term] - 1 - - postingsBS := s.Postings[pid] - - freqNorms := s.FreqNorms[pid] - freqNormOffset := 0 - - locs := s.Locs[pid] - locOffset := 0 + fdvIndexOffset = uint64(s.w.Count()) - chunkSize, err := getChunkSize(s.chunkMode, postingsBS.GetCardinality(), uint64(len(s.results))) - if err != nil { - return 0, nil, err - } - tfEncoder.SetChunkSize(chunkSize, uint64(len(s.results)-1)) - locEncoder.SetChunkSize(chunkSize, uint64(len(s.results)-1)) + for i := 0; i < len(fdvOffsetsStart); i++ { + n := binary.PutUvarint(buf, fdvOffsetsStart[i]) + _, err := s.w.Write(buf[:n]) + if err != nil { + return 0, nil, err + } + n = binary.PutUvarint(buf, 
fdvOffsetsEnd[i]) + _, err = s.w.Write(buf[:n]) + if err != nil { + return 0, nil, err + } + } - postingsItr := postingsBS.Iterator() - for postingsItr.HasNext() { - docNum := uint64(postingsItr.Next()) + return fdvIndexOffset, dictOffsets, nil +} - freqNorm := freqNorms[freqNormOffset] +func (s *interim) writeDictsField(docTermMap [][]byte, fieldID int, terms []string, tfEncoder, + locEncoder *chunkedIntCoder, buf []byte, dictOffsets, fdvOffsetsStart, fdvOffsetsEnd []uint64) error { + if cap(docTermMap) < len(s.results) { + docTermMap = make([][]byte, len(s.results)) + } else { + docTermMap = docTermMap[0:len(s.results)] + for docNum := range docTermMap { // reset the docTermMap + docTermMap[docNum] = docTermMap[docNum][:0] + } + } - err = tfEncoder.Add(docNum, - encodeFreqHasLocs(freqNorm.freq, freqNorm.numLocs > 0), - uint64(math.Float32bits(freqNorm.norm))) - if err != nil { - return 0, nil, err - } + dict := s.Dicts[fieldID] - if freqNorm.numLocs > 0 { - numBytesLocs := 0 - for _, loc := range locs[locOffset : locOffset+freqNorm.numLocs] { - numBytesLocs += totalUvarintBytes( - uint64(loc.fieldID), loc.pos, loc.start, loc.end, - uint64(len(loc.arrayposs)), loc.arrayposs) - } + for _, term := range terms { // terms are already sorted + err2 := s.writeDictsTermField(docTermMap, dict, term, tfEncoder, locEncoder, buf) + if err2 != nil { + return err2 + } + } - err = locEncoder.Add(docNum, uint64(numBytesLocs)) - if err != nil { - return 0, nil, err - } + err := s.builder.Close() + if err != nil { + return err + } - for _, loc := range locs[locOffset : locOffset+freqNorm.numLocs] { - err = locEncoder.Add(docNum, - uint64(loc.fieldID), loc.pos, loc.start, loc.end, - uint64(len(loc.arrayposs))) - if err != nil { - return 0, nil, err - } - - err = locEncoder.Add(docNum, loc.arrayposs...) 
- if err != nil { - return 0, nil, err - } - } + // record where this dictionary starts + dictOffsets[fieldID] = uint64(s.w.Count()) - locOffset += freqNorm.numLocs - } + vellumData := s.builderBuf.Bytes() - freqNormOffset++ + // write out the length of the vellum data + n := binary.PutUvarint(buf, uint64(len(vellumData))) + _, err = s.w.Write(buf[:n]) + if err != nil { + return err + } - docTermMap[docNum] = append( - append(docTermMap[docNum], term...), - termSeparator) - } + // write this vellum to disk + _, err = s.w.Write(vellumData) + if err != nil { + return err + } - tfEncoder.Close() - locEncoder.Close() + // reset vellum for reuse + s.builderBuf.Reset() - postingsOffset, err := - writePostings(postingsBS, tfEncoder, locEncoder, nil, s.w, buf) - if err != nil { - return 0, nil, err - } + err = s.builder.Reset(&s.builderBuf) + if err != nil { + return err + } - if postingsOffset > uint64(0) { - err = s.builder.Insert([]byte(term), postingsOffset) + // write the field doc values + // NOTE: doc values continue to use legacy chunk mode + chunkSize, err := getChunkSize(legacyChunkMode, 0, 0) + if err != nil { + return err + } + fdvEncoder := newChunkedContentCoder(chunkSize, uint64(len(s.results)-1), s.w, false) + if s.IncludeDocValues[fieldID] { + for docNum, docTerms := range docTermMap { + if len(docTerms) > 0 { + err = fdvEncoder.Add(uint64(docNum), docTerms) if err != nil { - return 0, nil, err + return err } } - - tfEncoder.Reset() - locEncoder.Reset() } - - err = s.builder.Close() + err = fdvEncoder.Close() if err != nil { - return 0, nil, err + return err } - // record where this dictionary starts - dictOffsets[fieldID] = uint64(s.w.Count()) - - vellumData := s.builderBuf.Bytes() + fdvOffsetsStart[fieldID] = uint64(s.w.Count()) - // write out the length of the vellum data - n := binary.PutUvarint(buf, uint64(len(vellumData))) - _, err = s.w.Write(buf[:n]) + _, err = fdvEncoder.Write() if err != nil { - return 0, nil, err + return err } - // write this 
vellum to disk - _, err = s.w.Write(vellumData) - if err != nil { - return 0, nil, err - } + fdvOffsetsEnd[fieldID] = uint64(s.w.Count()) - // reset vellum for reuse - s.builderBuf.Reset() + fdvEncoder.Reset() + } else { + fdvOffsetsStart[fieldID] = fieldNotUninverted + fdvOffsetsEnd[fieldID] = fieldNotUninverted + } + return nil +} - err = s.builder.Reset(&s.builderBuf) - if err != nil { - return 0, nil, err - } +func (s *interim) writeDictsTermField(docTermMap [][]byte, dict map[string]uint64, term string, tfEncoder, + locEncoder *chunkedIntCoder, buf []byte) error { + pid := dict[term] - 1 + + postingsBS := s.Postings[pid] - // write the field doc values - // NOTE: doc values continue to use legacy chunk mode - chunkSize, err := getChunkSize(LegacyChunkMode, 0, 0) + freqNorms := s.FreqNorms[pid] + freqNormOffset := 0 + + locs := s.Locs[pid] + locOffset := 0 + + chunkSize, err := getChunkSize(s.chunkMode, postingsBS.GetCardinality(), uint64(len(s.results))) + if err != nil { + return err + } + tfEncoder.SetChunkSize(chunkSize, uint64(len(s.results)-1)) + locEncoder.SetChunkSize(chunkSize, uint64(len(s.results)-1)) + + postingsItr := postingsBS.Iterator() + for postingsItr.HasNext() { + docNum := uint64(postingsItr.Next()) + + freqNorm := freqNorms[freqNormOffset] + + err = tfEncoder.Add(docNum, + encodeFreqHasLocs(freqNorm.freq, freqNorm.numLocs > 0), + uint64(math.Float32bits(freqNorm.norm))) if err != nil { - return 0, nil, err + return err } - fdvEncoder := newChunkedContentCoder(chunkSize, uint64(len(s.results)-1), s.w, false) - if s.IncludeDocValues[fieldID] { - for docNum, docTerms := range docTermMap { - if len(docTerms) > 0 { - err = fdvEncoder.Add(uint64(docNum), docTerms) - if err != nil { - return 0, nil, err - } - } - } - err = fdvEncoder.Close() - if err != nil { - return 0, nil, err - } - fdvOffsetsStart[fieldID] = uint64(s.w.Count()) + if freqNorm.numLocs > 0 { + numBytesLocs := 0 + for _, loc := range locs[locOffset : locOffset+freqNorm.numLocs] { 
+ numBytesLocs += totalUvarintBytes( + uint64(loc.fieldID), loc.pos, loc.start, loc.end) + } - _, err = fdvEncoder.Write() + err = locEncoder.Add(docNum, uint64(numBytesLocs)) if err != nil { - return 0, nil, err + return err } - fdvOffsetsEnd[fieldID] = uint64(s.w.Count()) + for _, loc := range locs[locOffset : locOffset+freqNorm.numLocs] { + err = locEncoder.Add(docNum, + uint64(loc.fieldID), loc.pos, loc.start, loc.end) + if err != nil { + return err + } + } - fdvEncoder.Reset() - } else { - fdvOffsetsStart[fieldID] = fieldNotUninverted - fdvOffsetsEnd[fieldID] = fieldNotUninverted + locOffset += freqNorm.numLocs } - } - fdvIndexOffset = uint64(s.w.Count()) + freqNormOffset++ - for i := 0; i < len(fdvOffsetsStart); i++ { - n := binary.PutUvarint(buf, fdvOffsetsStart[i]) - _, err := s.w.Write(buf[:n]) - if err != nil { - return 0, nil, err - } - n = binary.PutUvarint(buf, fdvOffsetsEnd[i]) - _, err = s.w.Write(buf[:n]) - if err != nil { - return 0, nil, err - } + docTermMap[docNum] = append( + append(docTermMap[docNum], term...), + termSeparator) } - return fdvIndexOffset, dictOffsets, nil -} + tfEncoder.Close() + locEncoder.Close() -// returns the total # of bytes needed to encode the given uint64's -// into binary.PutUVarint() encoding -func totalUvarintBytes(a, b, c, d, e uint64, more []uint64) (n int) { - n = numUvarintBytes(a) - n += numUvarintBytes(b) - n += numUvarintBytes(c) - n += numUvarintBytes(d) - n += numUvarintBytes(e) - for _, v := range more { - n += numUvarintBytes(v) - } - return n -} + var postingsOffset uint64 + postingsOffset, err = + writePostings(postingsBS, tfEncoder, locEncoder, nil, s.w, buf) + if err != nil { + return err + } -// returns # of bytes needed to encode x in binary.PutUvarint() encoding -func numUvarintBytes(x uint64) (n int) { - for x >= 0x80 { - x >>= 7 - n++ + if postingsOffset > uint64(0) { + err = s.builder.Insert([]byte(term), postingsOffset) + if err != nil { + return err + } } - return n + 1 + + tfEncoder.Reset() + 
locEncoder.Reset() + return nil } diff --git a/vendor/github.com/blevesearch/zapx/v14/posting.go b/vendor/github.com/blugelabs/ice/posting.go similarity index 73% rename from vendor/github.com/blevesearch/zapx/v14/posting.go rename to vendor/github.com/blugelabs/ice/posting.go index 1d04247fb..c0dc2d4f7 100644 --- a/vendor/github.com/blevesearch/zapx/v14/posting.go +++ b/vendor/github.com/blugelabs/ice/posting.go @@ -1,4 +1,4 @@ -// Copyright (c) 2017 Couchbase, Inc. +// Copyright (c) 2020 Couchbase, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,34 +12,17 @@ // See the License for the specific language governing permissions and // limitations under the License. -package zap +package ice import ( "encoding/binary" "fmt" "math" - "reflect" "github.com/RoaringBitmap/roaring" - segment "github.com/blevesearch/scorch_segment_api/v2" + segment "github.com/blugelabs/bluge_segment_api" ) -var reflectStaticSizePostingsList int -var reflectStaticSizePostingsIterator int -var reflectStaticSizePosting int -var reflectStaticSizeLocation int - -func init() { - var pl PostingsList - reflectStaticSizePostingsList = int(reflect.TypeOf(pl).Size()) - var pi PostingsIterator - reflectStaticSizePostingsIterator = int(reflect.TypeOf(pi).Size()) - var p Posting - reflectStaticSizePosting = int(reflect.TypeOf(p).Size()) - var l Location - reflectStaticSizeLocation = int(reflect.TypeOf(l).Size()) -} - // FST or vellum value (uint64) encoding is determined by the top two // highest-order or most significant bits... // @@ -70,32 +53,31 @@ func init() { // In the "1-hit" encoding, the field in that single doc may have // other terms, which is supported in the "1-hit" encoding by the // positive float31 norm. 
+const fSTValEncodingMaskRaw = 0xc000000000000000 +const fSTValEncodingMask = uint64(fSTValEncodingMaskRaw) +const fSTValEncoding1HitRaw = 0x8000000000000000 +const fSTValEncoding1Hit = uint64(fSTValEncoding1HitRaw) -const FSTValEncodingMask = uint64(0xc000000000000000) -const FSTValEncodingGeneral = uint64(0x0000000000000000) -const FSTValEncoding1Hit = uint64(0x8000000000000000) - -func FSTValEncode1Hit(docNum uint64, normBits uint64) uint64 { - return FSTValEncoding1Hit | ((mask31Bits & normBits) << 31) | (mask31Bits & docNum) +func fSTValEncode1Hit(docNum, normBits uint64) uint64 { + return fSTValEncoding1Hit | ((mask31Bits & normBits) << 31) | (mask31Bits & docNum) } -func FSTValDecode1Hit(v uint64) (docNum uint64, normBits uint64) { - return (mask31Bits & v), (mask31Bits & (v >> 31)) +func fSTValDecode1Hit(v uint64) (docNum, normBits uint64) { + return mask31Bits & v, mask31Bits & (v >> 31) } -const mask31Bits = uint64(0x000000007fffffff) +const mask31BitsRaw = 0x000000007fffffff +const mask31Bits = uint64(mask31BitsRaw) func under32Bits(x uint64) bool { return x <= mask31Bits } -const DocNum1HitFinished = math.MaxUint64 - -var NormBits1Hit = uint64(math.Float32bits(float32(1))) +const docNum1HitFinished = math.MaxUint64 // PostingsList is an in-memory representation of a postings list type PostingsList struct { - sb *SegmentBase + sb *Segment postingsOffset uint64 freqOffset uint64 locOffset uint64 @@ -114,7 +96,7 @@ type PostingsList struct { var emptyPostingsList = &PostingsList{} func (p *PostingsList) Size() int { - sizeInBytes := reflectStaticSizePostingsList + SizeOfPtr + sizeInBytes := reflectStaticSizePostingsList + sizeOfPtr if p.except != nil { sizeInBytes += int(p.except.GetSizeInBytes()) @@ -136,9 +118,9 @@ func (p *PostingsList) OrInto(receiver *roaring.Bitmap) { // Iterator returns an iterator for this postings list func (p *PostingsList) Iterator(includeFreq, includeNorm, includeLocs bool, - prealloc segment.PostingsIterator) 
segment.PostingsIterator { + prealloc segment.PostingsIterator) (segment.PostingsIterator, error) { if p.normBits1Hit == 0 && p.postings == nil { - return emptyPostingsIterator + return emptyPostingsIterator, nil } var preallocPI *PostingsIterator @@ -154,7 +136,7 @@ func (p *PostingsList) Iterator(includeFreq, includeNorm, includeLocs bool, } func (p *PostingsList) iterator(includeFreq, includeNorm, includeLocs bool, - rv *PostingsIterator) *PostingsIterator { + rv *PostingsIterator) (*PostingsIterator, error) { if rv == nil { rv = &PostingsIterator{} } else { @@ -194,25 +176,33 @@ func (p *PostingsList) iterator(includeFreq, includeNorm, includeLocs bool, rv.normBits1Hit = p.normBits1Hit if p.except != nil && p.except.Contains(uint32(rv.docNum1Hit)) { - rv.docNum1Hit = DocNum1HitFinished + rv.docNum1Hit = docNum1HitFinished } - return rv + return rv, nil } // "general" encoding, check if empty if p.postings == nil { - return rv + return rv, nil } // initialize freq chunk reader if rv.includeFreqNorm { - rv.freqNormReader = newChunkedIntDecoder(p.sb.mem, p.freqOffset, rv.freqNormReader) + var err error + rv.freqNormReader, err = newChunkedIntDecoder(p.sb.data, p.freqOffset, rv.freqNormReader) + if err != nil { + return nil, err + } } // initialize the loc chunk reader if rv.includeLocs { - rv.locReader = newChunkedIntDecoder(p.sb.mem, p.locOffset, rv.locReader) + var err error + rv.locReader, err = newChunkedIntDecoder(p.sb.data, p.locOffset, rv.locReader) + if err != nil { + return nil, err + } } rv.all = p.postings.Iterator() @@ -224,7 +214,7 @@ func (p *PostingsList) iterator(includeFreq, includeNorm, includeLocs bool, rv.Actual = rv.all // Optimize to use same iterator for all & Actual. 
} - return rv + return rv, nil } // Count returns the number of items on this postings list @@ -244,40 +234,56 @@ func (p *PostingsList) Count() uint64 { return n - e } -func (rv *PostingsList) read(postingsOffset uint64, d *Dictionary) error { - rv.postingsOffset = postingsOffset +func (p *PostingsList) read(postingsOffset uint64, d *Dictionary) error { + p.postingsOffset = postingsOffset // handle "1-hit" encoding special case - if rv.postingsOffset&FSTValEncodingMask == FSTValEncoding1Hit { - return rv.init1Hit(postingsOffset) + if p.postingsOffset&fSTValEncodingMask == fSTValEncoding1Hit { + return p.init1Hit(postingsOffset) } // read the location of the freq/norm details var n uint64 var read int - rv.freqOffset, read = binary.Uvarint(d.sb.mem[postingsOffset+n : postingsOffset+binary.MaxVarintLen64]) + freqOffsetData, err := d.sb.data.Read(int(postingsOffset+n), int(postingsOffset+binary.MaxVarintLen64)) + if err != nil { + return err + } + p.freqOffset, read = binary.Uvarint(freqOffsetData) n += uint64(read) - rv.locOffset, read = binary.Uvarint(d.sb.mem[postingsOffset+n : postingsOffset+n+binary.MaxVarintLen64]) + locOffsetData, err := d.sb.data.Read(int(postingsOffset+n), int(postingsOffset+n+binary.MaxVarintLen64)) + if err != nil { + return err + } + p.locOffset, read = binary.Uvarint(locOffsetData) n += uint64(read) + postingsLenData, err := d.sb.data.Read(int(postingsOffset+n), int(postingsOffset+n+binary.MaxVarintLen64)) + if err != nil { + return err + } var postingsLen uint64 - postingsLen, read = binary.Uvarint(d.sb.mem[postingsOffset+n : postingsOffset+n+binary.MaxVarintLen64]) + postingsLen, read = binary.Uvarint(postingsLenData) n += uint64(read) - roaringBytes := d.sb.mem[postingsOffset+n : postingsOffset+n+postingsLen] + roaringData, err := d.sb.data.Read(int(postingsOffset+n), int(postingsOffset+n+postingsLen)) + if err != nil { + return err + } + roaringBytes := roaringData - if rv.postings == nil { - rv.postings = roaring.NewBitmap() + if 
p.postings == nil { + p.postings = roaring.NewBitmap() } - _, err := rv.postings.FromBuffer(roaringBytes) + _, err = p.postings.FromBuffer(roaringBytes) if err != nil { return fmt.Errorf("error loading roaring bitmap: %v", err) } - rv.chunkSize, err = getChunkSize(d.sb.chunkMode, - rv.postings.GetCardinality(), d.sb.numDocs) + p.chunkSize, err = getChunkSize(d.sb.footer.chunkMode, + p.postings.GetCardinality(), d.sb.footer.numDocs) if err != nil { return err } @@ -285,11 +291,11 @@ func (rv *PostingsList) read(postingsOffset uint64, d *Dictionary) error { return nil } -func (rv *PostingsList) init1Hit(fstVal uint64) error { - docNum, normBits := FSTValDecode1Hit(fstVal) +func (p *PostingsList) init1Hit(fstVal uint64) error { + docNum, normBits := fSTValDecode1Hit(fstVal) - rv.docNum1Hit = docNum - rv.normBits1Hit = normBits + p.docNum1Hit = docNum + p.normBits1Hit = normBits return nil } @@ -321,7 +327,7 @@ type PostingsIterator struct { var emptyPostingsIterator = &PostingsIterator{} func (i *PostingsIterator) Size() int { - sizeInBytes := reflectStaticSizePostingsIterator + SizeOfPtr + + sizeInBytes := reflectStaticSizePostingsIterator + sizeOfPtr + i.next.Size() // account for freqNormReader, locReader if we start using this. 
for _, entry := range i.nextLocs { @@ -330,6 +336,9 @@ func (i *PostingsIterator) Size() int { return sizeInBytes } +func (i *PostingsIterator) Empty() bool { + return false +} func (i *PostingsIterator) loadChunk(chunk int) error { if i.includeFreqNorm { @@ -350,7 +359,7 @@ func (i *PostingsIterator) loadChunk(chunk int) error { return nil } -func (i *PostingsIterator) readFreqNormHasLocs() (uint64, uint64, bool, error) { +func (i *PostingsIterator) readFreqNormHasLocs() (freq int, norm uint64, hasLocs bool, err error) { if i.normBits1Hit != 0 { return 1, i.normBits1Hit, false, nil } @@ -360,14 +369,14 @@ func (i *PostingsIterator) readFreqNormHasLocs() (uint64, uint64, bool, error) { return 0, 0, false, fmt.Errorf("error reading frequency: %v", err) } - freq, hasLocs := decodeFreqHasLocs(freqHasLocs) + freq, hasLocs = decodeFreqHasLocs(freqHasLocs) - normBits, err := i.freqNormReader.readUvarint() + norm, err = i.freqNormReader.readUvarint() if err != nil { return 0, 0, false, fmt.Errorf("error reading norm: %v", err) } - return freq, normBits, hasLocs, nil + return freq, norm, hasLocs, nil } func (i *PostingsIterator) skipFreqNormReadHasLocs() (bool, error) { @@ -388,15 +397,15 @@ func (i *PostingsIterator) skipFreqNormReadHasLocs() (bool, error) { func encodeFreqHasLocs(freq uint64, hasLocs bool) uint64 { rv := freq << 1 if hasLocs { - rv = rv | 0x01 // 0'th LSB encodes whether there are locations + rv |= 0x01 // 0'th LSB encodes whether there are locations } return rv } -func decodeFreqHasLocs(freqHasLocs uint64) (uint64, bool) { +func decodeFreqHasLocs(freqHasLocs uint64) (int, bool) { freq := freqHasLocs >> 1 hasLocs := freqHasLocs&0x01 != 0 - return freq, hasLocs + return int(freq), hasLocs } // readLocation processes all the integers on the stream representing a single @@ -422,32 +431,11 @@ func (i *PostingsIterator) readLocation(l *Location) error { if err != nil { return fmt.Errorf("error reading location end: %v", err) } - // read off num array pos - 
numArrayPos, err := i.locReader.readUvarint() - if err != nil { - return fmt.Errorf("error reading location num array pos: %v", err) - } l.field = i.postings.sb.fieldsInv[fieldID] - l.pos = pos - l.start = start - l.end = end - - if cap(l.ap) < int(numArrayPos) { - l.ap = make([]uint64, int(numArrayPos)) - } else { - l.ap = l.ap[:int(numArrayPos)] - } - - // read off array positions - for k := 0; k < int(numArrayPos); k++ { - ap, err := i.locReader.readUvarint() - if err != nil { - return fmt.Errorf("error reading array position: %v", err) - } - - l.ap[k] = ap - } + l.pos = int(pos) + l.start = int(start) + l.end = int(end) return nil } @@ -463,6 +451,8 @@ func (i *PostingsIterator) Advance(docNum uint64) (segment.Posting, error) { return i.nextAtOrAfter(docNum) } +const locSliceGrowth = 2 + // Next returns the next posting on the postings list, or nil at the end func (i *PostingsIterator) nextAtOrAfter(atOrAfter uint64) (segment.Posting, error) { docNum, exists, err := i.nextDocNumAtOrAfter(atOrAfter) @@ -493,13 +483,13 @@ func (i *PostingsIterator) nextAtOrAfter(atOrAfter uint64) (segment.Posting, err // rv.freq >= "number of locs", since in a composite field, // some component fields might have their IncludeTermVector // flags disabled while other component fields are enabled - if cap(i.nextLocs) >= int(rv.freq) { + if cap(i.nextLocs) >= rv.freq { i.nextLocs = i.nextLocs[0:rv.freq] } else { - i.nextLocs = make([]Location, rv.freq, rv.freq*2) + i.nextLocs = make([]Location, rv.freq, rv.freq*locSliceGrowth) } - if cap(i.nextSegmentLocs) < int(rv.freq) { - i.nextSegmentLocs = make([]segment.Location, rv.freq, rv.freq*2) + if cap(i.nextSegmentLocs) < rv.freq { + i.nextSegmentLocs = make([]segment.Location, rv.freq, rv.freq*locSliceGrowth) } rv.locs = i.nextSegmentLocs[:0] @@ -525,18 +515,18 @@ func (i *PostingsIterator) nextAtOrAfter(atOrAfter uint64) (segment.Posting, err // nextDocNum returns the next docNum on the postings list, and also // sets up the currChunk 
/ loc related fields of the iterator. -func (i *PostingsIterator) nextDocNumAtOrAfter(atOrAfter uint64) (uint64, bool, error) { +func (i *PostingsIterator) nextDocNumAtOrAfter(atOrAfter uint64) (docNum uint64, exists bool, err error) { if i.normBits1Hit != 0 { - if i.docNum1Hit == DocNum1HitFinished { + if i.docNum1Hit == docNum1HitFinished { return 0, false, nil } if i.docNum1Hit < atOrAfter { // advanced past our 1-hit - i.docNum1Hit = DocNum1HitFinished // consume our 1-hit docNum + i.docNum1Hit = docNum1HitFinished // consume our 1-hit docNum return 0, false, nil } docNum := i.docNum1Hit - i.docNum1Hit = DocNum1HitFinished // consume our 1-hit docNum + i.docNum1Hit = docNum1HitFinished // consume our 1-hit docNum return docNum, true, nil } @@ -557,6 +547,7 @@ func (i *PostingsIterator) nextDocNumAtOrAfter(atOrAfter uint64) (uint64, bool, n := i.Actual.Next() allN := i.all.Next() + nChunk := n / uint32(i.postings.chunkSize) // when allN becomes >= to here, then allN is in the same chunk as nChunk. @@ -590,7 +581,7 @@ func (i *PostingsIterator) nextDocNumAtOrAfter(atOrAfter uint64) (uint64, bool, // optimization when the postings list is "clean" (e.g., no updates & // no deletions) where the all bitmap is the same as the actual bitmap func (i *PostingsIterator) nextDocNumAtOrAfterClean( - atOrAfter uint64) (uint64, bool, error) { + atOrAfter uint64) (docNum uint64, exists bool, err error) { if !i.includeFreqNorm { i.Actual.AdvanceIfNeeded(uint32(atOrAfter)) @@ -615,7 +606,7 @@ func (i *PostingsIterator) nextDocNumAtOrAfterClean( if nChunk != nChunkPrev { sameChunkNexts = 0 } else { - sameChunkNexts += 1 + sameChunkNexts++ } } @@ -670,54 +661,38 @@ func (i *PostingsIterator) currChunkNext(nChunk uint32) error { // DocNum1Hit returns the docNum and true if this is "1-hit" optimized // and the docNum is available. 
-func (p *PostingsIterator) DocNum1Hit() (uint64, bool) { - if p.normBits1Hit != 0 && p.docNum1Hit != DocNum1HitFinished { - return p.docNum1Hit, true +func (i *PostingsIterator) DocNum1Hit() (uint64, bool) { + if i.normBits1Hit != 0 && i.docNum1Hit != docNum1HitFinished { + return i.docNum1Hit, true } return 0, false } // ActualBitmap returns the underlying actual bitmap // which can be used up the stack for optimizations -func (p *PostingsIterator) ActualBitmap() *roaring.Bitmap { - return p.ActualBM +func (i *PostingsIterator) ActualBitmap() *roaring.Bitmap { + return i.ActualBM } // ReplaceActual replaces the ActualBM with the provided // bitmap -func (p *PostingsIterator) ReplaceActual(abm *roaring.Bitmap) { - p.ActualBM = abm - p.Actual = abm.Iterator() -} - -// PostingsIteratorFromBitmap constructs a PostingsIterator given an -// "actual" bitmap. -func PostingsIteratorFromBitmap(bm *roaring.Bitmap, - includeFreqNorm, includeLocs bool) (segment.PostingsIterator, error) { - return &PostingsIterator{ - ActualBM: bm, - Actual: bm.Iterator(), - includeFreqNorm: includeFreqNorm, - includeLocs: includeLocs, - }, nil -} - -// PostingsIteratorFrom1Hit constructs a PostingsIterator given a -// 1-hit docNum. 
-func PostingsIteratorFrom1Hit(docNum1Hit uint64, - includeFreqNorm, includeLocs bool) (segment.PostingsIterator, error) { - return &PostingsIterator{ - docNum1Hit: docNum1Hit, - normBits1Hit: NormBits1Hit, - includeFreqNorm: includeFreqNorm, - includeLocs: includeLocs, - }, nil +func (i *PostingsIterator) ReplaceActual(abm *roaring.Bitmap) { + i.ActualBM = abm + i.Actual = abm.Iterator() +} + +func (i *PostingsIterator) Count() uint64 { + return i.postings.Count() +} + +func (i *PostingsIterator) Close() error { + return nil } // Posting is a single entry in a postings list type Posting struct { docNum uint64 - freq uint64 + freq int norm float32 locs []segment.Location } @@ -737,8 +712,13 @@ func (p *Posting) Number() uint64 { return p.docNum } +// SetNumber sets the document number of this posting +func (p *Posting) SetNumber(n uint64) { + p.docNum = n +} + // Frequency returns the frequencies of occurrence of this term in this doc/field -func (p *Posting) Frequency() uint64 { +func (p *Posting) Frequency() int { return p.freq } @@ -755,16 +735,14 @@ func (p *Posting) Locations() []segment.Location { // Location represents the location of a single occurrence type Location struct { field string - pos uint64 - start uint64 - end uint64 - ap []uint64 + pos int + start int + end int } func (l *Location) Size() int { return reflectStaticSizeLocation + - len(l.field) + - len(l.ap)*SizeOfUint64 + len(l.field) } // Field returns the name of the field (useful in composite fields to know @@ -774,21 +752,16 @@ func (l *Location) Field() string { } // Start returns the start byte offset of this occurrence -func (l *Location) Start() uint64 { +func (l *Location) Start() int { return l.start } // End returns the end byte offset of this occurrence -func (l *Location) End() uint64 { +func (l *Location) End() int { return l.end } // Pos returns the 1-based phrase position of this occurrence -func (l *Location) Pos() uint64 { +func (l *Location) Pos() int { return l.pos } - -// 
ArrayPositions returns the array position vector associated with this occurrence -func (l *Location) ArrayPositions() []uint64 { - return l.ap -} diff --git a/vendor/github.com/blugelabs/ice/read.go b/vendor/github.com/blugelabs/ice/read.go new file mode 100644 index 000000000..088743365 --- /dev/null +++ b/vendor/github.com/blugelabs/ice/read.go @@ -0,0 +1,74 @@ +// Copyright (c) 2020 Couchbase, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ice + +import "encoding/binary" + +func (s *Segment) getDocStoredMetaAndCompressed(docNum uint64) (meta, data []byte, err error) { + _, storedOffset, n, metaLen, dataLen, err := s.getDocStoredOffsets(docNum) + if err != nil { + return nil, nil, err + } + + meta, err = s.data.Read(int(storedOffset+n), int(storedOffset+n+metaLen)) + if err != nil { + return nil, nil, err + } + data, err = s.data.Read(int(storedOffset+n+metaLen), int(storedOffset+n+metaLen+dataLen)) + if err != nil { + return nil, nil, err + } + + return meta, data, nil +} + +func (s *Segment) getDocStoredOffsets(docNum uint64) ( + indexOffset, storedOffset, n, metaLen, dataLen uint64, err error) { + indexOffset = s.footer.storedIndexOffset + (fileAddrWidth * docNum) + + storedOffsetData, err := s.data.Read(int(indexOffset), int(indexOffset+fileAddrWidth)) + if err != nil { + return 0, 0, 0, 0, 0, err + } + storedOffset = binary.BigEndian.Uint64(storedOffsetData) + + metaLenData, err := s.data.Read(int(storedOffset), 
int(storedOffset+binary.MaxVarintLen64)) + if err != nil { + return 0, 0, 0, 0, 0, err + } + var read int + metaLen, read = binary.Uvarint(metaLenData) + n += uint64(read) + + dataLenData, err := s.data.Read(int(storedOffset+n), int(storedOffset+n+binary.MaxVarintLen64)) + if err != nil { + return 0, 0, 0, 0, 0, err + } + dataLen, read = binary.Uvarint(dataLenData) + n += uint64(read) + + return indexOffset, storedOffset, n, metaLen, dataLen, nil +} + +func (s *Segment) getDocStoredOffsetsOnly(docNum int) (indexOffset, storedOffset uint64, err error) { + indexOffset = s.footer.storedIndexOffset + (fileAddrWidth * uint64(docNum)) + + storedOffsetData, err := s.data.Read(int(indexOffset), int(indexOffset+fileAddrWidth)) + if err != nil { + return 0, 0, err + } + storedOffset = binary.BigEndian.Uint64(storedOffsetData) + return indexOffset, storedOffset, nil +} diff --git a/vendor/github.com/blugelabs/ice/segment.go b/vendor/github.com/blugelabs/ice/segment.go new file mode 100644 index 000000000..5b77791d1 --- /dev/null +++ b/vendor/github.com/blugelabs/ice/segment.go @@ -0,0 +1,344 @@ +// Copyright (c) 2020 Couchbase, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package ice + +import ( + "bufio" + "bytes" + "encoding/binary" + "fmt" + "io" + "sync" + + "github.com/RoaringBitmap/roaring" + "github.com/blevesearch/vellum" + segment "github.com/blugelabs/bluge_segment_api" + "github.com/golang/snappy" +) + +const Version uint32 = 1 + +const Type string = "ice" + +type Segment struct { + data *segment.Data + footer *footer + + fieldsMap map[string]uint16 // fieldName -> fieldID+1 + fieldsInv []string // fieldID -> fieldName + fieldDocs map[uint16]uint64 // fieldID -> # docs with value in field + fieldFreqs map[uint16]uint64 // fieldID -> # total tokens in field + + dictLocs []uint64 + fieldDvReaders map[uint16]*docValueReader // naive chunk cache per field + fieldDvNames []string // field names cached in fieldDvReaders + size uint64 + + // state loaded dynamically + m sync.Mutex + fieldFSTs map[uint16]*vellum.FST +} + +func (s *Segment) WriteTo(w io.Writer, _ chan struct{}) (int64, error) { + bw := bufio.NewWriter(w) + + n, err := s.data.WriteTo(w) + if err != nil { + return n, fmt.Errorf("error persisting segment: %w", err) + } + + err = persistFooter(s.footer, bw) + if err != nil { + return n, fmt.Errorf("error persisting segment footer: %w", err) + } + + err = bw.Flush() + if err != nil { + return n, err + } + + return n + footerLen, nil +} + +func (s *Segment) Type() string { + return Type +} + +// Version returns the file version in the file footer +func (s *Segment) Version() uint32 { + return s.footer.version +} + +func (s *Segment) Size() int { + return int(s.size) +} + +func (s *Segment) updateSize() { + sizeInBytes := reflectStaticSizeSegment + + s.data.Size() + + // fieldsMap + for k := range s.fieldsMap { + sizeInBytes += (len(k) + sizeOfString) + sizeOfUint16 + } + + // fieldsInv, dictLocs + for _, entry := range s.fieldsInv { + sizeInBytes += len(entry) + sizeOfString + } + sizeInBytes += len(s.dictLocs) * sizeOfUint64 + + // fieldDvReaders + for _, v := range s.fieldDvReaders { + sizeInBytes += sizeOfUint16 + 
sizeOfPtr + if v != nil { + sizeInBytes += v.size() + } + } + + s.size = uint64(sizeInBytes) +} + +// DictionaryReader returns the term dictionary for the specified field +func (s *Segment) Dictionary(field string) (segment.Dictionary, error) { + dict, err := s.dictionary(field) + if err == nil && dict == nil { + return emptyDictionary, nil + } + return dict, err +} + +func (s *Segment) dictionary(field string) (rv *Dictionary, err error) { + fieldIDPlus1 := s.fieldsMap[field] + if fieldIDPlus1 > 0 { + rv = &Dictionary{ + sb: s, + field: field, + fieldID: fieldIDPlus1 - 1, + } + + dictStart := s.dictLocs[rv.fieldID] + if dictStart > 0 { + var ok bool + s.m.Lock() + if rv.fst, ok = s.fieldFSTs[rv.fieldID]; !ok { + // read the length of the vellum data + var vellumLenData []byte + vellumLenData, err = s.data.Read(int(dictStart), int(dictStart+binary.MaxVarintLen64)) + if err != nil { + return nil, err + } + vellumLen, read := binary.Uvarint(vellumLenData) + var fstBytes []byte + fstBytes, err = s.data.Read(int(dictStart+uint64(read)), int(dictStart+uint64(read)+vellumLen)) + if err != nil { + return nil, err + } + rv.fst, err = vellum.Load(fstBytes) + if err != nil { + s.m.Unlock() + return nil, fmt.Errorf("dictionary field %s vellum err: %v", field, err) + } + + s.fieldFSTs[rv.fieldID] = rv.fst + } + + s.m.Unlock() + rv.fstReader, err = rv.fst.Reader() + if err != nil { + return nil, fmt.Errorf("dictionary field %s vellum reader err: %v", field, err) + } + } + } + + return rv, nil +} + +// visitDocumentCtx holds data structures that are reusable across +// multiple VisitStoredFields() calls to avoid memory allocations +type visitDocumentCtx struct { + buf []byte + reader bytes.Reader +} + +var visitDocumentCtxPool = sync.Pool{ + New: func() interface{} { + reuse := &visitDocumentCtx{} + return reuse + }, +} + +// VisitStoredFields invokes the DocFieldValueVistor for each stored field +// for the specified doc number +func (s *Segment) VisitStoredFields(num uint64, 
visitor segment.StoredFieldVisitor) error { + vdc := visitDocumentCtxPool.Get().(*visitDocumentCtx) + defer visitDocumentCtxPool.Put(vdc) + return s.visitDocument(vdc, num, visitor) +} + +func (s *Segment) visitDocument(vdc *visitDocumentCtx, num uint64, + visitor segment.StoredFieldVisitor) error { + // first make sure this is a valid number in this segment + if num < s.footer.numDocs { + meta, compressed, err := s.getDocStoredMetaAndCompressed(num) + if err != nil { + return err + } + + vdc.reader.Reset(meta) + + uncompressed, err := snappy.Decode(vdc.buf[:cap(vdc.buf)], compressed) + if err != nil { + return err + } + + var keepGoing = true + for keepGoing { + field, err := binary.ReadUvarint(&vdc.reader) + if err == io.EOF { + break + } + if err != nil { + return err + } + offset, err := binary.ReadUvarint(&vdc.reader) + if err != nil { + return err + } + l, err := binary.ReadUvarint(&vdc.reader) + if err != nil { + return err + } + + value := uncompressed[offset : offset+l] + keepGoing = visitor(s.fieldsInv[field], value) + } + + vdc.buf = uncompressed + } + return nil +} + +// Count returns the number of documents in this segment. 
+func (s *Segment) Count() uint64 { + return s.footer.numDocs +} + +func (s *Segment) DocsMatchingTerms(terms []segment.Term) (*roaring.Bitmap, error) { + rv := roaring.New() + + if len(s.fieldsMap) > 0 { + // we expect the common case to be the same field for all + // so we optimize for that, but allow it to work if that + // isn't the case + var err error + var lastField string + var dict *Dictionary + for i, term := range terms { + thisField := term.Field() + if thisField != lastField { + dict, err = s.dictionary(term.Field()) + if err != nil { + return nil, err + } + lastField = thisField + } + term := terms[i] + postingsList := emptyPostingsList + postingsList, err = dict.postingsList(term.Term(), nil, postingsList) + if err != nil { + return nil, err + } + postingsList.OrInto(rv) + } + } + return rv, nil +} + +// Fields returns the field names used in this segment +func (s *Segment) Fields() []string { + return s.fieldsInv +} + +// CRC returns the CRC value stored in the file footer +func (s *Segment) CRC() uint32 { + return s.footer.crc +} + +// ChunkFactor returns the chunk factor in the file footer +func (s *Segment) ChunkMode() uint32 { + return s.footer.chunkMode +} + +// FieldsIndexOffset returns the fields index offset in the file footer +func (s *Segment) FieldsIndexOffset() uint64 { + return s.footer.fieldsIndexOffset +} + +// StoredIndexOffset returns the stored value index offset in the file footer +func (s *Segment) StoredIndexOffset() uint64 { + return s.footer.storedIndexOffset +} + +// DocValueOffset returns the docValue offset in the file footer +func (s *Segment) DocValueOffset() uint64 { + return s.footer.docValueOffset +} + +// NumDocs returns the number of documents in the file footer +func (s *Segment) NumDocs() uint64 { + return s.footer.numDocs +} + +func (s *Segment) loadDvReaders() error { + if s.footer.docValueOffset == fieldNotUninverted || s.footer.numDocs == 0 { + return nil + } + + var read uint64 + for fieldID, field := range 
s.fieldsInv { + var fieldLocStart, fieldLocEnd uint64 + var n int + fieldLocStartData, err := s.data.Read(int(s.footer.docValueOffset+read), int(s.footer.docValueOffset+read+binary.MaxVarintLen64)) + if err != nil { + return err + } + fieldLocStart, n = binary.Uvarint(fieldLocStartData) + if n <= 0 { + return fmt.Errorf("loadDvReaders: failed to read the docvalue offset start for field %d", fieldID) + } + read += uint64(n) + fieldLocEndData, err := s.data.Read(int(s.footer.docValueOffset+read), int(s.footer.docValueOffset+read+binary.MaxVarintLen64)) + if err != nil { + return err + } + fieldLocEnd, n = binary.Uvarint(fieldLocEndData) + if n <= 0 { + return fmt.Errorf("loadDvReaders: failed to read the docvalue offset end for field %d", fieldID) + } + read += uint64(n) + + fieldDvReader, err := s.loadFieldDocValueReader(field, fieldLocStart, fieldLocEnd) + if err != nil { + return err + } + if fieldDvReader != nil { + s.fieldDvReaders[uint16(fieldID)] = fieldDvReader + s.fieldDvNames = append(s.fieldDvNames, field) + } + } + + return nil +} diff --git a/vendor/github.com/blugelabs/ice/sizes.go b/vendor/github.com/blugelabs/ice/sizes.go new file mode 100644 index 000000000..e851daa2e --- /dev/null +++ b/vendor/github.com/blugelabs/ice/sizes.go @@ -0,0 +1,55 @@ +// Copyright (c) 2020 Couchbase, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package ice + +import ( + "reflect" +) + +func init() { + var ptr *int + sizeOfPtr = int(reflect.TypeOf(ptr).Size()) + var str string + sizeOfString = int(reflect.TypeOf(str).Size()) + var u16 uint16 + sizeOfUint16 = int(reflect.TypeOf(u16).Size()) + var u64 uint64 + sizeOfUint64 = int(reflect.TypeOf(u64).Size()) + reflectStaticSizeSegment = int(reflect.TypeOf(Segment{}).Size()) + var md metaData + reflectStaticSizeMetaData = int(reflect.TypeOf(md).Size()) + var dvi docValueReader + reflectStaticSizedocValueReader = int(reflect.TypeOf(dvi).Size()) + var pl PostingsList + reflectStaticSizePostingsList = int(reflect.TypeOf(pl).Size()) + var pi PostingsIterator + reflectStaticSizePostingsIterator = int(reflect.TypeOf(pi).Size()) + var p Posting + reflectStaticSizePosting = int(reflect.TypeOf(p).Size()) + var l Location + reflectStaticSizeLocation = int(reflect.TypeOf(l).Size()) +} + +var sizeOfPtr int +var sizeOfString int +var sizeOfUint16 int +var sizeOfUint64 int +var reflectStaticSizeSegment int +var reflectStaticSizeMetaData int +var reflectStaticSizedocValueReader int +var reflectStaticSizePostingsList int +var reflectStaticSizePostingsIterator int +var reflectStaticSizePosting int +var reflectStaticSizeLocation int diff --git a/vendor/github.com/blugelabs/ice/stats.go b/vendor/github.com/blugelabs/ice/stats.go new file mode 100644 index 000000000..a298a9e98 --- /dev/null +++ b/vendor/github.com/blugelabs/ice/stats.go @@ -0,0 +1,54 @@ +// Copyright (c) 2020 The Bluge Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package ice + +import ( + segment "github.com/blugelabs/bluge_segment_api" +) + +type CollectionStats struct { + totalDocCount uint64 + docCount uint64 + sumTotalTermFreq uint64 +} + +func (c *CollectionStats) TotalDocumentCount() uint64 { + return c.totalDocCount +} + +func (c *CollectionStats) DocumentCount() uint64 { + return c.docCount +} + +func (c *CollectionStats) SumTotalTermFrequency() uint64 { + return c.sumTotalTermFreq +} + +func (c *CollectionStats) Merge(other segment.CollectionStats) { + c.totalDocCount += other.TotalDocumentCount() + c.docCount += other.DocumentCount() + c.sumTotalTermFreq += other.SumTotalTermFrequency() +} + +func (s *Segment) CollectionStats(field string) (segment.CollectionStats, error) { + var rv = &CollectionStats{} + fieldIDPlus1 := s.fieldsMap[field] + if fieldIDPlus1 > 0 { + rv.totalDocCount = s.footer.numDocs + rv.docCount = s.fieldDocs[fieldIDPlus1-1] + rv.sumTotalTermFreq = s.fieldFreqs[fieldIDPlus1-1] + } + return rv, nil +} diff --git a/vendor/github.com/blugelabs/ice/write.go b/vendor/github.com/blugelabs/ice/write.go new file mode 100644 index 000000000..b42f214a9 --- /dev/null +++ b/vendor/github.com/blugelabs/ice/write.go @@ -0,0 +1,248 @@ +// Copyright (c) 2020 Couchbase, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package ice + +import ( + "encoding/binary" + "io" + "math" + + "github.com/RoaringBitmap/roaring" +) + +const fieldNotUninverted = math.MaxUint64 + +type varintEncoder func(uint64) (int, error) + +func encodeStoredFieldValues(fieldID int, + storedFieldValues [][]byte, + curr int, metaEncode varintEncoder, data []byte) ( + newCurr int, newData []byte, err error) { + for i := 0; i < len(storedFieldValues); i++ { + // encode field + _, err := metaEncode(uint64(fieldID)) + if err != nil { + return 0, nil, err + } + // encode start offset + _, err = metaEncode(uint64(curr)) + if err != nil { + return 0, nil, err + } + // end len + _, err = metaEncode(uint64(len(storedFieldValues[i]))) + if err != nil { + return 0, nil, err + } + + data = append(data, storedFieldValues[i]...) + curr += len(storedFieldValues[i]) + } + + return curr, data, nil +} + +func writePostings(postings *roaring.Bitmap, tfEncoder, locEncoder *chunkedIntCoder, + use1HitEncoding func(uint64) (bool, uint64, uint64), + w *countHashWriter, bufMaxVarintLen64 []byte) ( + offset uint64, err error) { + termCardinality := postings.GetCardinality() + if termCardinality <= 0 { + return 0, nil + } + + if use1HitEncoding != nil { + encodeAs1Hit, docNum1Hit, normBits1Hit := use1HitEncoding(termCardinality) + if encodeAs1Hit { + return fSTValEncode1Hit(docNum1Hit, normBits1Hit), nil + } + } + + var tfOffset uint64 + tfOffset, err = tfEncoder.writeAt(w) + if err != nil { + return 0, err + } + + var locOffset uint64 + locOffset, err = locEncoder.writeAt(w) + if err != nil { + return 0, err + } + + postingsOffset := uint64(w.Count()) + + n := binary.PutUvarint(bufMaxVarintLen64, tfOffset) + _, err = w.Write(bufMaxVarintLen64[:n]) + if err != nil { + return 0, err + } + + n = binary.PutUvarint(bufMaxVarintLen64, locOffset) + _, err = w.Write(bufMaxVarintLen64[:n]) + if err != nil { + return 0, err + } + + _, err = writeRoaringWithLen(postings, w, bufMaxVarintLen64) + if err != nil { + return 0, err + } + + return 
postingsOffset, nil +} + +// returns the total # of bytes needed to encode the given uint64's +// into binary.PutUVarint() encoding +func totalUvarintBytes(a, b, c, d uint64) (n int) { + n = numUvarintBytes(a) + n += numUvarintBytes(b) + n += numUvarintBytes(c) + n += numUvarintBytes(d) + return n +} + +// returns # of bytes needed to encode x in binary.PutUvarint() encoding +func numUvarintBytes(x uint64) (n int) { + for x >= 0x80 { + x >>= 7 + n++ + } + return n + 1 +} + +// writes out the length of the roaring bitmap in bytes as varint +// then writes out the roaring bitmap itself +func writeRoaringWithLen(r *roaring.Bitmap, w io.Writer, + reuseBufVarint []byte) (int, error) { + buf, err := r.ToBytes() + if err != nil { + return 0, err + } + + var tw int + + // write out the length + n := binary.PutUvarint(reuseBufVarint, uint64(len(buf))) + nw, err := w.Write(reuseBufVarint[:n]) + tw += nw + if err != nil { + return tw, err + } + + // write out the roaring bytes + nw, err = w.Write(buf) + tw += nw + if err != nil { + return tw, err + } + + return tw, nil +} + +func persistFields(fieldsInv []string, fieldDocs, fieldFreqs map[uint16]uint64, + w *countHashWriter, dictLocs []uint64) (uint64, error) { + var rv uint64 + var fieldsOffsets []uint64 + + for fieldID, fieldName := range fieldsInv { + // record start of this field + fieldsOffsets = append(fieldsOffsets, uint64(w.Count())) + + // write out the dict location and field name length + err := writeUvarints(w, dictLocs[fieldID], uint64(len(fieldName))) + if err != nil { + return 0, err + } + + // write out the field name + _, err = w.Write([]byte(fieldName)) + if err != nil { + return 0, err + } + + // write out the number of docs using this field + // and the number of total tokens + err = writeUvarints(w, fieldDocs[uint16(fieldID)], fieldFreqs[uint16(fieldID)]) + if err != nil { + return 0, err + } + } + + // now write out the fields index + rv = uint64(w.Count()) + for fieldID := range fieldsInv { + err := 
binary.Write(w, binary.BigEndian, fieldsOffsets[fieldID]) + if err != nil { + return 0, err + } + } + + return rv, nil +} + +func persistFooter(footer *footer, writerIn io.Writer) error { + w := newCountHashWriter(writerIn) + w.crc = footer.crc + + // write out the number of docs + err := binary.Write(w, binary.BigEndian, footer.numDocs) + if err != nil { + return err + } + // write out the stored field index location: + err = binary.Write(w, binary.BigEndian, footer.storedIndexOffset) + if err != nil { + return err + } + // write out the field index location + err = binary.Write(w, binary.BigEndian, footer.fieldsIndexOffset) + if err != nil { + return err + } + // write out the fieldDocValue location + err = binary.Write(w, binary.BigEndian, footer.docValueOffset) + if err != nil { + return err + } + // write out 32-bit chunk factor + err = binary.Write(w, binary.BigEndian, footer.chunkMode) + if err != nil { + return err + } + // write out 32-bit version + err = binary.Write(w, binary.BigEndian, Version) + if err != nil { + return err + } + // write out CRC-32 of everything upto but not including this CRC + err = binary.Write(w, binary.BigEndian, w.crc) + if err != nil { + return err + } + return nil +} + +func writeUvarints(w io.Writer, vals ...uint64) (err error) { + buf := make([]byte, binary.MaxVarintLen64) + for _, val := range vals { + n := binary.PutUvarint(buf, val) + _, err = w.Write(buf[:n]) + if err != nil { + return err + } + } + return err +} diff --git a/vendor/github.com/blevesearch/zapx/v12/.gitignore b/vendor/github.com/blugelabs/query_string/.gitignore similarity index 78% rename from vendor/github.com/blevesearch/zapx/v12/.gitignore rename to vendor/github.com/blugelabs/query_string/.gitignore index 46d1cfad5..1a5c85f8f 100644 --- a/vendor/github.com/blevesearch/zapx/v12/.gitignore +++ b/vendor/github.com/blugelabs/query_string/.gitignore @@ -7,6 +7,7 @@ **/.idea/ **/*.iml .DS_Store -/cmd/zap/zap +/cmd/ice/ice *.test tags +/y.output diff --git 
a/vendor/github.com/blugelabs/query_string/.golangci.yml b/vendor/github.com/blugelabs/query_string/.golangci.yml new file mode 100644 index 000000000..3f3e02888 --- /dev/null +++ b/vendor/github.com/blugelabs/query_string/.golangci.yml @@ -0,0 +1,162 @@ +linters-settings: + depguard: + list-type: blacklist + packages: + # logging is allowed only by logutils.Log, logrus + # is allowed to use only in logutils package + - github.com/sirupsen/logrus + packages-with-error-message: + - github.com/sirupsen/logrus: "logging is allowed only by logutils.Log" + dupl: + threshold: 100 + funlen: + lines: 100 + statements: 50 + goconst: + min-len: 2 + min-occurrences: 2 + gocritic: + enabled-tags: + - diagnostic + - experimental + - opinionated + - performance + - style + disabled-checks: + - dupImport # https://github.com/go-critic/go-critic/issues/845 + - ifElseChain + - octalLiteral + - whyNoLint + - wrapperFunc + gocyclo: + min-complexity: 20 # increased from 15 to get us going, but not make things worse + goimports: + local-prefixes: github.com/golangci/golangci-lint + golint: + min-confidence: 0 + gomnd: + settings: + mnd: + # don't include the "operation" and "assign" + checks: argument,case,condition,return + govet: + check-shadowing: true + settings: + printf: + funcs: + - (github.com/golangci/golangci-lint/pkg/logutils.Log).Infof + - (github.com/golangci/golangci-lint/pkg/logutils.Log).Warnf + - (github.com/golangci/golangci-lint/pkg/logutils.Log).Errorf + - (github.com/golangci/golangci-lint/pkg/logutils.Log).Fatalf + lll: + line-length: 140 + maligned: + suggest-new: true + misspell: + locale: US + nolintlint: + allow-leading-space: true # don't require machine-readable nolint directives (i.e. 
with no leading space) + allow-unused: false # report any unused nolint directives + require-explanation: false # don't require an explanation for nolint directives + require-specific: false # don't require nolint directives to be specific about which linter is being skipped + +linters: + # please, do not use `enable-all`: it's deprecated and will be removed soon. + # inverted configuration with `enable-all` and `disable` is not scalable during updates of golangci-lint + disable-all: true + enable: + - bodyclose + - deadcode + - depguard + - dogsled + - dupl + - errcheck + - funlen + - gochecknoinits + - goconst + - gocritic + - gocyclo + - gofmt + - goimports + - golint + - gomnd + - goprintffuncname + - gosec + - gosimple + - govet + - ineffassign + - interfacer + - lll + - misspell + - nakedret + - nolintlint + - rowserrcheck + - scopelint + - staticcheck + - structcheck + - stylecheck + - typecheck + - unconvert + - unparam + - unused + - varcheck + # whitespace is disabled because it seems to get confused by some contents of the generated .y.go file + # further, ignoring it in the exclude rules below doesn't help: + # https://github.com/golangci/golangci-lint/issues/913 + #- whitespace + + # don't enable: + # - asciicheck + # - gochecknoglobals + # - gocognit + # - godot + # - godox + # - goerr113 + # - maligned + # - nestif + # - prealloc + # - testpackage + # - wsl + +issues: + # Excluding configuration per-path, per-linter, per-text and per-source + exclude-rules: + - path: query_string.y.go + linters: + - whitespace + - staticcheck + - gomnd + - structcheck + - lll + - ineffassign + - golint + - goimports + - gocritic + - stylecheck + - unused + - path: _test\.go + linters: + - gomnd + - funlen + - path: sizes.go + linters: + - gochecknoinits + + # https://github.com/go-critic/go-critic/issues/926 + - linters: + - gocritic + text: "unnecessaryDefer:" + +run: + skip-dirs: + - test/testdata_etc + - internal/cache + - internal/renameio + - internal/robustio + 
+# golangci.com configuration +# https://github.com/golangci/golangci/wiki/Configuration +service: + golangci-lint-version: 1.23.x # use the fixed version to not introduce new linters unexpectedly + prepare: + - echo "here I can run custom commands, but no preparation needed for this repo" diff --git a/vendor/github.com/blevesearch/upsidedown_store_api/LICENSE b/vendor/github.com/blugelabs/query_string/LICENSE similarity index 100% rename from vendor/github.com/blevesearch/upsidedown_store_api/LICENSE rename to vendor/github.com/blugelabs/query_string/LICENSE diff --git a/vendor/github.com/blugelabs/query_string/README.md b/vendor/github.com/blugelabs/query_string/README.md new file mode 100644 index 000000000..7cf411574 --- /dev/null +++ b/vendor/github.com/blugelabs/query_string/README.md @@ -0,0 +1,3 @@ +# query_string + +An adaptation of the class Bleve query string functionality for Bluge. \ No newline at end of file diff --git a/vendor/github.com/blugelabs/query_string/query_string.y b/vendor/github.com/blugelabs/query_string/query_string.y new file mode 100644 index 000000000..4c3372068 --- /dev/null +++ b/vendor/github.com/blugelabs/query_string/query_string.y @@ -0,0 +1,233 @@ +%{ +package querystr + +import( + "github.com/blugelabs/bluge" +) +%} + +%union { +s string +n int +f float64 +q bluge.Query +pf *float64} + +%token tSTRING tPHRASE tPLUS tMINUS tCOLON tBOOST tNUMBER tSTRING tGREATER tLESS +tEQUAL tTILDE + +%type tSTRING +%type tPHRASE +%type tNUMBER +%type posOrNegNumber +%type tTILDE +%type tBOOST +%type searchBase +%type searchSuffix +%type searchPrefix + +%% + +input: +searchParts { + yylex.(*lexerWrapper).logDebugGrammarf("INPUT") +}; + +searchParts: +searchPart searchParts { + yylex.(*lexerWrapper).logDebugGrammarf("SEARCH PARTS") +} +| +searchPart { + yylex.(*lexerWrapper).logDebugGrammarf("SEARCH PART") +}; + +searchPart: +searchPrefix searchBase searchSuffix { + q := $2 + if $3 != nil { + var err error + q, err = queryStringSetBoost($2, 
*$3) + if err != nil { + yylex.(*lexerWrapper).lex.Error(err.Error()) + } + } + switch($1) { + case queryShould: + yylex.(*lexerWrapper).query.AddShould(q) + case queryMust: + yylex.(*lexerWrapper).query.AddMust(q) + case queryMustNot: + yylex.(*lexerWrapper).query.AddMustNot(q) + } +}; + + +searchPrefix: +/* empty */ { + $$ = queryShould +} +| +tPLUS { + yylex.(*lexerWrapper).logDebugGrammarf("PLUS") + $$ = queryMust +} +| +tMINUS { + yylex.(*lexerWrapper).logDebugGrammarf("MINUS") + $$ = queryMustNot +}; + +searchBase: +tSTRING { + yylex.(*lexerWrapper).logDebugGrammarf("STRING - %s", $1) + $$ = queryStringStringToken(yylex, "", $1) +} +| +tSTRING tTILDE { + yylex.(*lexerWrapper).logDebugGrammarf("FUZZY STRING - %s %s", $1, $2) + q, err := queryStringStringTokenFuzzy(yylex, "", $1, $2) + if err != nil { + yylex.(*lexerWrapper).lex.Error(err.Error()) + } + $$ = q +} +| +tSTRING tCOLON tSTRING tTILDE { + yylex.(*lexerWrapper).logDebugGrammarf("FIELD - %s FUZZY STRING - %s %s", $1, $3, $4) + q, err := queryStringStringTokenFuzzy(yylex, $1, $3, $4) + if err != nil { + yylex.(*lexerWrapper).lex.Error(err.Error()) + } + $$ = q +} +| +tNUMBER { + yylex.(*lexerWrapper).logDebugGrammarf("STRING - %s", $1) + q, err := queryStringNumberToken(yylex, "", $1) + if err != nil { + yylex.(*lexerWrapper).lex.Error(err.Error()) + } + $$ = q +} +| +tPHRASE { + yylex.(*lexerWrapper).logDebugGrammarf("PHRASE - %s", $1) + $$ = queryStringPhraseToken("", $1) +} +| +tSTRING tCOLON tSTRING { + yylex.(*lexerWrapper).logDebugGrammarf("FIELD - %s STRING - %s", $1, $3) + $$ = queryStringStringToken(yylex, $1, $3) +} +| +tSTRING tCOLON posOrNegNumber { + yylex.(*lexerWrapper).logDebugGrammarf("FIELD - %s STRING - %s", $1, $3) + q, err := queryStringNumberToken(yylex, $1, $3) + if err != nil { + yylex.(*lexerWrapper).lex.Error(err.Error()) + } + $$ = q +} +| +tSTRING tCOLON tPHRASE { + yylex.(*lexerWrapper).logDebugGrammarf("FIELD - %s PHRASE - %s", $1, $3) + $$ = queryStringPhraseToken($1, $3) 
+} +| +tSTRING tCOLON tGREATER posOrNegNumber { + yylex.(*lexerWrapper).logDebugGrammarf("FIELD - GREATER THAN %s", $4) + q, err := queryStringNumericRangeGreaterThanOrEqual($1, $4, false) + if err != nil { + yylex.(*lexerWrapper).lex.Error(err.Error()) + } + $$ = q +} +| +tSTRING tCOLON tGREATER tEQUAL posOrNegNumber { + yylex.(*lexerWrapper).logDebugGrammarf("FIELD - GREATER THAN OR EQUAL %s", $5) + q, err := queryStringNumericRangeGreaterThanOrEqual($1, $5, true) + if err != nil { + yylex.(*lexerWrapper).lex.Error(err.Error()) + } + $$ = q +} +| +tSTRING tCOLON tLESS posOrNegNumber { + yylex.(*lexerWrapper).logDebugGrammarf("FIELD - LESS THAN %s", $4) + q, err := queryStringNumericRangeLessThanOrEqual($1, $4, false) + if err != nil { + yylex.(*lexerWrapper).lex.Error(err.Error()) + } + $$ = q +} +| +tSTRING tCOLON tLESS tEQUAL posOrNegNumber { + yylex.(*lexerWrapper).logDebugGrammarf("FIELD - LESS THAN OR EQUAL %s", $5) + q, err := queryStringNumericRangeLessThanOrEqual($1, $5, true) + if err != nil { + yylex.(*lexerWrapper).lex.Error(err.Error()) + } + $$ = q +} +| +tSTRING tCOLON tGREATER tPHRASE { + yylex.(*lexerWrapper).logDebugGrammarf("FIELD - GREATER THAN DATE %s", $4) + q, err := queryStringDateRangeGreaterThanOrEqual(yylex, $1, $4, false) + if err != nil { + yylex.(*lexerWrapper).lex.Error(err.Error()) + } + $$ = q +} +| +tSTRING tCOLON tGREATER tEQUAL tPHRASE { + yylex.(*lexerWrapper).logDebugGrammarf("FIELD - GREATER THAN OR EQUAL DATE %s", $5) + q, err := queryStringDateRangeGreaterThanOrEqual(yylex, $1, $5, true) + if err != nil { + yylex.(*lexerWrapper).lex.Error(err.Error()) + } + $$ = q +} +| +tSTRING tCOLON tLESS tPHRASE { + yylex.(*lexerWrapper).logDebugGrammarf("FIELD - LESS THAN DATE %s", $4) + q, err := queryStringDateRangeLessThanOrEqual(yylex, $1, $4, false) + if err != nil { + yylex.(*lexerWrapper).lex.Error(err.Error()) + } + $$ = q +} +| +tSTRING tCOLON tLESS tEQUAL tPHRASE { + yylex.(*lexerWrapper).logDebugGrammarf("FIELD - LESS THAN 
OR EQUAL DATE %s", $5) + q, err := queryStringDateRangeLessThanOrEqual(yylex, $1, $5, true) + if err != nil { + yylex.(*lexerWrapper).lex.Error(err.Error()) + } + $$ = q +}; + +searchSuffix: +/* empty */ { + $$ = nil +} +| +tBOOST { + $$ = nil + yylex.(*lexerWrapper).logDebugGrammarf("BOOST %s", $1) + boost, err := queryStringParseBoost($1) + if err != nil { + yylex.(*lexerWrapper).lex.Error(err.Error()) + } else { + $$ = &boost + } +}; + +posOrNegNumber: +tNUMBER { + $$ = $1 +} +| +tMINUS tNUMBER { + $$ = "-" + $2 +}; diff --git a/vendor/github.com/blevesearch/bleve/v2/search/query/query_string.y.go b/vendor/github.com/blugelabs/query_string/query_string.y.go similarity index 56% rename from vendor/github.com/blevesearch/bleve/v2/search/query/query_string.y.go rename to vendor/github.com/blugelabs/query_string/query_string.y.go index 3a2abc132..5d59556e8 100644 --- a/vendor/github.com/blevesearch/bleve/v2/search/query/query_string.y.go +++ b/vendor/github.com/blugelabs/query_string/query_string.y.go @@ -1,31 +1,21 @@ -// Code generated by goyacc -o query_string.y.go query_string.y. DO NOT EDIT. - //line query_string.y:2 -package query +package querystr import __yyfmt__ "fmt" //line query_string.y:2 + import ( - "fmt" - "strconv" - "strings" - "time" + "github.com/blugelabs/bluge" ) -func logDebugGrammar(format string, v ...interface{}) { - if debugParser { - logger.Printf(format, v...) 
- } -} - -//line query_string.y:17 +//line query_string.y:9 type yySymType struct { yys int s string n int f float64 - q Query + q bluge.Query pf *float64 } @@ -72,61 +62,55 @@ var yyExca = [...]int{ -1, 3, 1, 3, -2, 5, - -1, 9, - 8, 29, - -2, 8, - -1, 12, - 8, 28, - -2, 12, } const yyPrivate = 57344 -const yyLast = 43 +const yyLast = 42 var yyAct = [...]int{ - 18, 17, 19, 24, 23, 15, 31, 22, 20, 21, - 30, 27, 23, 23, 3, 22, 22, 14, 29, 26, - 16, 25, 28, 35, 33, 23, 23, 32, 22, 22, - 34, 9, 12, 1, 5, 6, 2, 11, 4, 13, - 7, 8, 10, + 17, 16, 18, 23, 22, 30, 3, 21, 19, 20, + 29, 26, 22, 22, 1, 21, 21, 15, 28, 25, + 24, 27, 34, 14, 22, 13, 31, 21, 32, 33, + 22, 9, 11, 21, 5, 6, 2, 10, 4, 12, + 7, 8, } var yyPact = [...]int{ - 28, -1000, -1000, 28, 27, -1000, -1000, -1000, 8, -9, - 12, -1000, -1000, -1000, -1000, -1000, -3, -11, -1000, -1000, - 6, 5, -1000, -4, -1000, -1000, 19, -1000, -1000, 18, - -1000, -1000, -1000, -1000, -1000, -1000, + 28, -1000, -1000, 28, 27, -1000, -1000, -1000, 16, 9, + -1000, -1000, -1000, -1000, -1000, -3, -11, -1000, -1000, 6, + 5, -1000, -5, -1000, -1000, 23, -1000, -1000, 17, -1000, + -1000, -1000, -1000, -1000, -1000, } var yyPgo = [...]int{ - 0, 0, 42, 41, 39, 38, 33, 36, 14, + 0, 0, 41, 39, 38, 14, 36, 6, } var yyR1 = [...]int{ - 0, 6, 7, 7, 8, 5, 5, 5, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 4, 4, 1, 1, 2, 2, + 0, 5, 6, 6, 7, 4, 4, 4, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 3, 3, 1, 1, } var yyR2 = [...]int{ 0, 1, 2, 1, 3, 0, 1, 1, 1, 2, 4, 1, 1, 3, 3, 3, 4, 5, 4, 5, - 4, 5, 4, 5, 0, 1, 1, 2, 1, 1, + 4, 5, 4, 5, 0, 1, 1, 2, } var yyChk = [...]int{ - -1000, -6, -7, -8, -5, 6, 7, -7, -3, 4, - -2, 10, 5, -4, 9, 14, 8, 4, -1, 5, - 11, 12, 10, 7, 14, -1, 13, 5, -1, 13, - 5, 10, -1, 5, -1, 5, + -1000, -5, -6, -7, -4, 6, 7, -6, -2, 4, + 10, 5, -3, 9, 14, 8, 4, -1, 5, 11, + 12, 10, 7, 14, -1, 13, 5, -1, 13, 5, + 10, -1, 5, -1, 5, } var yyDef = [...]int{ - 5, -2, 1, -2, 0, 6, 7, 2, 24, -2, - 0, 11, -2, 4, 25, 9, 0, 13, 
14, 15, - 0, 0, 26, 0, 10, 16, 0, 20, 18, 0, - 22, 27, 17, 21, 19, 23, + 5, -2, 1, -2, 0, 6, 7, 2, 24, 8, + 11, 12, 4, 25, 9, 0, 13, 14, 15, 0, + 0, 26, 0, 10, 16, 0, 20, 18, 0, 22, + 27, 17, 21, 19, 23, } var yyTok1 = [...]int{ @@ -481,353 +465,254 @@ yydefault: case 1: yyDollar = yyS[yypt-1 : yypt+1] -//line query_string.y:41 +//line query_string.y:32 { - logDebugGrammar("INPUT") + yylex.(*lexerWrapper).logDebugGrammarf("INPUT") } case 2: yyDollar = yyS[yypt-2 : yypt+1] -//line query_string.y:46 +//line query_string.y:37 { - logDebugGrammar("SEARCH PARTS") + yylex.(*lexerWrapper).logDebugGrammarf("SEARCH PARTS") } case 3: yyDollar = yyS[yypt-1 : yypt+1] -//line query_string.y:50 +//line query_string.y:41 { - logDebugGrammar("SEARCH PART") + yylex.(*lexerWrapper).logDebugGrammarf("SEARCH PART") } case 4: yyDollar = yyS[yypt-3 : yypt+1] -//line query_string.y:55 +//line query_string.y:46 { - query := yyDollar[2].q + q := yyDollar[2].q if yyDollar[3].pf != nil { - if query, ok := query.(BoostableQuery); ok { - query.SetBoost(*yyDollar[3].pf) + var err error + q, err = queryStringSetBoost(yyDollar[2].q, *yyDollar[3].pf) + if err != nil { + yylex.(*lexerWrapper).lex.Error(err.Error()) } } switch yyDollar[1].n { case queryShould: - yylex.(*lexerWrapper).query.AddShould(query) + yylex.(*lexerWrapper).query.AddShould(q) case queryMust: - yylex.(*lexerWrapper).query.AddMust(query) + yylex.(*lexerWrapper).query.AddMust(q) case queryMustNot: - yylex.(*lexerWrapper).query.AddMustNot(query) + yylex.(*lexerWrapper).query.AddMustNot(q) } } case 5: yyDollar = yyS[yypt-0 : yypt+1] -//line query_string.y:74 +//line query_string.y:67 { yyVAL.n = queryShould } case 6: yyDollar = yyS[yypt-1 : yypt+1] -//line query_string.y:78 +//line query_string.y:71 { - logDebugGrammar("PLUS") + yylex.(*lexerWrapper).logDebugGrammarf("PLUS") yyVAL.n = queryMust } case 7: yyDollar = yyS[yypt-1 : yypt+1] -//line query_string.y:83 +//line query_string.y:76 { - logDebugGrammar("MINUS") + 
yylex.(*lexerWrapper).logDebugGrammarf("MINUS") yyVAL.n = queryMustNot } case 8: yyDollar = yyS[yypt-1 : yypt+1] -//line query_string.y:89 +//line query_string.y:82 { - str := yyDollar[1].s - logDebugGrammar("STRING - %s", str) - var q FieldableQuery - if strings.HasPrefix(str, "/") && strings.HasSuffix(str, "/") { - q = NewRegexpQuery(str[1 : len(str)-1]) - } else if strings.ContainsAny(str, "*?") { - q = NewWildcardQuery(str) - } else { - q = NewMatchQuery(str) - } - yyVAL.q = q + yylex.(*lexerWrapper).logDebugGrammarf("STRING - %s", yyDollar[1].s) + yyVAL.q = queryStringStringToken(yylex, "", yyDollar[1].s) } case 9: yyDollar = yyS[yypt-2 : yypt+1] -//line query_string.y:103 +//line query_string.y:87 { - str := yyDollar[1].s - fuzziness, err := strconv.ParseFloat(yyDollar[2].s, 64) + yylex.(*lexerWrapper).logDebugGrammarf("FUZZY STRING - %s %s", yyDollar[1].s, yyDollar[2].s) + q, err := queryStringStringTokenFuzzy(yylex, "", yyDollar[1].s, yyDollar[2].s) if err != nil { - yylex.(*lexerWrapper).lex.Error(fmt.Sprintf("invalid fuzziness value: %v", err)) + yylex.(*lexerWrapper).lex.Error(err.Error()) } - logDebugGrammar("FUZZY STRING - %s %f", str, fuzziness) - q := NewMatchQuery(str) - q.SetFuzziness(int(fuzziness)) yyVAL.q = q } case 10: yyDollar = yyS[yypt-4 : yypt+1] -//line query_string.y:115 +//line query_string.y:96 { - field := yyDollar[1].s - str := yyDollar[3].s - fuzziness, err := strconv.ParseFloat(yyDollar[4].s, 64) + yylex.(*lexerWrapper).logDebugGrammarf("FIELD - %s FUZZY STRING - %s %s", yyDollar[1].s, yyDollar[3].s, yyDollar[4].s) + q, err := queryStringStringTokenFuzzy(yylex, yyDollar[1].s, yyDollar[3].s, yyDollar[4].s) if err != nil { - yylex.(*lexerWrapper).lex.Error(fmt.Sprintf("invalid fuzziness value: %v", err)) + yylex.(*lexerWrapper).lex.Error(err.Error()) } - logDebugGrammar("FIELD - %s FUZZY STRING - %s %f", field, str, fuzziness) - q := NewMatchQuery(str) - q.SetFuzziness(int(fuzziness)) - q.SetField(field) yyVAL.q = q } case 11: 
yyDollar = yyS[yypt-1 : yypt+1] -//line query_string.y:129 +//line query_string.y:105 { - str := yyDollar[1].s - logDebugGrammar("STRING - %s", str) - q1 := NewMatchQuery(str) - val, err := strconv.ParseFloat(yyDollar[1].s, 64) + yylex.(*lexerWrapper).logDebugGrammarf("STRING - %s", yyDollar[1].s) + q, err := queryStringNumberToken(yylex, "", yyDollar[1].s) if err != nil { - yylex.(*lexerWrapper).lex.Error(fmt.Sprintf("error parsing number: %v", err)) + yylex.(*lexerWrapper).lex.Error(err.Error()) } - inclusive := true - q2 := NewNumericRangeInclusiveQuery(&val, &val, &inclusive, &inclusive) - q := NewDisjunctionQuery([]Query{q1, q2}) - q.queryStringMode = true yyVAL.q = q } case 12: yyDollar = yyS[yypt-1 : yypt+1] -//line query_string.y:144 +//line query_string.y:114 { - phrase := yyDollar[1].s - logDebugGrammar("PHRASE - %s", phrase) - q := NewMatchPhraseQuery(phrase) - yyVAL.q = q + yylex.(*lexerWrapper).logDebugGrammarf("PHRASE - %s", yyDollar[1].s) + yyVAL.q = queryStringPhraseToken("", yyDollar[1].s) } case 13: yyDollar = yyS[yypt-3 : yypt+1] -//line query_string.y:151 +//line query_string.y:119 { - field := yyDollar[1].s - str := yyDollar[3].s - logDebugGrammar("FIELD - %s STRING - %s", field, str) - var q FieldableQuery - if strings.HasPrefix(str, "/") && strings.HasSuffix(str, "/") { - q = NewRegexpQuery(str[1 : len(str)-1]) - } else if strings.ContainsAny(str, "*?") { - q = NewWildcardQuery(str) - } else { - q = NewMatchQuery(str) - } - q.SetField(field) - yyVAL.q = q + yylex.(*lexerWrapper).logDebugGrammarf("FIELD - %s STRING - %s", yyDollar[1].s, yyDollar[3].s) + yyVAL.q = queryStringStringToken(yylex, yyDollar[1].s, yyDollar[3].s) } case 14: yyDollar = yyS[yypt-3 : yypt+1] -//line query_string.y:167 +//line query_string.y:124 { - field := yyDollar[1].s - str := yyDollar[3].s - logDebugGrammar("FIELD - %s STRING - %s", field, str) - q1 := NewMatchQuery(str) - q1.SetField(field) - val, err := strconv.ParseFloat(yyDollar[3].s, 64) + 
yylex.(*lexerWrapper).logDebugGrammarf("FIELD - %s STRING - %s", yyDollar[1].s, yyDollar[3].s) + q, err := queryStringNumberToken(yylex, yyDollar[1].s, yyDollar[3].s) if err != nil { - yylex.(*lexerWrapper).lex.Error(fmt.Sprintf("error parsing number: %v", err)) + yylex.(*lexerWrapper).lex.Error(err.Error()) } - inclusive := true - q2 := NewNumericRangeInclusiveQuery(&val, &val, &inclusive, &inclusive) - q2.SetField(field) - q := NewDisjunctionQuery([]Query{q1, q2}) - q.queryStringMode = true yyVAL.q = q } case 15: yyDollar = yyS[yypt-3 : yypt+1] -//line query_string.y:185 +//line query_string.y:133 { - field := yyDollar[1].s - phrase := yyDollar[3].s - logDebugGrammar("FIELD - %s PHRASE - %s", field, phrase) - q := NewMatchPhraseQuery(phrase) - q.SetField(field) - yyVAL.q = q + yylex.(*lexerWrapper).logDebugGrammarf("FIELD - %s PHRASE - %s", yyDollar[1].s, yyDollar[3].s) + yyVAL.q = queryStringPhraseToken(yyDollar[1].s, yyDollar[3].s) } case 16: yyDollar = yyS[yypt-4 : yypt+1] -//line query_string.y:194 +//line query_string.y:138 { - field := yyDollar[1].s - min, err := strconv.ParseFloat(yyDollar[4].s, 64) + yylex.(*lexerWrapper).logDebugGrammarf("FIELD - GREATER THAN %s", yyDollar[4].s) + q, err := queryStringNumericRangeGreaterThanOrEqual(yyDollar[1].s, yyDollar[4].s, false) if err != nil { - yylex.(*lexerWrapper).lex.Error(fmt.Sprintf("error parsing number: %v", err)) + yylex.(*lexerWrapper).lex.Error(err.Error()) } - minInclusive := false - logDebugGrammar("FIELD - GREATER THAN %f", min) - q := NewNumericRangeInclusiveQuery(&min, nil, &minInclusive, nil) - q.SetField(field) yyVAL.q = q } case 17: yyDollar = yyS[yypt-5 : yypt+1] -//line query_string.y:207 +//line query_string.y:147 { - field := yyDollar[1].s - min, err := strconv.ParseFloat(yyDollar[5].s, 64) + yylex.(*lexerWrapper).logDebugGrammarf("FIELD - GREATER THAN OR EQUAL %s", yyDollar[5].s) + q, err := queryStringNumericRangeGreaterThanOrEqual(yyDollar[1].s, yyDollar[5].s, true) if err != nil { - 
yylex.(*lexerWrapper).lex.Error(fmt.Sprintf("error parsing number: %v", err)) + yylex.(*lexerWrapper).lex.Error(err.Error()) } - minInclusive := true - logDebugGrammar("FIELD - GREATER THAN OR EQUAL %f", min) - q := NewNumericRangeInclusiveQuery(&min, nil, &minInclusive, nil) - q.SetField(field) yyVAL.q = q } case 18: yyDollar = yyS[yypt-4 : yypt+1] -//line query_string.y:220 +//line query_string.y:156 { - field := yyDollar[1].s - max, err := strconv.ParseFloat(yyDollar[4].s, 64) + yylex.(*lexerWrapper).logDebugGrammarf("FIELD - LESS THAN %s", yyDollar[4].s) + q, err := queryStringNumericRangeLessThanOrEqual(yyDollar[1].s, yyDollar[4].s, false) if err != nil { - yylex.(*lexerWrapper).lex.Error(fmt.Sprintf("error parsing number: %v", err)) + yylex.(*lexerWrapper).lex.Error(err.Error()) } - maxInclusive := false - logDebugGrammar("FIELD - LESS THAN %f", max) - q := NewNumericRangeInclusiveQuery(nil, &max, nil, &maxInclusive) - q.SetField(field) yyVAL.q = q } case 19: yyDollar = yyS[yypt-5 : yypt+1] -//line query_string.y:233 +//line query_string.y:165 { - field := yyDollar[1].s - max, err := strconv.ParseFloat(yyDollar[5].s, 64) + yylex.(*lexerWrapper).logDebugGrammarf("FIELD - LESS THAN OR EQUAL %s", yyDollar[5].s) + q, err := queryStringNumericRangeLessThanOrEqual(yyDollar[1].s, yyDollar[5].s, true) if err != nil { - yylex.(*lexerWrapper).lex.Error(fmt.Sprintf("error parsing number: %v", err)) + yylex.(*lexerWrapper).lex.Error(err.Error()) } - maxInclusive := true - logDebugGrammar("FIELD - LESS THAN OR EQUAL %f", max) - q := NewNumericRangeInclusiveQuery(nil, &max, nil, &maxInclusive) - q.SetField(field) yyVAL.q = q } case 20: yyDollar = yyS[yypt-4 : yypt+1] -//line query_string.y:246 +//line query_string.y:174 { - field := yyDollar[1].s - minInclusive := false - phrase := yyDollar[4].s - - logDebugGrammar("FIELD - GREATER THAN DATE %s", phrase) - minTime, err := queryTimeFromString(phrase) + yylex.(*lexerWrapper).logDebugGrammarf("FIELD - GREATER THAN DATE %s", 
yyDollar[4].s) + q, err := queryStringDateRangeGreaterThanOrEqual(yylex, yyDollar[1].s, yyDollar[4].s, false) if err != nil { - yylex.(*lexerWrapper).lex.Error(fmt.Sprintf("invalid time: %v", err)) + yylex.(*lexerWrapper).lex.Error(err.Error()) } - q := NewDateRangeInclusiveQuery(minTime, time.Time{}, &minInclusive, nil) - q.SetField(field) yyVAL.q = q } case 21: yyDollar = yyS[yypt-5 : yypt+1] -//line query_string.y:261 +//line query_string.y:183 { - field := yyDollar[1].s - minInclusive := true - phrase := yyDollar[5].s - - logDebugGrammar("FIELD - GREATER THAN OR EQUAL DATE %s", phrase) - minTime, err := queryTimeFromString(phrase) + yylex.(*lexerWrapper).logDebugGrammarf("FIELD - GREATER THAN OR EQUAL DATE %s", yyDollar[5].s) + q, err := queryStringDateRangeGreaterThanOrEqual(yylex, yyDollar[1].s, yyDollar[5].s, true) if err != nil { - yylex.(*lexerWrapper).lex.Error(fmt.Sprintf("invalid time: %v", err)) + yylex.(*lexerWrapper).lex.Error(err.Error()) } - q := NewDateRangeInclusiveQuery(minTime, time.Time{}, &minInclusive, nil) - q.SetField(field) yyVAL.q = q } case 22: yyDollar = yyS[yypt-4 : yypt+1] -//line query_string.y:276 +//line query_string.y:192 { - field := yyDollar[1].s - maxInclusive := false - phrase := yyDollar[4].s - - logDebugGrammar("FIELD - LESS THAN DATE %s", phrase) - maxTime, err := queryTimeFromString(phrase) + yylex.(*lexerWrapper).logDebugGrammarf("FIELD - LESS THAN DATE %s", yyDollar[4].s) + q, err := queryStringDateRangeLessThanOrEqual(yylex, yyDollar[1].s, yyDollar[4].s, false) if err != nil { - yylex.(*lexerWrapper).lex.Error(fmt.Sprintf("invalid time: %v", err)) + yylex.(*lexerWrapper).lex.Error(err.Error()) } - q := NewDateRangeInclusiveQuery(time.Time{}, maxTime, nil, &maxInclusive) - q.SetField(field) yyVAL.q = q } case 23: yyDollar = yyS[yypt-5 : yypt+1] -//line query_string.y:291 +//line query_string.y:201 { - field := yyDollar[1].s - maxInclusive := true - phrase := yyDollar[5].s - - logDebugGrammar("FIELD - LESS THAN OR EQUAL 
DATE %s", phrase) - maxTime, err := queryTimeFromString(phrase) + yylex.(*lexerWrapper).logDebugGrammarf("FIELD - LESS THAN OR EQUAL DATE %s", yyDollar[5].s) + q, err := queryStringDateRangeLessThanOrEqual(yylex, yyDollar[1].s, yyDollar[5].s, true) if err != nil { - yylex.(*lexerWrapper).lex.Error(fmt.Sprintf("invalid time: %v", err)) + yylex.(*lexerWrapper).lex.Error(err.Error()) } - q := NewDateRangeInclusiveQuery(time.Time{}, maxTime, nil, &maxInclusive) - q.SetField(field) yyVAL.q = q } case 24: yyDollar = yyS[yypt-0 : yypt+1] -//line query_string.y:307 +//line query_string.y:211 { yyVAL.pf = nil } case 25: yyDollar = yyS[yypt-1 : yypt+1] -//line query_string.y:311 +//line query_string.y:215 { yyVAL.pf = nil - boost, err := strconv.ParseFloat(yyDollar[1].s, 64) + yylex.(*lexerWrapper).logDebugGrammarf("BOOST %s", yyDollar[1].s) + boost, err := queryStringParseBoost(yyDollar[1].s) if err != nil { - yylex.(*lexerWrapper).lex.Error(fmt.Sprintf("invalid boost value: %v", err)) + yylex.(*lexerWrapper).lex.Error(err.Error()) } else { yyVAL.pf = &boost } - logDebugGrammar("BOOST %f", boost) } case 26: yyDollar = yyS[yypt-1 : yypt+1] -//line query_string.y:323 +//line query_string.y:227 { yyVAL.s = yyDollar[1].s } case 27: yyDollar = yyS[yypt-2 : yypt+1] -//line query_string.y:327 +//line query_string.y:231 { yyVAL.s = "-" + yyDollar[2].s } - case 28: - yyDollar = yyS[yypt-1 : yypt+1] -//line query_string.y:332 - { - yyVAL.s = yyDollar[1].s - } - case 29: - yyDollar = yyS[yypt-1 : yypt+1] -//line query_string.y:336 - { - yyVAL.s = yyDollar[1].s - } } goto yystack /* stack new state and value */ } diff --git a/vendor/github.com/blevesearch/bleve/v2/search/query/query_string_lex.go b/vendor/github.com/blugelabs/query_string/query_string_lex.go similarity index 83% rename from vendor/github.com/blevesearch/bleve/v2/search/query/query_string_lex.go rename to vendor/github.com/blugelabs/query_string/query_string_lex.go index 3a9cf2398..da07d7754 100644 --- 
a/vendor/github.com/blevesearch/bleve/v2/search/query/query_string_lex.go +++ b/vendor/github.com/blugelabs/query_string/query_string_lex.go @@ -1,4 +1,4 @@ -// Copyright (c) 2016 Couchbase, Inc. +// Copyright (c) 2020 Couchbase, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,11 +12,12 @@ // See the License for the specific language governing permissions and // limitations under the License. -package query +package querystr import ( "bufio" "io" + "log" "strings" "unicode" ) @@ -44,6 +45,8 @@ type queryStringLex struct { nextRune rune nextRuneSize int atEOF bool + debugLexer bool + logger *log.Logger } func (l *queryStringLex) reset() { @@ -82,11 +85,13 @@ func (l *queryStringLex) Lex(lval *yySymType) int { return rv } -func newQueryStringLex(in io.Reader) *queryStringLex { +func newQueryStringLex(in io.Reader, options QueryStringOptions) *queryStringLex { return &queryStringLex{ in: bufio.NewReader(in), currState: startState, currConsumed: true, + debugLexer: options.debugLexer, + logger: options.logger, } } @@ -147,7 +152,7 @@ func inPhraseState(l *queryStringLex, next rune, eof bool) (lexState, bool) { l.nextToken = &yySymType{ s: l.buf, } - logDebugTokens("PHRASE - '%s'", l.nextToken.s) + l.logDebugTokensf("PHRASE - '%s'", l.nextToken.s) l.reset() return startState, true } else if !l.inEscape && next == '\\' { @@ -169,22 +174,22 @@ func singleCharOpState(l *queryStringLex, next rune, eof bool) (lexState, bool) switch l.buf { case "+": l.nextTokenType = tPLUS - logDebugTokens("PLUS") + l.logDebugTokensf("PLUS") case "-": l.nextTokenType = tMINUS - logDebugTokens("MINUS") + l.logDebugTokensf("MINUS") case ":": l.nextTokenType = tCOLON - logDebugTokens("COLON") + l.logDebugTokensf("COLON") case ">": l.nextTokenType = tGREATER - logDebugTokens("GREATER") + l.logDebugTokensf("GREATER") case "<": l.nextTokenType = tLESS - logDebugTokens("LESS") + 
l.logDebugTokensf("LESS") case "=": l.nextTokenType = tEQUAL - logDebugTokens("EQUAL") + l.logDebugTokensf("EQUAL") } l.reset() @@ -192,46 +197,27 @@ func singleCharOpState(l *queryStringLex, next rune, eof bool) (lexState, bool) } func inBoostState(l *queryStringLex, next rune, eof bool) (lexState, bool) { - - // only a non-escaped space ends the boost (or eof) - if eof || (!l.inEscape && next == ' ') { - // end boost - l.nextTokenType = tBOOST - if l.buf == "" { - l.buf = "1" - } - l.nextToken = &yySymType{ - s: l.buf, - } - logDebugTokens("BOOST - '%s'", l.nextToken.s) - l.reset() - return startState, true - } else if !l.inEscape && next == '\\' { - l.inEscape = true - } else if l.inEscape { - // if in escape, end it - l.inEscape = false - l.buf += unescape(string(next)) - } else { - l.buf += string(next) - } - - return inBoostState, true + return inBoostOrTildeState(l, next, eof, tBOOST, "BOOST", inBoostState) } func inTildeState(l *queryStringLex, next rune, eof bool) (lexState, bool) { + return inBoostOrTildeState(l, next, eof, tTILDE, "TILDE", inTildeState) +} - // only a non-escaped space ends the tilde (or eof) +func inBoostOrTildeState(l *queryStringLex, next rune, eof bool, nextTokenType int, name string, + inState lexState) (lexState, bool) { + + // only a non-escaped space ends the boost (or eof) if eof || (!l.inEscape && next == ' ') { - // end tilde - l.nextTokenType = tTILDE + // end boost or tilde + l.nextTokenType = nextTokenType if l.buf == "" { l.buf = "1" } l.nextToken = &yySymType{ s: l.buf, } - logDebugTokens("TILDE - '%s'", l.nextToken.s) + l.logDebugTokensf("%s - '%s'", name, l.nextToken.s) l.reset() return startState, true } else if !l.inEscape && next == '\\' { @@ -244,7 +230,7 @@ func inTildeState(l *queryStringLex, next rune, eof bool) (lexState, bool) { l.buf += string(next) } - return inTildeState, true + return inState, true } func inNumOrStrState(l *queryStringLex, next rune, eof bool) (lexState, bool) { @@ -255,7 +241,7 @@ func 
inNumOrStrState(l *queryStringLex, next rune, eof bool) (lexState, bool) { l.nextToken = &yySymType{ s: l.buf, } - logDebugTokens("NUMBER - '%s'", l.nextToken.s) + l.logDebugTokensf("NUMBER - '%s'", l.nextToken.s) l.reset() return startState, true } else if !l.inEscape && next == '\\' { @@ -294,7 +280,7 @@ func inStrState(l *queryStringLex, next rune, eof bool) (lexState, bool) { l.nextToken = &yySymType{ s: l.buf, } - logDebugTokens("STRING - '%s'", l.nextToken.s) + l.logDebugTokensf("STRING - '%s'", l.nextToken.s) l.reset() consumed := true @@ -316,8 +302,8 @@ func inStrState(l *queryStringLex, next rune, eof bool) (lexState, bool) { return inStrState, true } -func logDebugTokens(format string, v ...interface{}) { - if debugLexer { - logger.Printf(format, v...) +func (l *queryStringLex) logDebugTokensf(format string, v ...interface{}) { + if l.debugLexer { + l.logger.Printf(format, v...) } } diff --git a/vendor/github.com/blugelabs/query_string/query_string_parser.go b/vendor/github.com/blugelabs/query_string/query_string_parser.go new file mode 100644 index 000000000..35e3f3047 --- /dev/null +++ b/vendor/github.com/blugelabs/query_string/query_string_parser.go @@ -0,0 +1,293 @@ +// Copyright (c) 2020 Couchbase, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// as of Go 1.8 this requires the goyacc external tool +// available from golang.org/x/tools/cmd/goyacc + +//go:generate goyacc -o query_string.y.go query_string.y +//go:generate sed -i.tmp -e 1d query_string.y.go +//go:generate rm query_string.y.go.tmp +//go:generate gofmt -s -w query_string.y.go + +// note: OSX sed and gnu sed handle the -i (in-place) option differently. +// using -i.tmp works on both, at the expense of having to remove +// the unsightly .tmp files + +package querystr + +import ( + "fmt" + "log" + "strconv" + "strings" + "time" + + "github.com/blugelabs/bluge" + "github.com/blugelabs/bluge/analysis" +) + +type QueryStringOptions struct { + debugParser bool + debugLexer bool + debugAnalyzer bool + dateFormat string + logger *log.Logger + analyzers map[string]*analysis.Analyzer + defaultAnalyzer *analysis.Analyzer +} + +func DefaultOptions() QueryStringOptions { + return QueryStringOptions{ + dateFormat: time.RFC3339, + analyzers: make(map[string]*analysis.Analyzer), + } +} + +func (o QueryStringOptions) WithDebugParser(debug bool) QueryStringOptions { + o.debugParser = debug + return o +} + +func (o QueryStringOptions) WithDebugLexer(debug bool) QueryStringOptions { + o.debugLexer = debug + return o +} + +func (o QueryStringOptions) WithDebugAnalyzer(debug bool) QueryStringOptions { + o.debugAnalyzer = debug + return o +} + +func (o QueryStringOptions) WithDateFormat(dateFormat string) QueryStringOptions { + o.dateFormat = dateFormat + return o +} + +func (o QueryStringOptions) WithLogger(logger *log.Logger) QueryStringOptions { + o.logger = logger + return o +} + +func (o QueryStringOptions) WithAnalyzerForField(field string, analyzer *analysis.Analyzer) QueryStringOptions { + o.analyzers[field] = analyzer + return o +} + +func (o QueryStringOptions) WithDefaultAnalyzer(analyzer *analysis.Analyzer) QueryStringOptions { + o.defaultAnalyzer = analyzer + return o +} + +func ParseQueryString(query string, options QueryStringOptions) (rq 
bluge.Query, err error) { + if query == "" { + return bluge.NewMatchNoneQuery(), nil + } + lex := newLexerWrapper(newQueryStringLex(strings.NewReader(query), options), options) + doParse(lex) + + if len(lex.errs) > 0 { + return nil, fmt.Errorf(strings.Join(lex.errs, "\n")) + } + return lex.query, nil +} + +func doParse(lex *lexerWrapper) { + defer func() { + r := recover() + if r != nil { + lex.errs = append(lex.errs, fmt.Sprintf("parse error: %v", r)) + } + }() + + yyParse(lex) +} + +const ( + queryShould = iota + queryMust + queryMustNot +) + +type lexerWrapper struct { + lex yyLexer + errs []string + query *bluge.BooleanQuery + debugParser bool + dateFormat string + logger *log.Logger + opt *QueryStringOptions +} + +func newLexerWrapper(lex yyLexer, options QueryStringOptions) *lexerWrapper { + return &lexerWrapper{ + lex: lex, + query: bluge.NewBooleanQuery(), + debugParser: options.debugParser, + dateFormat: options.dateFormat, + logger: options.logger, + opt: &options, + } +} + +func (l *lexerWrapper) Lex(lval *yySymType) int { + return l.lex.Lex(lval) +} + +func (l *lexerWrapper) Error(s string) { + l.errs = append(l.errs, s) +} + +func (l *lexerWrapper) logDebugGrammarf(format string, v ...interface{}) { + if l.debugParser { + l.logger.Printf(format, v...) + } +} + +func (l *lexerWrapper) logDebugAnalyzerf(format string, v ...interface{}) { + if l.opt.debugAnalyzer { + l.logger.Printf(format, v...) 
+ } +} + +func queryTimeFromString(yylex yyLexer, t string) (time.Time, error) { + rv, err := time.Parse(yylex.(*lexerWrapper).dateFormat, t) + if err != nil { + return time.Time{}, err + } + return rv, nil +} + +func queryStringStringToken(yylex yyLexer, field, str string) bluge.Query { + if strings.HasPrefix(str, "/") && strings.HasSuffix(str, "/") { + return bluge.NewRegexpQuery(str[1 : len(str)-1]).SetField(field) + } else if strings.ContainsAny(str, "*?") { + return bluge.NewWildcardQuery(str).SetField(field) + } + rv := bluge.NewMatchQuery(str).SetField(field) + analyzer := analyzerForField(yylex, field) + if analyzer != nil { + rv.SetAnalyzer(analyzer) + } + return rv +} + +func queryStringStringTokenFuzzy(yylex yyLexer, field, str, fuzziness string) (*bluge.MatchQuery, error) { + fuzzy, err := strconv.ParseFloat(fuzziness, 64) + if err != nil { + return nil, fmt.Errorf("invalid fuzziness value: %v", err) + } + rv := bluge.NewMatchQuery(str).SetFuzziness(int(fuzzy)).SetField(field) + analyzer := analyzerForField(yylex, field) + if analyzer != nil { + rv.SetAnalyzer(analyzer) + } + return rv, nil +} + +func queryStringNumberToken(yylex yyLexer, field, str string) (bluge.Query, error) { + q1 := bluge.NewMatchQuery(str).SetField(field) + val, err := strconv.ParseFloat(str, 64) + if err != nil { + return nil, fmt.Errorf("error parsing number: %v", err) + } + analyzer := analyzerForField(yylex, field) + if analyzer != nil { + q1.SetAnalyzer(analyzer) + } + q2 := bluge.NewNumericRangeInclusiveQuery(val, val, true, true).SetField(field) + return bluge.NewBooleanQuery().AddShould([]bluge.Query{q1, q2}...), nil +} + +func queryStringPhraseToken(field, str string) *bluge.MatchPhraseQuery { + return bluge.NewMatchPhraseQuery(str).SetField(field) +} + +func queryStringNumericRangeGreaterThanOrEqual(field, str string, orEqual bool) (*bluge.NumericRangeQuery, error) { + min, err := strconv.ParseFloat(str, 64) + if err != nil { + return nil, fmt.Errorf("error parsing 
number: %v", err) + } + return bluge.NewNumericRangeInclusiveQuery(min, bluge.MaxNumeric, orEqual, true). + SetField(field), nil +} + +func queryStringNumericRangeLessThanOrEqual(field, str string, orEqual bool) (*bluge.NumericRangeQuery, error) { + max, err := strconv.ParseFloat(str, 64) + if err != nil { + return nil, fmt.Errorf("error parsing number: %v", err) + } + return bluge.NewNumericRangeInclusiveQuery(bluge.MinNumeric, max, true, orEqual). + SetField(field), nil +} + +func queryStringDateRangeGreaterThanOrEqual(yylex yyLexer, field, phrase string, orEqual bool) (*bluge.DateRangeQuery, error) { + minTime, err := queryTimeFromString(yylex, phrase) + if err != nil { + return nil, fmt.Errorf("invalid time: %v", err) + } + return bluge.NewDateRangeInclusiveQuery(minTime, time.Time{}, orEqual, true). + SetField(field), nil +} + +func queryStringDateRangeLessThanOrEqual(yylex yyLexer, field, phrase string, orEqual bool) (*bluge.DateRangeQuery, error) { + maxTime, err := queryTimeFromString(yylex, phrase) + if err != nil { + return nil, fmt.Errorf("invalid time: %v", err) + } + return bluge.NewDateRangeInclusiveQuery(time.Time{}, maxTime, true, orEqual). 
+ SetField(field), nil +} + +const noBoost = 1.0 + +func queryStringParseBoost(str string) (float64, error) { + boost, err := strconv.ParseFloat(str, 64) + if err != nil { + return noBoost, fmt.Errorf("invalid boost value: %v", err) + } + return boost, nil +} + +func queryStringSetBoost(q bluge.Query, b float64) (bluge.Query, error) { + switch v := q.(type) { + case *bluge.MatchQuery: + return v.SetBoost(b), nil + case *bluge.RegexpQuery: + return v.SetBoost(b), nil + case *bluge.WildcardQuery: + return v.SetBoost(b), nil + case *bluge.BooleanQuery: + return v.SetBoost(b), nil + case *bluge.NumericRangeQuery: + return v.SetBoost(b), nil + case *bluge.MatchPhraseQuery: + return v.SetBoost(b), nil + case *bluge.DateRangeQuery: + return v.SetBoost(b), nil + } + return nil, fmt.Errorf("cannot boost %T", q) +} + +func analyzerForField(yylex yyLexer, field string) *analysis.Analyzer { + lw := yylex.(*lexerWrapper) + if analyzer, ok := lw.opt.analyzers[field]; ok { + lw.logDebugAnalyzerf("specific analyzer used for field '%s'", field) + return analyzer + } else if lw.opt.defaultAnalyzer != nil { + lw.logDebugAnalyzerf("default analyzer used for field '%s'", field) + return lw.opt.defaultAnalyzer + } + lw.logDebugAnalyzerf("no analyzer set for field '%s'", field) + return nil +} diff --git a/vendor/github.com/caio/go-tdigest/.gitignore b/vendor/github.com/caio/go-tdigest/.gitignore new file mode 100644 index 000000000..f9f915f62 --- /dev/null +++ b/vendor/github.com/caio/go-tdigest/.gitignore @@ -0,0 +1,2 @@ +vendor/ +go-tdigest.test diff --git a/vendor/github.com/caio/go-tdigest/CONTRIBUTING.md b/vendor/github.com/caio/go-tdigest/CONTRIBUTING.md new file mode 100644 index 000000000..3baa1d164 --- /dev/null +++ b/vendor/github.com/caio/go-tdigest/CONTRIBUTING.md @@ -0,0 +1,42 @@ +# Contributing + +First and foremost: **thank you very much** for your interest in this +project. 
Feel free to skip all this and open your issue / pull request +if reading contribution guidelines is too much for you at this point. +We value your contribution a lot more than we value your ability to +follow rules (and thankfully we can afford to take this approach given +this project's demand). + +Any kind of contribution is welcome. We can always use better docs and +tests (and code, of course). If you think you can improve this project +in any dimension _let's talk_ :-) + +## Guidelines + +Be kind and respectful in all your interactions with people inside +(outside too!) this community; There is no excuse for not showing +basic decency. Sarcasm and generally unconstructive remarks are **not +welcome**. + +### Issues + +When opening and interacting with issues please: + +- Be as clear as possible +- Provide examples if you can + +### Pull Requests + +We expect that pull requests: + +- Have [good commit messages][commits] +- Contain tests for new features +- Target and can be cleanly merged with the `master` branch +- Pass the tests + +[commits]: https://www.git-scm.com/book/en/v2/Distributed-Git-Contributing-to-a-Project#_commit_guidelines + +### Project Management + +Don't bother with labels, milestones, assignments, etc. We don't make +use of those. diff --git a/vendor/github.com/caio/go-tdigest/Gopkg.lock b/vendor/github.com/caio/go-tdigest/Gopkg.lock new file mode 100644 index 000000000..65bf9067a --- /dev/null +++ b/vendor/github.com/caio/go-tdigest/Gopkg.lock @@ -0,0 +1,41 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. 
+ + +[[projects]] + digest = "1:cf63454c1e81409484ded047413228de0f7a3031f0fcd36d4e1db7620c3c7d1b" + name = "github.com/leesper/go_rng" + packages = ["."] + pruneopts = "" + revision = "5344a9259b21627d94279721ab1f27eb029194e7" + +[[projects]] + branch = "master" + digest = "1:ad6d9b2cce40c7c44952d49a6a324a2110db43b4279d9e599db74e45de5ae80c" + name = "gonum.org/v1/gonum" + packages = [ + "blas", + "blas/blas64", + "blas/gonum", + "floats", + "internal/asm/c128", + "internal/asm/f32", + "internal/asm/f64", + "internal/math32", + "lapack", + "lapack/gonum", + "lapack/lapack64", + "mat", + "stat", + ] + pruneopts = "" + revision = "f0982070f509ee139841ca385c44dc22a77c8da8" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + input-imports = [ + "github.com/leesper/go_rng", + "gonum.org/v1/gonum/stat", + ] + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/vendor/github.com/caio/go-tdigest/Gopkg.toml b/vendor/github.com/caio/go-tdigest/Gopkg.toml new file mode 100644 index 000000000..323002ca8 --- /dev/null +++ b/vendor/github.com/caio/go-tdigest/Gopkg.toml @@ -0,0 +1,21 @@ + +# Gopkg.toml example +# +# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md +# for detailed Gopkg.toml documentation. 
+# +# required = ["github.com/user/thing/cmd/thing"] +# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] +# +# [[constraint]] +# name = "github.com/user/project" +# version = "1.0.0" +# +# [[constraint]] +# name = "github.com/user/project2" +# branch = "dev" +# source = "github.com/myfork/project2" +# +# [[override]] +# name = "github.com/x/y" +# version = "2.4.0" diff --git a/vendor/github.com/caio/go-tdigest/LICENSE b/vendor/github.com/caio/go-tdigest/LICENSE new file mode 100644 index 000000000..f5f074401 --- /dev/null +++ b/vendor/github.com/caio/go-tdigest/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2015 Caio Romão Costa Nascimento + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/vendor/github.com/caio/go-tdigest/README.md b/vendor/github.com/caio/go-tdigest/README.md new file mode 100644 index 000000000..b63587025 --- /dev/null +++ b/vendor/github.com/caio/go-tdigest/README.md @@ -0,0 +1,94 @@ +# T-Digest + +A fast map-reduce and parallel streaming friendly data-structure for accurate +quantile approximation. + +This package provides an implementation of Ted Dunning's t-digest data +structure in Go. + +[![GoDoc](https://godoc.org/github.com/caio/go-tdigest?status.svg)](http://godoc.org/github.com/caio/go-tdigest) +[![Go Report Card](https://goreportcard.com/badge/github.com/caio/go-tdigest)](https://goreportcard.com/report/github.com/caio/go-tdigest) + +## Project Status + +This project is actively maintained. We are happy to collaborate on features +and issues if/when they arrive. + +## Installation + +Our releases are tagged and signed following the [Semantic Versioning][semver] +scheme. If you are using a dependency manager such as [dep][], the recommended +way to is go about your business normally: + + go get github.com/caio/go-tdigest + +Otherwise we recommend to use the following so that you don't risk breaking +your build because of an API change: + + go get gopkg.in/caio/go-tdigest.v2 + +[semver]: http://semver.org/ +[dep]: https://github.com/golang/dep + +## Example Usage + +```go +package main + +import ( + "fmt" + "math/rand" + + "github.com/caio/go-tdigest" +) + +func main() { + // Analogue to tdigest.New(tdigest.Compression(100)) + t, _ := tdigest.New() + + for i := 0; i < 10000; i++ { + // Analogue to t.AddWeighted(rand.Float64(), 1) + t.Add(rand.Float64()) + } + + fmt.Printf("p(.5) = %.6f\n", t.Quantile(0.5)) + fmt.Printf("CDF(Quantile(.5)) = %.6f\n", t.CDF(t.Quantile(0.5))) +} +``` + +## Configuration + +You can configure your digest upon creation with options documented +at [options.go](options.go). 
Example: + +```go +// Construct a digest with compression=200 and its own +// (thread-unsafe) RNG seeded with 0xCA10: +digest, _ := tdigest.New( + tdigest.Compression(200), + tdigest.LocalRandomNumberGenerator(0xCA10), +) +``` + +## Porting Existing Code to the v2 API + +It's very easy to migrate to the new API: + +- Replace `tdigest.New(100)` with `tdigest.New()` +- Replace `tdigest.New(number)` with `tdigest.New(tdigest.Compression(number))` +- Replace `Add(x,1)` with `Add(x)` +- Replace `Add(x, weight)` with `AddWeighted(x, weight)` +- Remove any use of `tdigest.Len()` (or [open an issue][issues]) + +[issues]: https://github.com/caio/go-tdigest/issues/new + +## References + +This is a port of the [reference][1] implementation with some ideas borrowed +from the [python version][2]. If you wanna get a quick grasp of how it works +and why it's useful, [this video and companion article is pretty helpful][3]. + +[1]: https://github.com/tdunning/t-digest +[2]: https://github.com/CamDavidsonPilon/tdigest +[3]: https://www.mapr.com/blog/better-anomaly-detection-t-digest-whiteboard-walkthrough + diff --git a/vendor/github.com/caio/go-tdigest/options.go b/vendor/github.com/caio/go-tdigest/options.go new file mode 100644 index 000000000..c30b45954 --- /dev/null +++ b/vendor/github.com/caio/go-tdigest/options.go @@ -0,0 +1,51 @@ +package tdigest + +import "errors" + +type tdigestOption func(*TDigest) error + +// Compression sets the digest compression +// +// The compression parameter rules the threshold in which samples are +// merged together - the more often distinct samples are merged the more +// precision is lost. Compression should be tuned according to your data +// distribution, but a value of 100 (the default) is often good enough. +// +// A higher compression value means holding more centroids in memory +// (thus: better precision), which means a bigger serialization payload, +// higher memory footprint and slower addition of new samples. 
+// +// Compression must be a value greater of equal to 1, will yield an +// error otherwise. +func Compression(compression float64) tdigestOption { // nolint + return func(t *TDigest) error { + if compression < 1 { + return errors.New("Compression should be >= 1") + } + t.compression = compression + return nil + } +} + +// RandomNumberGenerator sets the RNG to be used internally +// +// This allows changing which random number source is used when using +// the TDigest structure (rngs are used when deciding which candidate +// centroid to merge with and when compressing or merging with +// another digest for it increases accuracy). This functionality is +// particularly useful for testing or when you want to disconnect +// your sample collection from the (default) shared random source +// to minimize lock contention. +func RandomNumberGenerator(rng RNG) tdigestOption { // nolint + return func(t *TDigest) error { + t.rng = rng + return nil + } +} + +// LocalRandomNumberGenerator makes the TDigest use the default +// `math/random` functions but with an unshared source that is +// seeded with the given `seed` parameter. 
+func LocalRandomNumberGenerator(seed int64) tdigestOption { // nolint + return RandomNumberGenerator(newLocalRNG(seed)) +} diff --git a/vendor/github.com/caio/go-tdigest/rng.go b/vendor/github.com/caio/go-tdigest/rng.go new file mode 100644 index 000000000..856b6ad9f --- /dev/null +++ b/vendor/github.com/caio/go-tdigest/rng.go @@ -0,0 +1,40 @@ +package tdigest + +import ( + "math/rand" +) + +// RNG is an interface that wraps the needed random number +// generator calls that tdigest uses during its runtime +type RNG interface { + Float32() float32 + Intn(int) int +} + +type globalRNG struct{} + +func (r globalRNG) Float32() float32 { + return rand.Float32() +} + +func (r globalRNG) Intn(i int) int { + return rand.Intn(i) +} + +type localRNG struct { + localRand *rand.Rand +} + +func newLocalRNG(seed int64) *localRNG { + return &localRNG{ + localRand: rand.New(rand.NewSource(seed)), + } +} + +func (r *localRNG) Float32() float32 { + return r.localRand.Float32() +} + +func (r *localRNG) Intn(i int) int { + return r.localRand.Intn(i) +} diff --git a/vendor/github.com/caio/go-tdigest/serialization.go b/vendor/github.com/caio/go-tdigest/serialization.go new file mode 100644 index 000000000..6acb658b1 --- /dev/null +++ b/vendor/github.com/caio/go-tdigest/serialization.go @@ -0,0 +1,202 @@ +package tdigest + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "math" +) + +const smallEncoding int32 = 2 + +var endianess = binary.BigEndian + +// AsBytes serializes the digest into a byte array so it can be +// saved to disk or sent over the wire. +func (t TDigest) AsBytes() ([]byte, error) { + // TODO get rid of the (now) useless error + return t.ToBytes(make([]byte, t.requiredSize())), nil +} + +func (t *TDigest) requiredSize() int { + return 16 + (4 * len(t.summary.means)) + (len(t.summary.counts) * binary.MaxVarintLen64) +} + +// ToBytes serializes into the supplied slice, avoiding allocation if the slice +// is large enough. The result slice is returned. 
+func (t *TDigest) ToBytes(b []byte) []byte { + requiredSize := t.requiredSize() + if cap(b) < requiredSize { + b = make([]byte, requiredSize) + } + + // The binary.Put* functions helpfully don't extend the slice for you, they + // just panic if it's not already long enough. So pre-set the slice length; + // we'll return it with the actual encoded length. + b = b[:cap(b)] + + endianess.PutUint32(b[0:4], uint32(smallEncoding)) + endianess.PutUint64(b[4:12], math.Float64bits(t.compression)) + endianess.PutUint32(b[12:16], uint32(t.summary.Len())) + + var x float64 + idx := 16 + for _, mean := range t.summary.means { + delta := mean - x + x = mean + endianess.PutUint32(b[idx:], math.Float32bits(float32(delta))) + idx += 4 + } + + for _, count := range t.summary.counts { + idx += binary.PutUvarint(b[idx:], count) + } + return b[:idx] +} + +// FromBytes reads a byte buffer with a serialized digest (from AsBytes) +// and deserializes it. +// +// This function creates a new tdigest instance with the provided options, +// but ignores the compression setting since the correct value comes +// from the buffer. +func FromBytes(buf *bytes.Reader, options ...tdigestOption) (*TDigest, error) { + var encoding int32 + err := binary.Read(buf, endianess, &encoding) + if err != nil { + return nil, err + } + + if encoding != smallEncoding { + return nil, fmt.Errorf("Unsupported encoding version: %d", encoding) + } + + t, err := newWithoutSummary(options...) 
+ + if err != nil { + return nil, err + } + + var compression float64 + err = binary.Read(buf, endianess, &compression) + if err != nil { + return nil, err + } + + t.compression = compression + + var numCentroids int32 + err = binary.Read(buf, endianess, &numCentroids) + if err != nil { + return nil, err + } + + if numCentroids < 0 || numCentroids > 1<<22 { + return nil, errors.New("bad number of centroids in serialization") + } + + t.summary = newSummary(int(numCentroids)) + t.summary.means = t.summary.means[:numCentroids] + t.summary.counts = t.summary.counts[:numCentroids] + + var x float64 + for i := 0; i < int(numCentroids); i++ { + var delta float32 + err = binary.Read(buf, endianess, &delta) + if err != nil { + return nil, err + } + x += float64(delta) + t.summary.means[i] = x + } + + for i := 0; i < int(numCentroids); i++ { + count, err := decodeUint(buf) + if err != nil { + return nil, err + } + t.summary.counts[i] = count + t.count += count + } + + return t, nil +} + +// FromBytes deserializes into the supplied TDigest struct, re-using +// and overwriting any existing buffers. +// +// This method reinitializes the digest from the provided buffer +// discarding any previously collected data. Notice that in case +// of errors this may leave the digest in a unusable state. 
+func (t *TDigest) FromBytes(buf []byte) error { + if len(buf) < 16 { + return errors.New("buffer too small for deserialization") + } + + encoding := int32(endianess.Uint32(buf)) + if encoding != smallEncoding { + return fmt.Errorf("unsupported encoding version: %d", encoding) + } + + compression := math.Float64frombits(endianess.Uint64(buf[4:12])) + numCentroids := int(endianess.Uint32(buf[12:16])) + if numCentroids < 0 || numCentroids > 1<<22 { + return errors.New("bad number of centroids in serialization") + } + + if len(buf) < 16+(4*numCentroids) { + return errors.New("buffer too small for deserialization") + } + + t.count = 0 + t.compression = compression + if t.summary == nil || + cap(t.summary.means) < numCentroids || + cap(t.summary.counts) < numCentroids { + t.summary = newSummary(numCentroids) + } + t.summary.means = t.summary.means[:numCentroids] + t.summary.counts = t.summary.counts[:numCentroids] + + idx := 16 + var x float64 + for i := 0; i < numCentroids; i++ { + delta := math.Float32frombits(endianess.Uint32(buf[idx:])) + idx += 4 + x += float64(delta) + t.summary.means[i] = x + } + + for i := 0; i < numCentroids; i++ { + count, read := binary.Uvarint(buf[idx:]) + if read < 1 { + return errors.New("error decoding varint, this TDigest is now invalid") + } + + idx += read + + t.summary.counts[i] = count + t.count += count + } + + if idx != len(buf) { + return errors.New("buffer has unread data") + } + return nil +} + +func encodeUint(buf *bytes.Buffer, n uint64) error { + var b [binary.MaxVarintLen64]byte + + l := binary.PutUvarint(b[:], n) + + _, err := buf.Write(b[:l]) + + return err +} + +func decodeUint(buf *bytes.Reader) (uint64, error) { + v, err := binary.ReadUvarint(buf) + return v, err +} diff --git a/vendor/github.com/caio/go-tdigest/summary.go b/vendor/github.com/caio/go-tdigest/summary.go new file mode 100644 index 000000000..f7c90672e --- /dev/null +++ b/vendor/github.com/caio/go-tdigest/summary.go @@ -0,0 +1,206 @@ +package tdigest + 
+import ( + "fmt" + "math" + "sort" +) + +type summary struct { + means []float64 + counts []uint64 +} + +func newSummary(initialCapacity int) *summary { + s := &summary{ + means: make([]float64, 0, initialCapacity), + counts: make([]uint64, 0, initialCapacity), + } + return s +} + +func (s *summary) Len() int { + return len(s.means) +} + +func (s *summary) Add(key float64, value uint64) error { + if math.IsNaN(key) { + return fmt.Errorf("Key must not be NaN") + } + if value == 0 { + return fmt.Errorf("Count must be >0") + } + + idx := s.findInsertionIndex(key) + + s.means = append(s.means, math.NaN()) + s.counts = append(s.counts, 0) + + copy(s.means[idx+1:], s.means[idx:]) + copy(s.counts[idx+1:], s.counts[idx:]) + + s.means[idx] = key + s.counts[idx] = value + + return nil +} + +// Always insert to the right +func (s *summary) findInsertionIndex(x float64) int { + // Binary search is only worthwhile if we have a lot of keys. + if len(s.means) < 250 { + for i, mean := range s.means { + if mean > x { + return i + } + } + return len(s.means) + } + + return sort.Search(len(s.means), func(i int) bool { + return s.means[i] > x + }) +} + +// This method is the hotspot when calling Add(), which in turn is called by +// Compress() and Merge(). +func (s *summary) HeadSum(idx int) (sum float64) { + return float64(sumUntilIndex(s.counts, idx)) +} + +func (s *summary) Floor(x float64) int { + return s.findIndex(x) - 1 +} + +func (s *summary) findIndex(x float64) int { + // Binary search is only worthwhile if we have a lot of keys. 
+ if len(s.means) < 250 { + for i, mean := range s.means { + if mean >= x { + return i + } + } + return len(s.means) + } + + return sort.Search(len(s.means), func(i int) bool { + return s.means[i] >= x + }) +} + +func (s *summary) Mean(uncheckedIndex int) float64 { + return s.means[uncheckedIndex] +} + +func (s *summary) Count(uncheckedIndex int) uint64 { + return s.counts[uncheckedIndex] +} + +// return the index of the last item which the sum of counts +// of items before it is less than or equal to `sum`. -1 in +// case no centroid satisfies the requirement. +// Since it's cheap, this also returns the `HeadSum` until +// the found index (i.e. cumSum = HeadSum(FloorSum(x))) +func (s *summary) FloorSum(sum float64) (index int, cumSum float64) { + index = -1 + for i, count := range s.counts { + if cumSum <= sum { + index = i + } else { + break + } + cumSum += float64(count) + } + if index != -1 { + cumSum -= float64(s.counts[index]) + } + return index, cumSum +} + +func (s *summary) setAt(index int, mean float64, count uint64) { + s.means[index] = mean + s.counts[index] = count + s.adjustRight(index) + s.adjustLeft(index) +} + +func (s *summary) adjustRight(index int) { + for i := index + 1; i < len(s.means) && s.means[i-1] > s.means[i]; i++ { + s.means[i-1], s.means[i] = s.means[i], s.means[i-1] + s.counts[i-1], s.counts[i] = s.counts[i], s.counts[i-1] + } +} + +func (s *summary) adjustLeft(index int) { + for i := index - 1; i >= 0 && s.means[i] > s.means[i+1]; i-- { + s.means[i], s.means[i+1] = s.means[i+1], s.means[i] + s.counts[i], s.counts[i+1] = s.counts[i+1], s.counts[i] + } +} + +func (s *summary) ForEach(f func(float64, uint64) bool) { + for i, mean := range s.means { + if !f(mean, s.counts[i]) { + break + } + } +} + +func (s *summary) Perm(rng RNG, f func(float64, uint64) bool) { + for _, i := range perm(rng, s.Len()) { + if !f(s.means[i], s.counts[i]) { + break + } + } +} + +func (s *summary) Clone() *summary { + return &summary{ + means: 
append([]float64{}, s.means...), + counts: append([]uint64{}, s.counts...), + } +} + +// Randomly shuffles summary contents, so they can be added to another summary +// with being pathological. Renders summary invalid. +func (s *summary) shuffle(rng RNG) { + for i := len(s.means) - 1; i > 1; i-- { + s.Swap(i, rng.Intn(i+1)) + } +} + +// for sort.Interface +func (s *summary) Swap(i, j int) { + s.means[i], s.means[j] = s.means[j], s.means[i] + s.counts[i], s.counts[j] = s.counts[j], s.counts[i] +} + +func (s *summary) Less(i, j int) bool { + return s.means[i] < s.means[j] +} + +// A simple loop unroll saves a surprising amount of time. +func sumUntilIndex(s []uint64, idx int) uint64 { + var cumSum uint64 + var i int + for i = idx - 1; i >= 3; i -= 4 { + cumSum += uint64(s[i]) + cumSum += uint64(s[i-1]) + cumSum += uint64(s[i-2]) + cumSum += uint64(s[i-3]) + } + for ; i >= 0; i-- { + cumSum += uint64(s[i]) + } + return cumSum +} + +func perm(rng RNG, n int) []int { + m := make([]int, n) + for i := 1; i < n; i++ { + j := rng.Intn(i + 1) + m[i] = m[j] + m[j] = i + } + return m +} diff --git a/vendor/github.com/caio/go-tdigest/tdigest.go b/vendor/github.com/caio/go-tdigest/tdigest.go new file mode 100644 index 000000000..e1b932c19 --- /dev/null +++ b/vendor/github.com/caio/go-tdigest/tdigest.go @@ -0,0 +1,445 @@ +// Package tdigest provides a highly accurate mergeable data-structure +// for quantile estimation. +// +// Typical T-Digest use cases involve accumulating metrics on several +// distinct nodes of a cluster and then merging them together to get +// a system-wide quantile overview. Things such as: sensory data from +// IoT devices, quantiles over enormous document datasets (think +// ElasticSearch), performance metrics for distributed systems, etc. 
+// +// After you create (and configure, if desired) the digest: +// digest, err := tdigest.New(tdigest.Compression(100)) +// +// You can then use it for registering measurements: +// digest.Add(number) +// +// Estimating quantiles: +// digest.Quantile(0.99) +// +// And merging with another digest: +// digest.Merge(otherDigest) +package tdigest + +import ( + "fmt" + "math" +) + +// TDigest is a quantile approximation data structure. +type TDigest struct { + summary *summary + compression float64 + count uint64 + rng RNG +} + +// New creates a new digest. +// +// By default the digest is constructed with a configuration that +// should be useful for most use-cases. It comes with compression +// set to 100 and uses a local random number generator for +// performance reasons. +func New(options ...tdigestOption) (*TDigest, error) { + tdigest, err := newWithoutSummary(options...) + + if err != nil { + return nil, err + } + + tdigest.summary = newSummary(estimateCapacity(tdigest.compression)) + return tdigest, nil +} + +// Creates a tdigest instance without allocating a summary. +func newWithoutSummary(options ...tdigestOption) (*TDigest, error) { + tdigest := &TDigest{ + compression: 100, + count: 0, + rng: newLocalRNG(1), + } + + for _, option := range options { + err := option(tdigest) + if err != nil { + return nil, err + } + } + + return tdigest, nil +} + +func _quantile(index float64, previousIndex float64, nextIndex float64, previousMean float64, nextMean float64) float64 { + delta := nextIndex - previousIndex + previousWeight := (nextIndex - index) / delta + nextWeight := (index - previousIndex) / delta + return previousMean*previousWeight + nextMean*nextWeight +} + +// Compression returns the TDigest compression. +func (t *TDigest) Compression() float64 { + return t.compression +} + +// Quantile returns the desired percentile estimation. +// +// Values of p must be between 0 and 1 (inclusive), will panic otherwise. 
+func (t *TDigest) Quantile(q float64) float64 { + if q < 0 || q > 1 { + panic("q must be between 0 and 1 (inclusive)") + } + + if t.summary.Len() == 0 { + return math.NaN() + } else if t.summary.Len() == 1 { + return t.summary.Mean(0) + } + + index := q * float64(t.count-1) + previousMean := math.NaN() + previousIndex := float64(0) + next, total := t.summary.FloorSum(index) + + if next > 0 { + previousMean = t.summary.Mean(next - 1) + previousIndex = total - float64(t.summary.Count(next-1)+1)/2 + } + + for { + nextIndex := total + float64(t.summary.Count(next)-1)/2 + if nextIndex >= index { + if math.IsNaN(previousMean) { + // the index is before the 1st centroid + if nextIndex == previousIndex { + return t.summary.Mean(next) + } + // assume linear growth + nextIndex2 := total + float64(t.summary.Count(next)) + float64(t.summary.Count(next+1)-1)/2 + previousMean = (nextIndex2*t.summary.Mean(next) - nextIndex*t.summary.Mean(next+1)) / (nextIndex2 - nextIndex) + } + // common case: two centroids found, the result in in between + return _quantile(index, previousIndex, nextIndex, previousMean, t.summary.Mean(next)) + } else if next+1 == t.summary.Len() { + // the index is after the last centroid + nextIndex2 := float64(t.count - 1) + nextMean2 := (t.summary.Mean(next)*(nextIndex2-previousIndex) - previousMean*(nextIndex2-nextIndex)) / (nextIndex - previousIndex) + return _quantile(index, nextIndex, nextIndex2, t.summary.Mean(next), nextMean2) + } + total += float64(t.summary.Count(next)) + previousMean = t.summary.Mean(next) + previousIndex = nextIndex + next++ + } + // unreachable +} + +// boundedWeightedAverage computes the weighted average of two +// centroids guaranteeing that the result will be between x1 and x2, +// inclusive. 
+// +// Refer to https://github.com/caio/go-tdigest/pull/19 for more details +func boundedWeightedAverage(x1 float64, w1 float64, x2 float64, w2 float64) float64 { + if x1 > x2 { + x1, x2, w1, w2 = x2, x1, w2, w1 + } + result := (x1*w1 + x2*w2) / (w1 + w2) + return math.Max(x1, math.Min(result, x2)) +} + +// AddWeighted registers a new sample in the digest. +// +// It's the main entry point for the digest and very likely the only +// method to be used for collecting samples. The count parameter is for +// when you are registering a sample that occurred multiple times - the +// most common value for this is 1. +// +// This will emit an error if `value` is NaN of if `count` is zero. +func (t *TDigest) AddWeighted(value float64, count uint64) (err error) { + if count == 0 { + return fmt.Errorf("Illegal datapoint ", value, count) + } + + if t.summary.Len() == 0 { + err = t.summary.Add(value, count) + t.count = uint64(count) + return err + } + + begin := t.summary.Floor(value) + if begin == -1 { + begin = 0 + } + + begin, end := t.findNeighbors(begin, value) + + closest := t.chooseMergeCandidate(begin, end, value, count) + + if closest == t.summary.Len() { + err = t.summary.Add(value, count) + if err != nil { + return err + } + } else { + c := float64(t.summary.Count(closest)) + newMean := boundedWeightedAverage(t.summary.Mean(closest), c, value, float64(count)) + t.summary.setAt(closest, newMean, uint64(c)+count) + } + t.count += uint64(count) + + if float64(t.summary.Len()) > 20*t.compression { + err = t.Compress() + } + + return err +} + +// Count returns the total number of samples this digest represents +// +// The result represents how many times Add() was called on a digest +// plus how many samples the digests it has been merged with had. +// This is useful mainly for two scenarios: +// +// - Knowing if there is enough data so you can trust the quantiles +// +// - Knowing if you've registered too many samples already and +// deciding what to do about it. 
+// +// For the second case one approach would be to create a side empty +// digest and start registering samples on it as well as on the old +// (big) one and then discard the bigger one after a certain criterion +// is reached (say, minimum number of samples or a small relative +// error between new and old digests). +func (t TDigest) Count() uint64 { + return t.count +} + +// Add is an alias for AddWeighted(x,1) +// Read the documentation for AddWeighted for more details. +func (t *TDigest) Add(value float64) error { + return t.AddWeighted(value, 1) +} + +// Compress tries to reduce the number of individual centroids stored +// in the digest. +// +// Compression trades off accuracy for performance and happens +// automatically after a certain amount of distinct samples have been +// stored. +// +// At any point in time you may call Compress on a digest, but you +// may completely ignore this and it will compress itself automatically +// after it grows too much. If you are minimizing network traffic +// it might be a good idea to compress before serializing. +func (t *TDigest) Compress() (err error) { + if t.summary.Len() <= 1 { + return nil + } + + oldTree := t.summary + t.summary = newSummary(estimateCapacity(t.compression)) + t.count = 0 + + oldTree.shuffle(t.rng) + oldTree.ForEach(func(mean float64, count uint64) bool { + err = t.AddWeighted(mean, count) + return err == nil + }) + return err +} + +// Merge joins a given digest into itself. +// +// Merging is useful when you have multiple TDigest instances running +// in separate threads and you want to compute quantiles over all the +// samples. This is particularly important on a scatter-gather/map-reduce +// scenario. 
+func (t *TDigest) Merge(other *TDigest) (err error) { + if other.summary.Len() == 0 { + return nil + } + + other.summary.Perm(t.rng, func(mean float64, count uint64) bool { + err = t.AddWeighted(mean, count) + return err == nil + }) + return err +} + +// MergeDestructive joins a given digest into itself rendering +// the other digest invalid. +// +// This works as Merge above but its faster. Using this method +// requires caution as it makes 'other' useless - you must make +// sure you discard it without making further uses of it. +func (t *TDigest) MergeDestructive(other *TDigest) (err error) { + if other.summary.Len() == 0 { + return nil + } + + other.summary.shuffle(t.rng) + other.summary.ForEach(func(mean float64, count uint64) bool { + err = t.AddWeighted(mean, count) + return err == nil + }) + return err +} + +// CDF computes the fraction in which all samples are less than +// or equal to the given value. +func (t *TDigest) CDF(value float64) float64 { + if t.summary.Len() == 0 { + return math.NaN() + } else if t.summary.Len() == 1 { + if value < t.summary.Mean(0) { + return 0 + } + return 1 + } + + // We have at least 2 centroids + left := (t.summary.Mean(1) - t.summary.Mean(0)) / 2 + right := left + tot := 0.0 + + for i := 1; i < t.summary.Len()-1; i++ { + prevMean := t.summary.Mean(i - 1) + if value < prevMean+right { + v := (tot + float64(t.summary.Count(i-1))*interpolate(value, prevMean-left, prevMean+right)) / float64(t.Count()) + if v > 0 { + return v + } + return 0 + } + + tot += float64(t.summary.Count(i - 1)) + left = right + right = (t.summary.Mean(i+1) - t.summary.Mean(i)) / 2 + } + + // last centroid, the summary length is at least two + aIdx := t.summary.Len() - 2 + aMean := t.summary.Mean(aIdx) + if value < aMean+right { + aCount := float64(t.summary.Count(aIdx)) + return (tot + aCount*interpolate(value, aMean-left, aMean+right)) / float64(t.Count()) + } + return 1 +} + +// Clone returns a deep copy of a TDigest. 
+func (t *TDigest) Clone() *TDigest { + return &TDigest{ + summary: t.summary.Clone(), + compression: t.compression, + count: t.count, + rng: t.rng, + } +} + +func interpolate(x, x0, x1 float64) float64 { + return (x - x0) / (x1 - x0) +} + +// ForEachCentroid calls the specified function for each centroid. +// +// Iteration stops when the supplied function returns false, or when all +// centroids have been iterated. +func (t *TDigest) ForEachCentroid(f func(mean float64, count uint64) bool) { + t.summary.ForEach(f) +} + +func (t TDigest) findNeighbors(start int, value float64) (int, int) { + minDistance := math.MaxFloat64 + lastNeighbor := t.summary.Len() + for neighbor := start; neighbor < t.summary.Len(); neighbor++ { + z := math.Abs(t.summary.Mean(neighbor) - value) + if z < minDistance { + start = neighbor + minDistance = z + } else if z > minDistance { + lastNeighbor = neighbor + break + } + } + return start, lastNeighbor +} + +func (t TDigest) chooseMergeCandidate(begin, end int, value float64, count uint64) int { + closest := t.summary.Len() + sum := t.summary.HeadSum(begin) + var n float32 + + for neighbor := begin; neighbor != end; neighbor++ { + c := float64(t.summary.Count(neighbor)) + var q float64 + if t.count == 1 { + q = 0.5 + } else { + q = (sum + (c-1)/2) / float64(t.count-1) + } + k := 4 * float64(t.count) * q * (1 - q) / t.compression + + if c+float64(count) <= k { + n++ + if t.rng.Float32() < 1/n { + closest = neighbor + } + } + sum += c + } + return closest +} + +// TrimmedMean returns the mean of the distribution between the two +// percentiles p1 and p2. +// +// Values of p1 and p2 must be beetween 0 and 1 (inclusive) and p1 +// must be less than p2. Will panic otherwise. 
+func (t *TDigest) TrimmedMean(p1, p2 float64) float64 { + if p1 < 0 || p1 > 1 { + panic("p1 must be between 0 and 1 (inclusive)") + } + if p2 < 0 || p2 > 1 { + panic("p2 must be between 0 and 1 (inclusive)") + } + if p1 >= p2 { + panic("p1 must be lower than p2") + } + + minCount := p1 * float64(t.count) + maxCount := p2 * float64(t.count) + + var trimmedSum, trimmedCount, currCount float64 + for i, mean := range t.summary.means { + count := float64(t.summary.counts[i]) + + nextCount := currCount + count + if nextCount <= minCount { + currCount = nextCount + continue + } + + if currCount < minCount { + count = nextCount - minCount + } + if nextCount > maxCount { + count -= nextCount - maxCount + } + + trimmedSum += count * mean + trimmedCount += count + + if nextCount >= maxCount { + break + } + currCount = nextCount + } + + if trimmedCount == 0 { + return 0 + } + return trimmedSum / trimmedCount +} + +func estimateCapacity(compression float64) int { + return int(compression) * 10 +} diff --git a/vendor/github.com/cespare/xxhash/v2/.travis.yml b/vendor/github.com/cespare/xxhash/v2/.travis.yml new file mode 100644 index 000000000..c516ea88d --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/.travis.yml @@ -0,0 +1,8 @@ +language: go +go: + - "1.x" + - master +env: + - TAGS="" + - TAGS="-tags purego" +script: go test $TAGS -v ./... 
diff --git a/vendor/github.com/steveyen/gtreap/LICENSE b/vendor/github.com/cespare/xxhash/v2/LICENSE.txt similarity index 89% rename from vendor/github.com/steveyen/gtreap/LICENSE rename to vendor/github.com/cespare/xxhash/v2/LICENSE.txt index 26656306f..24b53065f 100644 --- a/vendor/github.com/steveyen/gtreap/LICENSE +++ b/vendor/github.com/cespare/xxhash/v2/LICENSE.txt @@ -1,4 +1,6 @@ -Copyright (C) 2012 Steve Yen +Copyright (c) 2016 Caleb Spare + +MIT License Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the @@ -17,4 +19,4 @@ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/cespare/xxhash/v2/README.md b/vendor/github.com/cespare/xxhash/v2/README.md new file mode 100644 index 000000000..2fd8693c2 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/README.md @@ -0,0 +1,67 @@ +# xxhash + +[![GoDoc](https://godoc.org/github.com/cespare/xxhash?status.svg)](https://godoc.org/github.com/cespare/xxhash) +[![Build Status](https://travis-ci.org/cespare/xxhash.svg?branch=master)](https://travis-ci.org/cespare/xxhash) + +xxhash is a Go implementation of the 64-bit +[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a +high-quality hashing algorithm that is much faster than anything in the Go +standard library. + +This package provides a straightforward API: + +``` +func Sum64(b []byte) uint64 +func Sum64String(s string) uint64 +type Digest struct{ ... } + func New() *Digest +``` + +The `Digest` type implements hash.Hash64. 
Its key methods are: + +``` +func (*Digest) Write([]byte) (int, error) +func (*Digest) WriteString(string) (int, error) +func (*Digest) Sum64() uint64 +``` + +This implementation provides a fast pure-Go implementation and an even faster +assembly implementation for amd64. + +## Compatibility + +This package is in a module and the latest code is in version 2 of the module. +You need a version of Go with at least "minimal module compatibility" to use +github.com/cespare/xxhash/v2: + +* 1.9.7+ for Go 1.9 +* 1.10.3+ for Go 1.10 +* Go 1.11 or later + +I recommend using the latest release of Go. + +## Benchmarks + +Here are some quick benchmarks comparing the pure-Go and assembly +implementations of Sum64. + +| input size | purego | asm | +| --- | --- | --- | +| 5 B | 979.66 MB/s | 1291.17 MB/s | +| 100 B | 7475.26 MB/s | 7973.40 MB/s | +| 4 KB | 17573.46 MB/s | 17602.65 MB/s | +| 10 MB | 17131.46 MB/s | 17142.16 MB/s | + +These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using +the following commands under Go 1.11.2: + +``` +$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes' +$ go test -benchtime 10s -bench '/xxhash,direct,bytes' +``` + +## Projects using this package + +- [InfluxDB](https://github.com/influxdata/influxdb) +- [Prometheus](https://github.com/prometheus/prometheus) +- [FreeCache](https://github.com/coocood/freecache) diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash.go b/vendor/github.com/cespare/xxhash/v2/xxhash.go new file mode 100644 index 000000000..db0b35fbe --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash.go @@ -0,0 +1,236 @@ +// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described +// at http://cyan4973.github.io/xxHash/. 
+package xxhash + +import ( + "encoding/binary" + "errors" + "math/bits" +) + +const ( + prime1 uint64 = 11400714785074694791 + prime2 uint64 = 14029467366897019727 + prime3 uint64 = 1609587929392839161 + prime4 uint64 = 9650029242287828579 + prime5 uint64 = 2870177450012600261 +) + +// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where +// possible in the Go code is worth a small (but measurable) performance boost +// by avoiding some MOVQs. Vars are needed for the asm and also are useful for +// convenience in the Go code in a few places where we need to intentionally +// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the +// result overflows a uint64). +var ( + prime1v = prime1 + prime2v = prime2 + prime3v = prime3 + prime4v = prime4 + prime5v = prime5 +) + +// Digest implements hash.Hash64. +type Digest struct { + v1 uint64 + v2 uint64 + v3 uint64 + v4 uint64 + total uint64 + mem [32]byte + n int // how much of mem is used +} + +// New creates a new Digest that computes the 64-bit xxHash algorithm. +func New() *Digest { + var d Digest + d.Reset() + return &d +} + +// Reset clears the Digest's state so that it can be reused. +func (d *Digest) Reset() { + d.v1 = prime1v + prime2 + d.v2 = prime2 + d.v3 = 0 + d.v4 = -prime1v + d.total = 0 + d.n = 0 +} + +// Size always returns 8 bytes. +func (d *Digest) Size() int { return 8 } + +// BlockSize always returns 32 bytes. +func (d *Digest) BlockSize() int { return 32 } + +// Write adds more data to d. It always returns len(b), nil. +func (d *Digest) Write(b []byte) (n int, err error) { + n = len(b) + d.total += uint64(n) + + if d.n+n < 32 { + // This new data doesn't even fill the current block. + copy(d.mem[d.n:], b) + d.n += n + return + } + + if d.n > 0 { + // Finish off the partial block. 
+ copy(d.mem[d.n:], b) + d.v1 = round(d.v1, u64(d.mem[0:8])) + d.v2 = round(d.v2, u64(d.mem[8:16])) + d.v3 = round(d.v3, u64(d.mem[16:24])) + d.v4 = round(d.v4, u64(d.mem[24:32])) + b = b[32-d.n:] + d.n = 0 + } + + if len(b) >= 32 { + // One or more full blocks left. + nw := writeBlocks(d, b) + b = b[nw:] + } + + // Store any remaining partial block. + copy(d.mem[:], b) + d.n = len(b) + + return +} + +// Sum appends the current hash to b and returns the resulting slice. +func (d *Digest) Sum(b []byte) []byte { + s := d.Sum64() + return append( + b, + byte(s>>56), + byte(s>>48), + byte(s>>40), + byte(s>>32), + byte(s>>24), + byte(s>>16), + byte(s>>8), + byte(s), + ) +} + +// Sum64 returns the current hash. +func (d *Digest) Sum64() uint64 { + var h uint64 + + if d.total >= 32 { + v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 + h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) + h = mergeRound(h, v1) + h = mergeRound(h, v2) + h = mergeRound(h, v3) + h = mergeRound(h, v4) + } else { + h = d.v3 + prime5 + } + + h += d.total + + i, end := 0, d.n + for ; i+8 <= end; i += 8 { + k1 := round(0, u64(d.mem[i:i+8])) + h ^= k1 + h = rol27(h)*prime1 + prime4 + } + if i+4 <= end { + h ^= uint64(u32(d.mem[i:i+4])) * prime1 + h = rol23(h)*prime2 + prime3 + i += 4 + } + for i < end { + h ^= uint64(d.mem[i]) * prime5 + h = rol11(h) * prime1 + i++ + } + + h ^= h >> 33 + h *= prime2 + h ^= h >> 29 + h *= prime3 + h ^= h >> 32 + + return h +} + +const ( + magic = "xxh\x06" + marshaledSize = len(magic) + 8*5 + 32 +) + +// MarshalBinary implements the encoding.BinaryMarshaler interface. +func (d *Digest) MarshalBinary() ([]byte, error) { + b := make([]byte, 0, marshaledSize) + b = append(b, magic...) + b = appendUint64(b, d.v1) + b = appendUint64(b, d.v2) + b = appendUint64(b, d.v3) + b = appendUint64(b, d.v4) + b = appendUint64(b, d.total) + b = append(b, d.mem[:d.n]...) + b = b[:len(b)+len(d.mem)-d.n] + return b, nil +} + +// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. 
+func (d *Digest) UnmarshalBinary(b []byte) error { + if len(b) < len(magic) || string(b[:len(magic)]) != magic { + return errors.New("xxhash: invalid hash state identifier") + } + if len(b) != marshaledSize { + return errors.New("xxhash: invalid hash state size") + } + b = b[len(magic):] + b, d.v1 = consumeUint64(b) + b, d.v2 = consumeUint64(b) + b, d.v3 = consumeUint64(b) + b, d.v4 = consumeUint64(b) + b, d.total = consumeUint64(b) + copy(d.mem[:], b) + b = b[len(d.mem):] + d.n = int(d.total % uint64(len(d.mem))) + return nil +} + +func appendUint64(b []byte, x uint64) []byte { + var a [8]byte + binary.LittleEndian.PutUint64(a[:], x) + return append(b, a[:]...) +} + +func consumeUint64(b []byte) ([]byte, uint64) { + x := u64(b) + return b[8:], x +} + +func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) } +func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) } + +func round(acc, input uint64) uint64 { + acc += input * prime2 + acc = rol31(acc) + acc *= prime1 + return acc +} + +func mergeRound(acc, val uint64) uint64 { + val = round(0, val) + acc ^= val + acc = acc*prime1 + prime4 + return acc +} + +func rol1(x uint64) uint64 { return bits.RotateLeft64(x, 1) } +func rol7(x uint64) uint64 { return bits.RotateLeft64(x, 7) } +func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) } +func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) } +func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) } +func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) } +func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) } +func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) } diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go new file mode 100644 index 000000000..ad14b807f --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go @@ -0,0 +1,13 @@ +// +build !appengine +// +build gc +// +build !purego + +package xxhash + +// Sum64 
computes the 64-bit xxHash digest of b. +// +//go:noescape +func Sum64(b []byte) uint64 + +//go:noescape +func writeBlocks(d *Digest, b []byte) int diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s new file mode 100644 index 000000000..d580e32ae --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s @@ -0,0 +1,215 @@ +// +build !appengine +// +build gc +// +build !purego + +#include "textflag.h" + +// Register allocation: +// AX h +// CX pointer to advance through b +// DX n +// BX loop end +// R8 v1, k1 +// R9 v2 +// R10 v3 +// R11 v4 +// R12 tmp +// R13 prime1v +// R14 prime2v +// R15 prime4v + +// round reads from and advances the buffer pointer in CX. +// It assumes that R13 has prime1v and R14 has prime2v. +#define round(r) \ + MOVQ (CX), R12 \ + ADDQ $8, CX \ + IMULQ R14, R12 \ + ADDQ R12, r \ + ROLQ $31, r \ + IMULQ R13, r + +// mergeRound applies a merge round on the two registers acc and val. +// It assumes that R13 has prime1v, R14 has prime2v, and R15 has prime4v. +#define mergeRound(acc, val) \ + IMULQ R14, val \ + ROLQ $31, val \ + IMULQ R13, val \ + XORQ val, acc \ + IMULQ R13, acc \ + ADDQ R15, acc + +// func Sum64(b []byte) uint64 +TEXT ·Sum64(SB), NOSPLIT, $0-32 + // Load fixed primes. + MOVQ ·prime1v(SB), R13 + MOVQ ·prime2v(SB), R14 + MOVQ ·prime4v(SB), R15 + + // Load slice. + MOVQ b_base+0(FP), CX + MOVQ b_len+8(FP), DX + LEAQ (CX)(DX*1), BX + + // The first loop limit will be len(b)-32. + SUBQ $32, BX + + // Check whether we have at least one block. + CMPQ DX, $32 + JLT noBlocks + + // Set up initial state (v1, v2, v3, v4). + MOVQ R13, R8 + ADDQ R14, R8 + MOVQ R14, R9 + XORQ R10, R10 + XORQ R11, R11 + SUBQ R13, R11 + + // Loop until CX > BX. 
+blockLoop: + round(R8) + round(R9) + round(R10) + round(R11) + + CMPQ CX, BX + JLE blockLoop + + MOVQ R8, AX + ROLQ $1, AX + MOVQ R9, R12 + ROLQ $7, R12 + ADDQ R12, AX + MOVQ R10, R12 + ROLQ $12, R12 + ADDQ R12, AX + MOVQ R11, R12 + ROLQ $18, R12 + ADDQ R12, AX + + mergeRound(AX, R8) + mergeRound(AX, R9) + mergeRound(AX, R10) + mergeRound(AX, R11) + + JMP afterBlocks + +noBlocks: + MOVQ ·prime5v(SB), AX + +afterBlocks: + ADDQ DX, AX + + // Right now BX has len(b)-32, and we want to loop until CX > len(b)-8. + ADDQ $24, BX + + CMPQ CX, BX + JG fourByte + +wordLoop: + // Calculate k1. + MOVQ (CX), R8 + ADDQ $8, CX + IMULQ R14, R8 + ROLQ $31, R8 + IMULQ R13, R8 + + XORQ R8, AX + ROLQ $27, AX + IMULQ R13, AX + ADDQ R15, AX + + CMPQ CX, BX + JLE wordLoop + +fourByte: + ADDQ $4, BX + CMPQ CX, BX + JG singles + + MOVL (CX), R8 + ADDQ $4, CX + IMULQ R13, R8 + XORQ R8, AX + + ROLQ $23, AX + IMULQ R14, AX + ADDQ ·prime3v(SB), AX + +singles: + ADDQ $4, BX + CMPQ CX, BX + JGE finalize + +singlesLoop: + MOVBQZX (CX), R12 + ADDQ $1, CX + IMULQ ·prime5v(SB), R12 + XORQ R12, AX + + ROLQ $11, AX + IMULQ R13, AX + + CMPQ CX, BX + JL singlesLoop + +finalize: + MOVQ AX, R12 + SHRQ $33, R12 + XORQ R12, AX + IMULQ R14, AX + MOVQ AX, R12 + SHRQ $29, R12 + XORQ R12, AX + IMULQ ·prime3v(SB), AX + MOVQ AX, R12 + SHRQ $32, R12 + XORQ R12, AX + + MOVQ AX, ret+24(FP) + RET + +// writeBlocks uses the same registers as above except that it uses AX to store +// the d pointer. + +// func writeBlocks(d *Digest, b []byte) int +TEXT ·writeBlocks(SB), NOSPLIT, $0-40 + // Load fixed primes needed for round. + MOVQ ·prime1v(SB), R13 + MOVQ ·prime2v(SB), R14 + + // Load slice. + MOVQ b_base+8(FP), CX + MOVQ b_len+16(FP), DX + LEAQ (CX)(DX*1), BX + SUBQ $32, BX + + // Load vN from d. 
+ MOVQ d+0(FP), AX + MOVQ 0(AX), R8 // v1 + MOVQ 8(AX), R9 // v2 + MOVQ 16(AX), R10 // v3 + MOVQ 24(AX), R11 // v4 + + // We don't need to check the loop condition here; this function is + // always called with at least one block of data to process. +blockLoop: + round(R8) + round(R9) + round(R10) + round(R11) + + CMPQ CX, BX + JLE blockLoop + + // Copy vN back to d. + MOVQ R8, 0(AX) + MOVQ R9, 8(AX) + MOVQ R10, 16(AX) + MOVQ R11, 24(AX) + + // The number of bytes written is CX minus the old base pointer. + SUBQ b_base+8(FP), CX + MOVQ CX, ret+32(FP) + + RET diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go new file mode 100644 index 000000000..4a5a82160 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go @@ -0,0 +1,76 @@ +// +build !amd64 appengine !gc purego + +package xxhash + +// Sum64 computes the 64-bit xxHash digest of b. +func Sum64(b []byte) uint64 { + // A simpler version would be + // d := New() + // d.Write(b) + // return d.Sum64() + // but this is faster, particularly for small inputs. 
+ + n := len(b) + var h uint64 + + if n >= 32 { + v1 := prime1v + prime2 + v2 := prime2 + v3 := uint64(0) + v4 := -prime1v + for len(b) >= 32 { + v1 = round(v1, u64(b[0:8:len(b)])) + v2 = round(v2, u64(b[8:16:len(b)])) + v3 = round(v3, u64(b[16:24:len(b)])) + v4 = round(v4, u64(b[24:32:len(b)])) + b = b[32:len(b):len(b)] + } + h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) + h = mergeRound(h, v1) + h = mergeRound(h, v2) + h = mergeRound(h, v3) + h = mergeRound(h, v4) + } else { + h = prime5 + } + + h += uint64(n) + + i, end := 0, len(b) + for ; i+8 <= end; i += 8 { + k1 := round(0, u64(b[i:i+8:len(b)])) + h ^= k1 + h = rol27(h)*prime1 + prime4 + } + if i+4 <= end { + h ^= uint64(u32(b[i:i+4:len(b)])) * prime1 + h = rol23(h)*prime2 + prime3 + i += 4 + } + for ; i < end; i++ { + h ^= uint64(b[i]) * prime5 + h = rol11(h) * prime1 + } + + h ^= h >> 33 + h *= prime2 + h ^= h >> 29 + h *= prime3 + h ^= h >> 32 + + return h +} + +func writeBlocks(d *Digest, b []byte) int { + v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 + n := len(b) + for len(b) >= 32 { + v1 = round(v1, u64(b[0:8:len(b)])) + v2 = round(v2, u64(b[8:16:len(b)])) + v3 = round(v3, u64(b[16:24:len(b)])) + v4 = round(v4, u64(b[24:32:len(b)])) + b = b[32:len(b):len(b)] + } + d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4 + return n - len(b) +} diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go new file mode 100644 index 000000000..fc9bea7a3 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go @@ -0,0 +1,15 @@ +// +build appengine + +// This file contains the safe implementations of otherwise unsafe-using code. + +package xxhash + +// Sum64String computes the 64-bit xxHash digest of s. +func Sum64String(s string) uint64 { + return Sum64([]byte(s)) +} + +// WriteString adds more data to d. It always returns len(s), nil. 
+func (d *Digest) WriteString(s string) (n int, err error) { + return d.Write([]byte(s)) +} diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go new file mode 100644 index 000000000..53bf76efb --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go @@ -0,0 +1,46 @@ +// +build !appengine + +// This file encapsulates usage of unsafe. +// xxhash_safe.go contains the safe implementations. + +package xxhash + +import ( + "reflect" + "unsafe" +) + +// Notes: +// +// See https://groups.google.com/d/msg/golang-nuts/dcjzJy-bSpw/tcZYBzQqAQAJ +// for some discussion about these unsafe conversions. +// +// In the future it's possible that compiler optimizations will make these +// unsafe operations unnecessary: https://golang.org/issue/2205. +// +// Both of these wrapper functions still incur function call overhead since they +// will not be inlined. We could write Go/asm copies of Sum64 and Digest.Write +// for strings to squeeze out a bit more speed. Mid-stack inlining should +// eventually fix this. + +// Sum64String computes the 64-bit xxHash digest of s. +// It may be faster than Sum64([]byte(s)) by avoiding a copy. +func Sum64String(s string) uint64 { + var b []byte + bh := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data + bh.Len = len(s) + bh.Cap = len(s) + return Sum64(b) +} + +// WriteString adds more data to d. It always returns len(s), nil. +// It may be faster than Write([]byte(s)) by avoiding a copy. 
+func (d *Digest) WriteString(s string) (n int, err error) { + var b []byte + bh := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data + bh.Len = len(s) + bh.Cap = len(s) + return d.Write(b) +} diff --git a/vendor/github.com/dgrijalva/jwt-go/.travis.yml b/vendor/github.com/dgrijalva/jwt-go/.travis.yml deleted file mode 100644 index 1027f56cd..000000000 --- a/vendor/github.com/dgrijalva/jwt-go/.travis.yml +++ /dev/null @@ -1,13 +0,0 @@ -language: go - -script: - - go vet ./... - - go test -v ./... - -go: - - 1.3 - - 1.4 - - 1.5 - - 1.6 - - 1.7 - - tip diff --git a/vendor/github.com/dgrijalva/jwt-go/MIGRATION_GUIDE.md b/vendor/github.com/dgrijalva/jwt-go/MIGRATION_GUIDE.md deleted file mode 100644 index 7fc1f793c..000000000 --- a/vendor/github.com/dgrijalva/jwt-go/MIGRATION_GUIDE.md +++ /dev/null @@ -1,97 +0,0 @@ -## Migration Guide from v2 -> v3 - -Version 3 adds several new, frequently requested features. To do so, it introduces a few breaking changes. We've worked to keep these as minimal as possible. This guide explains the breaking changes and how you can quickly update your code. - -### `Token.Claims` is now an interface type - -The most requested feature from the 2.0 verison of this library was the ability to provide a custom type to the JSON parser for claims. This was implemented by introducing a new interface, `Claims`, to replace `map[string]interface{}`. We also included two concrete implementations of `Claims`: `MapClaims` and `StandardClaims`. - -`MapClaims` is an alias for `map[string]interface{}` with built in validation behavior. It is the default claims type when using `Parse`. The usage is unchanged except you must type cast the claims property. - -The old example for parsing a token looked like this.. 
- -```go - if token, err := jwt.Parse(tokenString, keyLookupFunc); err == nil { - fmt.Printf("Token for user %v expires %v", token.Claims["user"], token.Claims["exp"]) - } -``` - -is now directly mapped to... - -```go - if token, err := jwt.Parse(tokenString, keyLookupFunc); err == nil { - claims := token.Claims.(jwt.MapClaims) - fmt.Printf("Token for user %v expires %v", claims["user"], claims["exp"]) - } -``` - -`StandardClaims` is designed to be embedded in your custom type. You can supply a custom claims type with the new `ParseWithClaims` function. Here's an example of using a custom claims type. - -```go - type MyCustomClaims struct { - User string - *StandardClaims - } - - if token, err := jwt.ParseWithClaims(tokenString, &MyCustomClaims{}, keyLookupFunc); err == nil { - claims := token.Claims.(*MyCustomClaims) - fmt.Printf("Token for user %v expires %v", claims.User, claims.StandardClaims.ExpiresAt) - } -``` - -### `ParseFromRequest` has been moved - -To keep this library focused on the tokens without becoming overburdened with complex request processing logic, `ParseFromRequest` and its new companion `ParseFromRequestWithClaims` have been moved to a subpackage, `request`. The method signatues have also been augmented to receive a new argument: `Extractor`. - -`Extractors` do the work of picking the token string out of a request. The interface is simple and composable. 
- -This simple parsing example: - -```go - if token, err := jwt.ParseFromRequest(tokenString, req, keyLookupFunc); err == nil { - fmt.Printf("Token for user %v expires %v", token.Claims["user"], token.Claims["exp"]) - } -``` - -is directly mapped to: - -```go - if token, err := request.ParseFromRequest(req, request.OAuth2Extractor, keyLookupFunc); err == nil { - claims := token.Claims.(jwt.MapClaims) - fmt.Printf("Token for user %v expires %v", claims["user"], claims["exp"]) - } -``` - -There are several concrete `Extractor` types provided for your convenience: - -* `HeaderExtractor` will search a list of headers until one contains content. -* `ArgumentExtractor` will search a list of keys in request query and form arguments until one contains content. -* `MultiExtractor` will try a list of `Extractors` in order until one returns content. -* `AuthorizationHeaderExtractor` will look in the `Authorization` header for a `Bearer` token. -* `OAuth2Extractor` searches the places an OAuth2 token would be specified (per the spec): `Authorization` header and `access_token` argument -* `PostExtractionFilter` wraps an `Extractor`, allowing you to process the content before it's parsed. A simple example is stripping the `Bearer ` text from a header - - -### RSA signing methods no longer accept `[]byte` keys - -Due to a [critical vulnerability](https://auth0.com/blog/2015/03/31/critical-vulnerabilities-in-json-web-token-libraries/), we've decided the convenience of accepting `[]byte` instead of `rsa.PublicKey` or `rsa.PrivateKey` isn't worth the risk of misuse. - -To replace this behavior, we've added two helper methods: `ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error)` and `ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error)`. These are just simple helpers for unpacking PEM encoded PKCS1 and PKCS8 keys. If your keys are encoded any other way, all you need to do is convert them to the `crypto/rsa` package's types. 
- -```go - func keyLookupFunc(*Token) (interface{}, error) { - // Don't forget to validate the alg is what you expect: - if _, ok := token.Method.(*jwt.SigningMethodRSA); !ok { - return nil, fmt.Errorf("Unexpected signing method: %v", token.Header["alg"]) - } - - // Look up key - key, err := lookupPublicKey(token.Header["kid"]) - if err != nil { - return nil, err - } - - // Unpack key from PEM encoded PKCS8 - return jwt.ParseRSAPublicKeyFromPEM(key) - } -``` diff --git a/vendor/github.com/dgrijalva/jwt-go/claims.go b/vendor/github.com/dgrijalva/jwt-go/claims.go deleted file mode 100644 index f0228f02e..000000000 --- a/vendor/github.com/dgrijalva/jwt-go/claims.go +++ /dev/null @@ -1,134 +0,0 @@ -package jwt - -import ( - "crypto/subtle" - "fmt" - "time" -) - -// For a type to be a Claims object, it must just have a Valid method that determines -// if the token is invalid for any supported reason -type Claims interface { - Valid() error -} - -// Structured version of Claims Section, as referenced at -// https://tools.ietf.org/html/rfc7519#section-4.1 -// See examples for how to use this with your own claim types -type StandardClaims struct { - Audience string `json:"aud,omitempty"` - ExpiresAt int64 `json:"exp,omitempty"` - Id string `json:"jti,omitempty"` - IssuedAt int64 `json:"iat,omitempty"` - Issuer string `json:"iss,omitempty"` - NotBefore int64 `json:"nbf,omitempty"` - Subject string `json:"sub,omitempty"` -} - -// Validates time based claims "exp, iat, nbf". -// There is no accounting for clock skew. -// As well, if any of the above claims are not in the token, it will still -// be considered a valid claim. -func (c StandardClaims) Valid() error { - vErr := new(ValidationError) - now := TimeFunc().Unix() - - // The claims below are optional, by default, so if they are set to the - // default value in Go, let's not fail the verification for them. 
- if c.VerifyExpiresAt(now, false) == false { - delta := time.Unix(now, 0).Sub(time.Unix(c.ExpiresAt, 0)) - vErr.Inner = fmt.Errorf("token is expired by %v", delta) - vErr.Errors |= ValidationErrorExpired - } - - if c.VerifyIssuedAt(now, false) == false { - vErr.Inner = fmt.Errorf("Token used before issued") - vErr.Errors |= ValidationErrorIssuedAt - } - - if c.VerifyNotBefore(now, false) == false { - vErr.Inner = fmt.Errorf("token is not valid yet") - vErr.Errors |= ValidationErrorNotValidYet - } - - if vErr.valid() { - return nil - } - - return vErr -} - -// Compares the aud claim against cmp. -// If required is false, this method will return true if the value matches or is unset -func (c *StandardClaims) VerifyAudience(cmp string, req bool) bool { - return verifyAud(c.Audience, cmp, req) -} - -// Compares the exp claim against cmp. -// If required is false, this method will return true if the value matches or is unset -func (c *StandardClaims) VerifyExpiresAt(cmp int64, req bool) bool { - return verifyExp(c.ExpiresAt, cmp, req) -} - -// Compares the iat claim against cmp. -// If required is false, this method will return true if the value matches or is unset -func (c *StandardClaims) VerifyIssuedAt(cmp int64, req bool) bool { - return verifyIat(c.IssuedAt, cmp, req) -} - -// Compares the iss claim against cmp. -// If required is false, this method will return true if the value matches or is unset -func (c *StandardClaims) VerifyIssuer(cmp string, req bool) bool { - return verifyIss(c.Issuer, cmp, req) -} - -// Compares the nbf claim against cmp. 
-// If required is false, this method will return true if the value matches or is unset -func (c *StandardClaims) VerifyNotBefore(cmp int64, req bool) bool { - return verifyNbf(c.NotBefore, cmp, req) -} - -// ----- helpers - -func verifyAud(aud string, cmp string, required bool) bool { - if aud == "" { - return !required - } - if subtle.ConstantTimeCompare([]byte(aud), []byte(cmp)) != 0 { - return true - } else { - return false - } -} - -func verifyExp(exp int64, now int64, required bool) bool { - if exp == 0 { - return !required - } - return now <= exp -} - -func verifyIat(iat int64, now int64, required bool) bool { - if iat == 0 { - return !required - } - return now >= iat -} - -func verifyIss(iss string, cmp string, required bool) bool { - if iss == "" { - return !required - } - if subtle.ConstantTimeCompare([]byte(iss), []byte(cmp)) != 0 { - return true - } else { - return false - } -} - -func verifyNbf(nbf int64, now int64, required bool) bool { - if nbf == 0 { - return !required - } - return now >= nbf -} diff --git a/vendor/github.com/dgrijalva/jwt-go/map_claims.go b/vendor/github.com/dgrijalva/jwt-go/map_claims.go deleted file mode 100644 index 291213c46..000000000 --- a/vendor/github.com/dgrijalva/jwt-go/map_claims.go +++ /dev/null @@ -1,94 +0,0 @@ -package jwt - -import ( - "encoding/json" - "errors" - // "fmt" -) - -// Claims type that uses the map[string]interface{} for JSON decoding -// This is the default claims type if you don't supply one -type MapClaims map[string]interface{} - -// Compares the aud claim against cmp. -// If required is false, this method will return true if the value matches or is unset -func (m MapClaims) VerifyAudience(cmp string, req bool) bool { - aud, _ := m["aud"].(string) - return verifyAud(aud, cmp, req) -} - -// Compares the exp claim against cmp. 
-// If required is false, this method will return true if the value matches or is unset -func (m MapClaims) VerifyExpiresAt(cmp int64, req bool) bool { - switch exp := m["exp"].(type) { - case float64: - return verifyExp(int64(exp), cmp, req) - case json.Number: - v, _ := exp.Int64() - return verifyExp(v, cmp, req) - } - return req == false -} - -// Compares the iat claim against cmp. -// If required is false, this method will return true if the value matches or is unset -func (m MapClaims) VerifyIssuedAt(cmp int64, req bool) bool { - switch iat := m["iat"].(type) { - case float64: - return verifyIat(int64(iat), cmp, req) - case json.Number: - v, _ := iat.Int64() - return verifyIat(v, cmp, req) - } - return req == false -} - -// Compares the iss claim against cmp. -// If required is false, this method will return true if the value matches or is unset -func (m MapClaims) VerifyIssuer(cmp string, req bool) bool { - iss, _ := m["iss"].(string) - return verifyIss(iss, cmp, req) -} - -// Compares the nbf claim against cmp. -// If required is false, this method will return true if the value matches or is unset -func (m MapClaims) VerifyNotBefore(cmp int64, req bool) bool { - switch nbf := m["nbf"].(type) { - case float64: - return verifyNbf(int64(nbf), cmp, req) - case json.Number: - v, _ := nbf.Int64() - return verifyNbf(v, cmp, req) - } - return req == false -} - -// Validates time based claims "exp, iat, nbf". -// There is no accounting for clock skew. -// As well, if any of the above claims are not in the token, it will still -// be considered a valid claim. 
-func (m MapClaims) Valid() error { - vErr := new(ValidationError) - now := TimeFunc().Unix() - - if m.VerifyExpiresAt(now, false) == false { - vErr.Inner = errors.New("Token is expired") - vErr.Errors |= ValidationErrorExpired - } - - if m.VerifyIssuedAt(now, false) == false { - vErr.Inner = errors.New("Token used before issued") - vErr.Errors |= ValidationErrorIssuedAt - } - - if m.VerifyNotBefore(now, false) == false { - vErr.Inner = errors.New("Token is not valid yet") - vErr.Errors |= ValidationErrorNotValidYet - } - - if vErr.valid() { - return nil - } - - return vErr -} diff --git a/vendor/github.com/dgryski/go-metro/LICENSE b/vendor/github.com/dgryski/go-metro/LICENSE new file mode 100644 index 000000000..6243b617c --- /dev/null +++ b/vendor/github.com/dgryski/go-metro/LICENSE @@ -0,0 +1,24 @@ +This package is a mechanical translation of the reference C++ code for +MetroHash, available at https://github.com/jandrewrogers/MetroHash + +The MIT License (MIT) + +Copyright (c) 2016 Damian Gryski + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/dgryski/go-metro/README b/vendor/github.com/dgryski/go-metro/README new file mode 100644 index 000000000..5ecebb385 --- /dev/null +++ b/vendor/github.com/dgryski/go-metro/README @@ -0,0 +1,6 @@ +MetroHash + +This package is a mechanical translation of the reference C++ code for +MetroHash, available at https://github.com/jandrewrogers/MetroHash + +I claim no additional copyright over the original implementation. diff --git a/vendor/github.com/dgryski/go-metro/metro.py b/vendor/github.com/dgryski/go-metro/metro.py new file mode 100644 index 000000000..8dd4d26e6 --- /dev/null +++ b/vendor/github.com/dgryski/go-metro/metro.py @@ -0,0 +1,199 @@ +import peachpy.x86_64 + +k0 = 0xD6D018F5 +k1 = 0xA2AA033B +k2 = 0x62992FC1 +k3 = 0x30BC5B29 + +def advance(p,l,c): + ADD(p,c) + SUB(l,c) + +def imul(r,k): + t = GeneralPurposeRegister64() + MOV(t, k) + IMUL(r, t) + +def update32(v, p,idx, k, vadd): + r = GeneralPurposeRegister64() + MOV(r, [p + idx]) + imul(r, k) + ADD(v, r) + ROR(v, 29) + ADD(v, vadd) + +def final32(v, regs, keys): + r = GeneralPurposeRegister64() + MOV(r, v[regs[1]]) + ADD(r, v[regs[2]]) + imul(r, keys[0]) + ADD(r, v[regs[3]]) + ROR(r, 37) + imul(r, keys[1]) + XOR(v[regs[0]], r) + +seed = Argument(uint64_t) +buffer_base = Argument(ptr()) +buffer_len = Argument(int64_t) +buffer_cap = Argument(int64_t) + +def makeHash(name, args): + with Function(name, args, uint64_t) as function: + + reg_ptr = GeneralPurposeRegister64() + reg_ptr_len = GeneralPurposeRegister64() + reg_hash = GeneralPurposeRegister64() + + LOAD.ARGUMENT(reg_hash, seed) + LOAD.ARGUMENT(reg_ptr, buffer_base) + LOAD.ARGUMENT(reg_ptr_len, buffer_len) + + imul(reg_hash, k0) + r = GeneralPurposeRegister64() + 
MOV(r, k2*k0) + ADD(reg_hash, r) + + after32 = Label("after32") + + CMP(reg_ptr_len, 32) + JL(after32) + v = [GeneralPurposeRegister64() for _ in range(4)] + for i in range(4): + MOV(v[i], reg_hash) + + with Loop() as loop: + update32(v[0], reg_ptr, 0, k0, v[2]) + update32(v[1], reg_ptr, 8, k1, v[3]) + update32(v[2], reg_ptr, 16, k2, v[0]) + update32(v[3], reg_ptr, 24, k3, v[1]) + + ADD(reg_ptr, 32) + SUB(reg_ptr_len, 32) + CMP(reg_ptr_len, 32) + JGE(loop.begin) + + final32(v, [2,0,3,1], [k0, k1]) + final32(v, [3,1,2,0], [k1, k0]) + final32(v, [0,0,2,3], [k0, k1]) + final32(v, [1,1,3,2], [k1, k0]) + + XOR(v[0], v[1]) + ADD(reg_hash, v[0]) + + LABEL(after32) + + after16 = Label("after16") + CMP(reg_ptr_len, 16) + JL(after16) + + for i in range(2): + MOV(v[i], [reg_ptr]) + imul(v[i], k2) + ADD(v[i], reg_hash) + + advance(reg_ptr, reg_ptr_len, 8) + + ROR(v[i], 29) + imul(v[i], k3) + + r = GeneralPurposeRegister64() + MOV(r, v[0]) + imul(r, k0) + ROR(r, 21) + ADD(r, v[1]) + XOR(v[0], r) + + MOV(r, v[1]) + imul(r, k3) + ROR(r, 21) + ADD(r, v[0]) + XOR(v[1], r) + + ADD(reg_hash, v[1]) + + LABEL(after16) + + after8 = Label("after8") + CMP(reg_ptr_len, 8) + JL(after8) + + r = GeneralPurposeRegister64() + MOV(r, [reg_ptr]) + imul(r, k3) + ADD(reg_hash, r) + advance(reg_ptr, reg_ptr_len, 8) + + MOV(r, reg_hash) + ROR(r, 55) + imul(r, k1) + XOR(reg_hash, r) + + LABEL(after8) + + after4 = Label("after4") + CMP(reg_ptr_len, 4) + JL(after4) + + r = GeneralPurposeRegister64() + XOR(r, r) + MOV(r.as_dword, dword[reg_ptr]) + imul(r, k3) + ADD(reg_hash, r) + advance(reg_ptr, reg_ptr_len, 4) + + MOV(r, reg_hash) + ROR(r, 26) + imul(r, k1) + XOR(reg_hash, r) + + LABEL(after4) + + after2 = Label("after2") + CMP(reg_ptr_len, 2) + JL(after2) + + r = GeneralPurposeRegister64() + XOR(r,r) + MOV(r.as_word, word[reg_ptr]) + imul(r, k3) + ADD(reg_hash, r) + advance(reg_ptr, reg_ptr_len, 2) + + MOV(r, reg_hash) + ROR(r, 48) + imul(r, k1) + XOR(reg_hash, r) + + LABEL(after2) + + after1 = 
Label("after1") + CMP(reg_ptr_len, 1) + JL(after1) + + r = GeneralPurposeRegister64() + MOVZX(r, byte[reg_ptr]) + imul(r, k3) + ADD(reg_hash, r) + + MOV(r, reg_hash) + ROR(r, 37) + imul(r, k1) + XOR(reg_hash, r) + + LABEL(after1) + + r = GeneralPurposeRegister64() + MOV(r, reg_hash) + ROR(r, 28) + XOR(reg_hash, r) + + imul(reg_hash, k0) + + MOV(r, reg_hash) + ROR(r, 29) + XOR(reg_hash, r) + + RETURN(reg_hash) + +makeHash("Hash64", (buffer_base, buffer_len, buffer_cap, seed)) +makeHash("Hash64Str", (buffer_base, buffer_len, seed)) \ No newline at end of file diff --git a/vendor/github.com/dgryski/go-metro/metro128.go b/vendor/github.com/dgryski/go-metro/metro128.go new file mode 100644 index 000000000..e8dd8ddbf --- /dev/null +++ b/vendor/github.com/dgryski/go-metro/metro128.go @@ -0,0 +1,94 @@ +package metro + +import "encoding/binary" + +func rotate_right(v uint64, k uint) uint64 { + return (v >> k) | (v << (64 - k)) +} + +func Hash128(buffer []byte, seed uint64) (uint64, uint64) { + + const ( + k0 = 0xC83A91E1 + k1 = 0x8648DBDB + k2 = 0x7BDEC03B + k3 = 0x2F5870A5 + ) + + ptr := buffer + + var v [4]uint64 + + v[0] = (seed - k0) * k3 + v[1] = (seed + k1) * k2 + + if len(ptr) >= 32 { + v[2] = (seed + k0) * k2 + v[3] = (seed - k1) * k3 + + for len(ptr) >= 32 { + v[0] += binary.LittleEndian.Uint64(ptr) * k0 + ptr = ptr[8:] + v[0] = rotate_right(v[0], 29) + v[2] + v[1] += binary.LittleEndian.Uint64(ptr) * k1 + ptr = ptr[8:] + v[1] = rotate_right(v[1], 29) + v[3] + v[2] += binary.LittleEndian.Uint64(ptr) * k2 + ptr = ptr[8:] + v[2] = rotate_right(v[2], 29) + v[0] + v[3] += binary.LittleEndian.Uint64(ptr) * k3 + ptr = ptr[8:] + v[3] = rotate_right(v[3], 29) + v[1] + } + + v[2] ^= rotate_right(((v[0]+v[3])*k0)+v[1], 21) * k1 + v[3] ^= rotate_right(((v[1]+v[2])*k1)+v[0], 21) * k0 + v[0] ^= rotate_right(((v[0]+v[2])*k0)+v[3], 21) * k1 + v[1] ^= rotate_right(((v[1]+v[3])*k1)+v[2], 21) * k0 + } + + if len(ptr) >= 16 { + v[0] += binary.LittleEndian.Uint64(ptr) * k2 + ptr = 
ptr[8:] + v[0] = rotate_right(v[0], 33) * k3 + v[1] += binary.LittleEndian.Uint64(ptr) * k2 + ptr = ptr[8:] + v[1] = rotate_right(v[1], 33) * k3 + v[0] ^= rotate_right((v[0]*k2)+v[1], 45) * k1 + v[1] ^= rotate_right((v[1]*k3)+v[0], 45) * k0 + } + + if len(ptr) >= 8 { + v[0] += binary.LittleEndian.Uint64(ptr) * k2 + ptr = ptr[8:] + v[0] = rotate_right(v[0], 33) * k3 + v[0] ^= rotate_right((v[0]*k2)+v[1], 27) * k1 + } + + if len(ptr) >= 4 { + v[1] += uint64(binary.LittleEndian.Uint32(ptr)) * k2 + ptr = ptr[4:] + v[1] = rotate_right(v[1], 33) * k3 + v[1] ^= rotate_right((v[1]*k3)+v[0], 46) * k0 + } + + if len(ptr) >= 2 { + v[0] += uint64(binary.LittleEndian.Uint16(ptr)) * k2 + ptr = ptr[2:] + v[0] = rotate_right(v[0], 33) * k3 + v[0] ^= rotate_right((v[0]*k2)+v[1], 22) * k1 + } + + if len(ptr) >= 1 { + v[1] += uint64(ptr[0]) * k2 + v[1] = rotate_right(v[1], 33) * k3 + v[1] ^= rotate_right((v[1]*k3)+v[0], 58) * k0 + } + + v[0] += rotate_right((v[0]*k0)+v[1], 13) + v[1] += rotate_right((v[1]*k1)+v[0], 37) + v[0] += rotate_right((v[0]*k2)+v[1], 13) + v[1] += rotate_right((v[1]*k3)+v[0], 37) + + return v[0], v[1] +} diff --git a/vendor/github.com/dgryski/go-metro/metro64.go b/vendor/github.com/dgryski/go-metro/metro64.go new file mode 100644 index 000000000..7901ab6c6 --- /dev/null +++ b/vendor/github.com/dgryski/go-metro/metro64.go @@ -0,0 +1,85 @@ +// +build noasm !amd64 gccgo + +package metro + +import "encoding/binary" + +func Hash64(buffer []byte, seed uint64) uint64 { + + const ( + k0 = 0xD6D018F5 + k1 = 0xA2AA033B + k2 = 0x62992FC1 + k3 = 0x30BC5B29 + ) + + ptr := buffer + + hash := (seed + k2) * k0 + + if len(ptr) >= 32 { + v := [4]uint64{hash, hash, hash, hash} + + for len(ptr) >= 32 { + v[0] += binary.LittleEndian.Uint64(ptr[:8]) * k0 + v[0] = rotate_right(v[0], 29) + v[2] + v[1] += binary.LittleEndian.Uint64(ptr[8:16]) * k1 + v[1] = rotate_right(v[1], 29) + v[3] + v[2] += binary.LittleEndian.Uint64(ptr[16:24]) * k2 + v[2] = rotate_right(v[2], 29) + v[0] + v[3] 
+= binary.LittleEndian.Uint64(ptr[24:32]) * k3 + v[3] = rotate_right(v[3], 29) + v[1] + ptr = ptr[32:] + } + + v[2] ^= rotate_right(((v[0]+v[3])*k0)+v[1], 37) * k1 + v[3] ^= rotate_right(((v[1]+v[2])*k1)+v[0], 37) * k0 + v[0] ^= rotate_right(((v[0]+v[2])*k0)+v[3], 37) * k1 + v[1] ^= rotate_right(((v[1]+v[3])*k1)+v[2], 37) * k0 + hash += v[0] ^ v[1] + } + + if len(ptr) >= 16 { + v0 := hash + (binary.LittleEndian.Uint64(ptr[:8]) * k2) + v0 = rotate_right(v0, 29) * k3 + v1 := hash + (binary.LittleEndian.Uint64(ptr[8:16]) * k2) + v1 = rotate_right(v1, 29) * k3 + v0 ^= rotate_right(v0*k0, 21) + v1 + v1 ^= rotate_right(v1*k3, 21) + v0 + hash += v1 + ptr = ptr[16:] + } + + if len(ptr) >= 8 { + hash += binary.LittleEndian.Uint64(ptr[:8]) * k3 + ptr = ptr[8:] + hash ^= rotate_right(hash, 55) * k1 + } + + if len(ptr) >= 4 { + hash += uint64(binary.LittleEndian.Uint32(ptr[:4])) * k3 + hash ^= rotate_right(hash, 26) * k1 + ptr = ptr[4:] + } + + if len(ptr) >= 2 { + hash += uint64(binary.LittleEndian.Uint16(ptr[:2])) * k3 + ptr = ptr[2:] + hash ^= rotate_right(hash, 48) * k1 + } + + if len(ptr) >= 1 { + hash += uint64(ptr[0]) * k3 + hash ^= rotate_right(hash, 37) * k1 + } + + hash ^= rotate_right(hash, 28) + hash *= k0 + hash ^= rotate_right(hash, 29) + + return hash +} + +func Hash64Str(buffer string, seed uint64) uint64 { + return Hash64([]byte(buffer), seed) +} diff --git a/vendor/github.com/dgryski/go-metro/metro_amd64.s b/vendor/github.com/dgryski/go-metro/metro_amd64.s new file mode 100644 index 000000000..7fa4730a4 --- /dev/null +++ b/vendor/github.com/dgryski/go-metro/metro_amd64.s @@ -0,0 +1,372 @@ +// +build !noasm +// +build !gccgo + +// Generated by PeachPy 0.2.0 from metro.py + +// func Hash64(buffer_base uintptr, buffer_len int64, buffer_cap int64, seed uint64) uint64 +TEXT ·Hash64(SB),4,$0-40 + MOVQ seed+24(FP), AX + MOVQ buffer_base+0(FP), BX + MOVQ buffer_len+8(FP), CX + MOVQ $3603962101, DX + IMULQ DX, AX + MOVQ $5961697176435608501, DX + ADDQ DX, AX + CMPQ 
CX, $32 + JLT after32 + MOVQ AX, DX + MOVQ AX, DI + MOVQ AX, SI + MOVQ AX, BP +loop_begin: + MOVQ 0(BX), R8 + MOVQ $3603962101, R9 + IMULQ R9, R8 + ADDQ R8, DX + RORQ $29, DX + ADDQ SI, DX + MOVQ 8(BX), R8 + MOVQ $2729050939, R9 + IMULQ R9, R8 + ADDQ R8, DI + RORQ $29, DI + ADDQ BP, DI + MOVQ 16(BX), R8 + MOVQ $1654206401, R9 + IMULQ R9, R8 + ADDQ R8, SI + RORQ $29, SI + ADDQ DX, SI + MOVQ 24(BX), R8 + MOVQ $817650473, R9 + IMULQ R9, R8 + ADDQ R8, BP + RORQ $29, BP + ADDQ DI, BP + ADDQ $32, BX + SUBQ $32, CX + CMPQ CX, $32 + JGE loop_begin + MOVQ DX, R8 + ADDQ BP, R8 + MOVQ $3603962101, R9 + IMULQ R9, R8 + ADDQ DI, R8 + RORQ $37, R8 + MOVQ $2729050939, R9 + IMULQ R9, R8 + XORQ R8, SI + MOVQ DI, R8 + ADDQ SI, R8 + MOVQ $2729050939, R9 + IMULQ R9, R8 + ADDQ DX, R8 + RORQ $37, R8 + MOVQ $3603962101, R9 + IMULQ R9, R8 + XORQ R8, BP + MOVQ DX, R8 + ADDQ SI, R8 + MOVQ $3603962101, R9 + IMULQ R9, R8 + ADDQ BP, R8 + RORQ $37, R8 + MOVQ $2729050939, R9 + IMULQ R9, R8 + XORQ R8, DX + MOVQ DI, R8 + ADDQ BP, R8 + MOVQ $2729050939, BP + IMULQ BP, R8 + ADDQ SI, R8 + RORQ $37, R8 + MOVQ $3603962101, SI + IMULQ SI, R8 + XORQ R8, DI + XORQ DI, DX + ADDQ DX, AX +after32: + CMPQ CX, $16 + JLT after16 + MOVQ 0(BX), DX + MOVQ $1654206401, DI + IMULQ DI, DX + ADDQ AX, DX + ADDQ $8, BX + SUBQ $8, CX + RORQ $29, DX + MOVQ $817650473, DI + IMULQ DI, DX + MOVQ 0(BX), DI + MOVQ $1654206401, SI + IMULQ SI, DI + ADDQ AX, DI + ADDQ $8, BX + SUBQ $8, CX + RORQ $29, DI + MOVQ $817650473, SI + IMULQ SI, DI + MOVQ DX, SI + MOVQ $3603962101, BP + IMULQ BP, SI + RORQ $21, SI + ADDQ DI, SI + XORQ SI, DX + MOVQ DI, SI + MOVQ $817650473, BP + IMULQ BP, SI + RORQ $21, SI + ADDQ DX, SI + XORQ SI, DI + ADDQ DI, AX +after16: + CMPQ CX, $8 + JLT after8 + MOVQ 0(BX), DX + MOVQ $817650473, DI + IMULQ DI, DX + ADDQ DX, AX + ADDQ $8, BX + SUBQ $8, CX + MOVQ AX, DX + RORQ $55, DX + MOVQ $2729050939, DI + IMULQ DI, DX + XORQ DX, AX +after8: + CMPQ CX, $4 + JLT after4 + XORQ DX, DX + MOVL 0(BX), DX + MOVQ 
$817650473, DI + IMULQ DI, DX + ADDQ DX, AX + ADDQ $4, BX + SUBQ $4, CX + MOVQ AX, DX + RORQ $26, DX + MOVQ $2729050939, DI + IMULQ DI, DX + XORQ DX, AX +after4: + CMPQ CX, $2 + JLT after2 + XORQ DX, DX + MOVW 0(BX), DX + MOVQ $817650473, DI + IMULQ DI, DX + ADDQ DX, AX + ADDQ $2, BX + SUBQ $2, CX + MOVQ AX, DX + RORQ $48, DX + MOVQ $2729050939, DI + IMULQ DI, DX + XORQ DX, AX +after2: + CMPQ CX, $1 + JLT after1 + MOVBQZX 0(BX), BX + MOVQ $817650473, CX + IMULQ CX, BX + ADDQ BX, AX + MOVQ AX, BX + RORQ $37, BX + MOVQ $2729050939, CX + IMULQ CX, BX + XORQ BX, AX +after1: + MOVQ AX, BX + RORQ $28, BX + XORQ BX, AX + MOVQ $3603962101, BX + IMULQ BX, AX + MOVQ AX, BX + RORQ $29, BX + XORQ BX, AX + MOVQ AX, ret+32(FP) + RET + +// func Hash64Str(buffer_base uintptr, buffer_len int64, seed uint64) uint64 +TEXT ·Hash64Str(SB),4,$0-32 + MOVQ seed+16(FP), AX + MOVQ buffer_base+0(FP), BX + MOVQ buffer_len+8(FP), CX + MOVQ $3603962101, DX + IMULQ DX, AX + MOVQ $5961697176435608501, DX + ADDQ DX, AX + CMPQ CX, $32 + JLT after32 + MOVQ AX, DX + MOVQ AX, DI + MOVQ AX, SI + MOVQ AX, BP +loop_begin: + MOVQ 0(BX), R8 + MOVQ $3603962101, R9 + IMULQ R9, R8 + ADDQ R8, DX + RORQ $29, DX + ADDQ SI, DX + MOVQ 8(BX), R8 + MOVQ $2729050939, R9 + IMULQ R9, R8 + ADDQ R8, DI + RORQ $29, DI + ADDQ BP, DI + MOVQ 16(BX), R8 + MOVQ $1654206401, R9 + IMULQ R9, R8 + ADDQ R8, SI + RORQ $29, SI + ADDQ DX, SI + MOVQ 24(BX), R8 + MOVQ $817650473, R9 + IMULQ R9, R8 + ADDQ R8, BP + RORQ $29, BP + ADDQ DI, BP + ADDQ $32, BX + SUBQ $32, CX + CMPQ CX, $32 + JGE loop_begin + MOVQ DX, R8 + ADDQ BP, R8 + MOVQ $3603962101, R9 + IMULQ R9, R8 + ADDQ DI, R8 + RORQ $37, R8 + MOVQ $2729050939, R9 + IMULQ R9, R8 + XORQ R8, SI + MOVQ DI, R8 + ADDQ SI, R8 + MOVQ $2729050939, R9 + IMULQ R9, R8 + ADDQ DX, R8 + RORQ $37, R8 + MOVQ $3603962101, R9 + IMULQ R9, R8 + XORQ R8, BP + MOVQ DX, R8 + ADDQ SI, R8 + MOVQ $3603962101, R9 + IMULQ R9, R8 + ADDQ BP, R8 + RORQ $37, R8 + MOVQ $2729050939, R9 + IMULQ R9, R8 + XORQ R8, DX + 
MOVQ DI, R8 + ADDQ BP, R8 + MOVQ $2729050939, BP + IMULQ BP, R8 + ADDQ SI, R8 + RORQ $37, R8 + MOVQ $3603962101, SI + IMULQ SI, R8 + XORQ R8, DI + XORQ DI, DX + ADDQ DX, AX +after32: + CMPQ CX, $16 + JLT after16 + MOVQ 0(BX), DX + MOVQ $1654206401, DI + IMULQ DI, DX + ADDQ AX, DX + ADDQ $8, BX + SUBQ $8, CX + RORQ $29, DX + MOVQ $817650473, DI + IMULQ DI, DX + MOVQ 0(BX), DI + MOVQ $1654206401, SI + IMULQ SI, DI + ADDQ AX, DI + ADDQ $8, BX + SUBQ $8, CX + RORQ $29, DI + MOVQ $817650473, SI + IMULQ SI, DI + MOVQ DX, SI + MOVQ $3603962101, BP + IMULQ BP, SI + RORQ $21, SI + ADDQ DI, SI + XORQ SI, DX + MOVQ DI, SI + MOVQ $817650473, BP + IMULQ BP, SI + RORQ $21, SI + ADDQ DX, SI + XORQ SI, DI + ADDQ DI, AX +after16: + CMPQ CX, $8 + JLT after8 + MOVQ 0(BX), DX + MOVQ $817650473, DI + IMULQ DI, DX + ADDQ DX, AX + ADDQ $8, BX + SUBQ $8, CX + MOVQ AX, DX + RORQ $55, DX + MOVQ $2729050939, DI + IMULQ DI, DX + XORQ DX, AX +after8: + CMPQ CX, $4 + JLT after4 + XORQ DX, DX + MOVL 0(BX), DX + MOVQ $817650473, DI + IMULQ DI, DX + ADDQ DX, AX + ADDQ $4, BX + SUBQ $4, CX + MOVQ AX, DX + RORQ $26, DX + MOVQ $2729050939, DI + IMULQ DI, DX + XORQ DX, AX +after4: + CMPQ CX, $2 + JLT after2 + XORQ DX, DX + MOVW 0(BX), DX + MOVQ $817650473, DI + IMULQ DI, DX + ADDQ DX, AX + ADDQ $2, BX + SUBQ $2, CX + MOVQ AX, DX + RORQ $48, DX + MOVQ $2729050939, DI + IMULQ DI, DX + XORQ DX, AX +after2: + CMPQ CX, $1 + JLT after1 + MOVBQZX 0(BX), BX + MOVQ $817650473, CX + IMULQ CX, BX + ADDQ BX, AX + MOVQ AX, BX + RORQ $37, BX + MOVQ $2729050939, CX + IMULQ CX, BX + XORQ BX, AX +after1: + MOVQ AX, BX + RORQ $28, BX + XORQ BX, AX + MOVQ $3603962101, BX + IMULQ BX, AX + MOVQ AX, BX + RORQ $29, BX + XORQ BX, AX + MOVQ AX, ret+24(FP) + RET diff --git a/vendor/github.com/dgryski/go-metro/metro_stub.go b/vendor/github.com/dgryski/go-metro/metro_stub.go new file mode 100644 index 000000000..86ddcb470 --- /dev/null +++ b/vendor/github.com/dgryski/go-metro/metro_stub.go @@ -0,0 +1,10 @@ +// +build 
!noasm,amd64 +// +build !gccgo + +package metro + +//go:generate python -m peachpy.x86_64 metro.py -S -o metro_amd64.s -mabi=goasm +//go:noescape + +func Hash64(buffer []byte, seed uint64) uint64 +func Hash64Str(buffer string, seed uint64) uint64 diff --git a/vendor/github.com/dop251/goja/README.md b/vendor/github.com/dop251/goja/README.md index 43edde91c..f1c59a910 100644 --- a/vendor/github.com/dop251/goja/README.md +++ b/vendor/github.com/dop251/goja/README.md @@ -92,18 +92,15 @@ and it includes an event loop. ### Can you implement (feature X from ES6 or higher)? -Some ES6 functionality has been implemented. So far this is mostly built-ins, not syntax enhancements. -See https://github.com/dop251/goja/milestone/1 for more details. - -The ongoing work is done in the es6 branch which is merged into master when appropriate. Every commit -in this branch represents a relatively stable state (i.e. it compiles and passes all enabled tc39 tests), -however because the version of tc39 tests I use is quite old, it may be not as well tested as the ES5.1 -functionality. Because ES6 is a superset of ES5.1 it should not break your existing code. - -I will be adding features in their dependency order and as quickly as my time allows. Please do not ask +I will be adding features in their dependency order and as quickly as time permits. Please do not ask for ETAs. Features that are open in the [milestone](https://github.com/dop251/goja/milestone/1) are either in progress or will be worked on next. +The ongoing work is done in separate feature branches which are merged into master when appropriate. +Every commit in these branches represents a relatively stable state (i.e. it compiles and passes all enabled tc39 tests), +however because the version of tc39 tests I use is quite old, it may be not as well tested as the ES5.1 functionality. Because there are (usually) no major breaking changes between ECMAScript revisions +it should not break your existing code. 
You are encouraged to give it a try and report any bugs found. Please do not submit fixes though without discussing it first, as the code could be changed in the meantime. + ### How do I contribute? Before submitting a pull request please make sure that: diff --git a/vendor/github.com/dop251/goja/array.go b/vendor/github.com/dop251/goja/array.go index b68fbcf91..6723f3f5f 100644 --- a/vendor/github.com/dop251/goja/array.go +++ b/vendor/github.com/dop251/goja/array.go @@ -31,7 +31,7 @@ func (ai *arrayIterObject) next() Value { if ai.kind == iterationKindKey { return ai.val.runtime.createIterResultObject(idxVal, false) } - elementValue := ai.obj.self.getIdx(idxVal, nil) + elementValue := nilSafe(ai.obj.self.getIdx(idxVal, nil)) var result Value if ai.kind == iterationKindValue { result = elementValue @@ -162,7 +162,7 @@ func (a *arrayObject) getIdx(idx valueInt, receiver Value) Value { func (a *arrayObject) getOwnPropStr(name unistring.String) Value { if len(a.values) > 0 { - if i := strToIdx(name); i != math.MaxUint32 { + if i := strToArrayIdx(name); i != math.MaxUint32 { if i < uint32(len(a.values)) { return a.values[i] } @@ -264,7 +264,7 @@ func (a *arrayObject) _setOwnIdx(idx uint32, val Value, throw bool) bool { } func (a *arrayObject) setOwnStr(name unistring.String, val Value, throw bool) bool { - if idx := strToIdx(name); idx != math.MaxUint32 { + if idx := strToArrayIdx(name); idx != math.MaxUint32 { return a._setOwnIdx(idx, val, throw) } else { if name == "length" { @@ -325,7 +325,7 @@ func (a *arrayObject) ownKeys(all bool, accum []Value) []Value { } func (a *arrayObject) hasOwnPropertyStr(name unistring.String) bool { - if idx := strToIdx(name); idx != math.MaxUint32 { + if idx := strToArrayIdx(name); idx != math.MaxUint32 { return idx < uint32(len(a.values)) && a.values[idx] != nil } else { return a.baseObject.hasOwnPropertyStr(name) @@ -426,14 +426,14 @@ func (a *arrayObject) _defineIdxProperty(idx uint32, desc PropertyDescriptor, th a.propValueCount++ 
} } else { - a.val.self.(*sparseArrayObject).add(uint32(idx), prop) + a.val.self.(*sparseArrayObject).add(idx, prop) } } return ok } func (a *arrayObject) defineOwnPropertyStr(name unistring.String, descr PropertyDescriptor, throw bool) bool { - if idx := strToIdx(name); idx != math.MaxUint32 { + if idx := strToArrayIdx(name); idx != math.MaxUint32 { return a._defineIdxProperty(idx, descr, throw) } if name == "length" { @@ -467,7 +467,7 @@ func (a *arrayObject) _deleteIdxProp(idx uint32, throw bool) bool { } func (a *arrayObject) deleteStr(name unistring.String, throw bool) bool { - if idx := strToIdx(name); idx != math.MaxUint32 { + if idx := strToArrayIdx(name); idx != math.MaxUint32 { return a._deleteIdxProp(idx, throw) } return a.baseObject.deleteStr(name, throw) @@ -521,113 +521,3 @@ func toIdx(v valueInt) uint32 { } return math.MaxUint32 } - -func strToIdx64(s unistring.String) int64 { - if s == "" { - return -1 - } - l := len(s) - if s[0] == '0' { - if l == 1 { - return 0 - } - return -1 - } - var n int64 - if l < 19 { - // guaranteed not to overflow - for i := 0; i < len(s); i++ { - c := s[i] - if c < '0' || c > '9' { - return -1 - } - n = n*10 + int64(c-'0') - } - return n - } - if l > 19 { - // guaranteed to overflow - return -1 - } - c18 := s[18] - if c18 < '0' || c18 > '9' { - return -1 - } - for i := 0; i < 18; i++ { - c := s[i] - if c < '0' || c > '9' { - return -1 - } - n = n*10 + int64(c-'0') - } - if n >= math.MaxInt64/10+1 { - return -1 - } - n *= 10 - n1 := n + int64(c18-'0') - if n1 < n { - return -1 - } - return n1 -} - -func strToIdx(s unistring.String) uint32 { - if s == "" { - return math.MaxUint32 - } - l := len(s) - if s[0] == '0' { - if l == 1 { - return 0 - } - return math.MaxUint32 - } - var n uint32 - if l < 10 { - // guaranteed not to overflow - for i := 0; i < len(s); i++ { - c := s[i] - if c < '0' || c > '9' { - return math.MaxUint32 - } - n = n*10 + uint32(c-'0') - } - return n - } - if l > 10 { - // guaranteed to overflow - return 
math.MaxUint32 - } - c9 := s[9] - if c9 < '0' || c9 > '9' { - return math.MaxUint32 - } - for i := 0; i < 9; i++ { - c := s[i] - if c < '0' || c > '9' { - return math.MaxUint32 - } - n = n*10 + uint32(c-'0') - } - if n >= math.MaxUint32/10+1 { - return math.MaxUint32 - } - n *= 10 - n1 := n + uint32(c9-'0') - if n1 < n { - return math.MaxUint32 - } - - return n1 -} - -func strToGoIdx(s unistring.String) int { - if bits.UintSize == 64 { - return int(strToIdx64(s)) - } - i := strToIdx(s) - if i >= math.MaxInt32 { - return -1 - } - return int(i) -} diff --git a/vendor/github.com/dop251/goja/array_sparse.go b/vendor/github.com/dop251/goja/array_sparse.go index 904cf5313..dfb24d01d 100644 --- a/vendor/github.com/dop251/goja/array_sparse.go +++ b/vendor/github.com/dop251/goja/array_sparse.go @@ -133,7 +133,7 @@ func (a *sparseArrayObject) getLengthProp() Value { } func (a *sparseArrayObject) getOwnPropStr(name unistring.String) Value { - if idx := strToIdx(name); idx != math.MaxUint32 { + if idx := strToArrayIdx(name); idx != math.MaxUint32 { return a._getIdx(idx) } if name == "length" { @@ -214,7 +214,7 @@ func (a *sparseArrayObject) _setOwnIdx(idx uint32, val Value, throw bool) bool { } func (a *sparseArrayObject) setOwnStr(name unistring.String, val Value, throw bool) bool { - if idx := strToIdx(name); idx != math.MaxUint32 { + if idx := strToArrayIdx(name); idx != math.MaxUint32 { return a._setOwnIdx(idx, val, throw) } else { if name == "length" { @@ -295,7 +295,7 @@ func (a *sparseArrayObject) setValues(values []Value, objCount int) { } func (a *sparseArrayObject) hasOwnPropertyStr(name unistring.String) bool { - if idx := strToIdx(name); idx != math.MaxUint32 { + if idx := strToArrayIdx(name); idx != math.MaxUint32 { i := a.findIdx(idx) return i < len(a.items) && a.items[i].idx == idx } else { @@ -372,7 +372,7 @@ func (a *sparseArrayObject) _defineIdxProperty(idx uint32, desc PropertyDescript } func (a *sparseArrayObject) defineOwnPropertyStr(name unistring.String, 
descr PropertyDescriptor, throw bool) bool { - if idx := strToIdx(name); idx != math.MaxUint32 { + if idx := strToArrayIdx(name); idx != math.MaxUint32 { return a._defineIdxProperty(idx, descr, throw) } if name == "length" { @@ -406,7 +406,7 @@ func (a *sparseArrayObject) _deleteIdxProp(idx uint32, throw bool) bool { } func (a *sparseArrayObject) deleteStr(name unistring.String, throw bool) bool { - if idx := strToIdx(name); idx != math.MaxUint32 { + if idx := strToArrayIdx(name); idx != math.MaxUint32 { return a._deleteIdxProp(idx, throw) } return a.baseObject.deleteStr(name, throw) diff --git a/vendor/github.com/dop251/goja/ast/node.go b/vendor/github.com/dop251/goja/ast/node.go index e033726a1..a973a3095 100644 --- a/vendor/github.com/dop251/goja/ast/node.go +++ b/vendor/github.com/dop251/goja/ast/node.go @@ -15,6 +15,15 @@ import ( "github.com/dop251/goja/unistring" ) +type PropertyKind string + +const ( + PropertyKindValue PropertyKind = "value" + PropertyKindGet PropertyKind = "get" + PropertyKindSet PropertyKind = "set" + PropertyKindMethod PropertyKind = "method" +) + // All nodes implement the Node interface. 
type Node interface { Idx0() file.Idx // The index of the first character belonging to the node @@ -32,12 +41,34 @@ type ( _expressionNode() } + BindingTarget interface { + Expression + _bindingTarget() + } + + Binding struct { + Target BindingTarget + Initializer Expression + } + + Pattern interface { + BindingTarget + _pattern() + } + ArrayLiteral struct { LeftBracket file.Idx RightBracket file.Idx Value []Expression } + ArrayPattern struct { + LeftBracket file.Idx + RightBracket file.Idx + Elements []Expression + Rest Expression + } + AssignExpression struct { Operator token.Token Left Expression @@ -97,6 +128,23 @@ type ( DeclarationList []*VariableDeclaration } + ConciseBody interface { + Node + _conciseBody() + } + + ExpressionBody struct { + Expression Expression + } + + ArrowFunctionLiteral struct { + Start file.Idx + ParameterList *ParameterList + Body ConciseBody + Source string + DeclarationList []*VariableDeclaration + } + Identifier struct { Name unistring.String Idx file.Idx @@ -127,18 +175,40 @@ type ( Value []Property } + ObjectPattern struct { + LeftBrace file.Idx + RightBrace file.Idx + Properties []Property + Rest Expression + } + ParameterList struct { Opening file.Idx - List []*Identifier + List []*Binding + Rest Expression Closing file.Idx } - Property struct { + Property interface { + Expression + _property() + } + + PropertyShort struct { + Name Identifier + Initializer Expression + } + + PropertyKeyed struct { Key Expression - Kind string + Kind PropertyKind Value Expression } + SpreadElement struct { + Expression + } + RegExpLiteral struct { Idx file.Idx Literal string @@ -156,6 +226,21 @@ type ( Value unistring.String } + TemplateElement struct { + Idx file.Idx + Literal string + Parsed unistring.String + Valid bool + } + + TemplateLiteral struct { + OpenQuote file.Idx + CloseQuote file.Idx + Tag Expression + Elements []*TemplateElement + Expressions []Expression + } + ThisExpression struct { Idx file.Idx } @@ -167,12 +252,6 @@ type ( 
Postfix bool } - VariableExpression struct { - Name unistring.String - Idx file.Idx - Initializer Expression - } - MetaProperty struct { Meta, Property *Identifier Idx file.Idx @@ -191,6 +270,7 @@ func (*CallExpression) _expressionNode() {} func (*ConditionalExpression) _expressionNode() {} func (*DotExpression) _expressionNode() {} func (*FunctionLiteral) _expressionNode() {} +func (*ArrowFunctionLiteral) _expressionNode() {} func (*Identifier) _expressionNode() {} func (*NewExpression) _expressionNode() {} func (*NullLiteral) _expressionNode() {} @@ -199,10 +279,16 @@ func (*ObjectLiteral) _expressionNode() {} func (*RegExpLiteral) _expressionNode() {} func (*SequenceExpression) _expressionNode() {} func (*StringLiteral) _expressionNode() {} +func (*TemplateLiteral) _expressionNode() {} func (*ThisExpression) _expressionNode() {} func (*UnaryExpression) _expressionNode() {} -func (*VariableExpression) _expressionNode() {} func (*MetaProperty) _expressionNode() {} +func (*ObjectPattern) _expressionNode() {} +func (*ArrayPattern) _expressionNode() {} +func (*Binding) _expressionNode() {} + +func (*PropertyShort) _expressionNode() {} +func (*PropertyKeyed) _expressionNode() {} // ========= // // Statement // @@ -240,7 +326,7 @@ type ( CatchStatement struct { Catch file.Idx - Parameter *Identifier + Parameter BindingTarget Body *BlockStatement } @@ -323,13 +409,13 @@ type ( VariableStatement struct { Var file.Idx - List []*VariableExpression + List []*Binding } LexicalDeclaration struct { Idx file.Idx Token token.Token - List []*VariableExpression + List []*Binding } WhileStatement struct { @@ -382,7 +468,7 @@ func (*FunctionDeclaration) _statementNode() {} type ( VariableDeclaration struct { Var file.Idx - List []*VariableExpression + List []*Binding } ) @@ -397,7 +483,7 @@ type ( ForLoopInitializerVarDeclList struct { Var file.Idx - List []*VariableExpression + List []*Binding } ForLoopInitializerLexicalDecl struct { @@ -409,22 +495,13 @@ type ( } ForIntoVar struct 
{ - Binding *VariableExpression - } - - ForBinding interface { - _forBinding() - } - - BindingIdentifier struct { - Idx file.Idx - Name unistring.String + Binding *Binding } ForDeclaration struct { Idx file.Idx IsConst bool - Binding ForBinding + Target BindingTarget } ForIntoExpression struct { @@ -440,7 +517,22 @@ func (*ForIntoVar) _forInto() {} func (*ForDeclaration) _forInto() {} func (*ForIntoExpression) _forInto() {} -func (*BindingIdentifier) _forBinding() {} +func (*ArrayPattern) _pattern() {} +func (*ArrayPattern) _bindingTarget() {} + +func (*ObjectPattern) _pattern() {} +func (*ObjectPattern) _bindingTarget() {} + +func (*BadExpression) _bindingTarget() {} + +func (*PropertyShort) _property() {} +func (*PropertyKeyed) _property() {} +func (*SpreadElement) _property() {} + +func (*Identifier) _bindingTarget() {} + +func (*BlockStatement) _conciseBody() {} +func (*ExpressionBody) _conciseBody() {} // ==== // // Node // @@ -459,6 +551,8 @@ type Program struct { // ==== // func (self *ArrayLiteral) Idx0() file.Idx { return self.LeftBracket } +func (self *ArrayPattern) Idx0() file.Idx { return self.LeftBracket } +func (self *ObjectPattern) Idx0() file.Idx { return self.LeftBrace } func (self *AssignExpression) Idx0() file.Idx { return self.Left.Idx0() } func (self *BadExpression) Idx0() file.Idx { return self.From } func (self *BinaryExpression) Idx0() file.Idx { return self.Left.Idx0() } @@ -468,6 +562,7 @@ func (self *CallExpression) Idx0() file.Idx { return self.Callee.Idx0() } func (self *ConditionalExpression) Idx0() file.Idx { return self.Test.Idx0() } func (self *DotExpression) Idx0() file.Idx { return self.Left.Idx0() } func (self *FunctionLiteral) Idx0() file.Idx { return self.Function } +func (self *ArrowFunctionLiteral) Idx0() file.Idx { return self.Start } func (self *Identifier) Idx0() file.Idx { return self.Idx } func (self *NewExpression) Idx0() file.Idx { return self.New } func (self *NullLiteral) Idx0() file.Idx { return self.Idx } @@ -476,9 
+571,9 @@ func (self *ObjectLiteral) Idx0() file.Idx { return self.LeftBrace } func (self *RegExpLiteral) Idx0() file.Idx { return self.Idx } func (self *SequenceExpression) Idx0() file.Idx { return self.Sequence[0].Idx0() } func (self *StringLiteral) Idx0() file.Idx { return self.Idx } +func (self *TemplateLiteral) Idx0() file.Idx { return self.OpenQuote } func (self *ThisExpression) Idx0() file.Idx { return self.Idx } func (self *UnaryExpression) Idx0() file.Idx { return self.Idx } -func (self *VariableExpression) Idx0() file.Idx { return self.Idx } func (self *MetaProperty) Idx0() file.Idx { return self.Idx } func (self *BadStatement) Idx0() file.Idx { return self.From } @@ -505,14 +600,19 @@ func (self *WhileStatement) Idx0() file.Idx { return self.While } func (self *WithStatement) Idx0() file.Idx { return self.With } func (self *LexicalDeclaration) Idx0() file.Idx { return self.Idx } func (self *FunctionDeclaration) Idx0() file.Idx { return self.Function.Idx0() } +func (self *Binding) Idx0() file.Idx { return self.Target.Idx0() } func (self *ForLoopInitializerVarDeclList) Idx0() file.Idx { return self.List[0].Idx0() } +func (self *PropertyShort) Idx0() file.Idx { return self.Name.Idx } +func (self *PropertyKeyed) Idx0() file.Idx { return self.Key.Idx0() } +func (self *ExpressionBody) Idx0() file.Idx { return self.Expression.Idx0() } // ==== // // Idx1 // // ==== // -func (self *ArrayLiteral) Idx1() file.Idx { return self.RightBracket } +func (self *ArrayLiteral) Idx1() file.Idx { return self.RightBracket + 1 } +func (self *ArrayPattern) Idx1() file.Idx { return self.RightBracket + 1 } func (self *AssignExpression) Idx1() file.Idx { return self.Right.Idx1() } func (self *BadExpression) Idx1() file.Idx { return self.To } func (self *BinaryExpression) Idx1() file.Idx { return self.Right.Idx1() } @@ -522,27 +622,24 @@ func (self *CallExpression) Idx1() file.Idx { return self.RightParenthesi func (self *ConditionalExpression) Idx1() file.Idx { return 
self.Test.Idx1() } func (self *DotExpression) Idx1() file.Idx { return self.Identifier.Idx1() } func (self *FunctionLiteral) Idx1() file.Idx { return self.Body.Idx1() } +func (self *ArrowFunctionLiteral) Idx1() file.Idx { return self.Body.Idx1() } func (self *Identifier) Idx1() file.Idx { return file.Idx(int(self.Idx) + len(self.Name)) } func (self *NewExpression) Idx1() file.Idx { return self.RightParenthesis + 1 } func (self *NullLiteral) Idx1() file.Idx { return file.Idx(int(self.Idx) + 4) } // "null" func (self *NumberLiteral) Idx1() file.Idx { return file.Idx(int(self.Idx) + len(self.Literal)) } -func (self *ObjectLiteral) Idx1() file.Idx { return self.RightBrace } +func (self *ObjectLiteral) Idx1() file.Idx { return self.RightBrace + 1 } +func (self *ObjectPattern) Idx1() file.Idx { return self.RightBrace + 1 } func (self *RegExpLiteral) Idx1() file.Idx { return file.Idx(int(self.Idx) + len(self.Literal)) } -func (self *SequenceExpression) Idx1() file.Idx { return self.Sequence[0].Idx1() } +func (self *SequenceExpression) Idx1() file.Idx { return self.Sequence[len(self.Sequence)-1].Idx1() } func (self *StringLiteral) Idx1() file.Idx { return file.Idx(int(self.Idx) + len(self.Literal)) } -func (self *ThisExpression) Idx1() file.Idx { return self.Idx } +func (self *TemplateLiteral) Idx1() file.Idx { return self.CloseQuote + 1 } +func (self *ThisExpression) Idx1() file.Idx { return self.Idx + 4 } func (self *UnaryExpression) Idx1() file.Idx { if self.Postfix { return self.Operand.Idx1() + 2 // ++ -- } return self.Operand.Idx1() } -func (self *VariableExpression) Idx1() file.Idx { - if self.Initializer == nil { - return file.Idx(int(self.Idx) + len(self.Name) + 1) - } - return self.Initializer.Idx1() -} func (self *MetaProperty) Idx1() file.Idx { return self.Property.Idx1() } @@ -565,16 +662,41 @@ func (self *IfStatement) Idx1() file.Idx { } return self.Consequent.Idx1() } -func (self *LabelledStatement) Idx1() file.Idx { return self.Colon + 1 } -func (self 
*Program) Idx1() file.Idx { return self.Body[len(self.Body)-1].Idx1() } -func (self *ReturnStatement) Idx1() file.Idx { return self.Return } -func (self *SwitchStatement) Idx1() file.Idx { return self.Body[len(self.Body)-1].Idx1() } -func (self *ThrowStatement) Idx1() file.Idx { return self.Throw } -func (self *TryStatement) Idx1() file.Idx { return self.Try } +func (self *LabelledStatement) Idx1() file.Idx { return self.Colon + 1 } +func (self *Program) Idx1() file.Idx { return self.Body[len(self.Body)-1].Idx1() } +func (self *ReturnStatement) Idx1() file.Idx { return self.Return + 6 } +func (self *SwitchStatement) Idx1() file.Idx { return self.Body[len(self.Body)-1].Idx1() } +func (self *ThrowStatement) Idx1() file.Idx { return self.Argument.Idx1() } +func (self *TryStatement) Idx1() file.Idx { + if self.Finally != nil { + return self.Finally.Idx1() + } + if self.Catch != nil { + return self.Catch.Idx1() + } + return self.Body.Idx1() +} func (self *VariableStatement) Idx1() file.Idx { return self.List[len(self.List)-1].Idx1() } func (self *WhileStatement) Idx1() file.Idx { return self.Body.Idx1() } func (self *WithStatement) Idx1() file.Idx { return self.Body.Idx1() } func (self *LexicalDeclaration) Idx1() file.Idx { return self.List[len(self.List)-1].Idx1() } func (self *FunctionDeclaration) Idx1() file.Idx { return self.Function.Idx1() } +func (self *Binding) Idx1() file.Idx { + if self.Initializer != nil { + return self.Initializer.Idx1() + } + return self.Target.Idx1() +} func (self *ForLoopInitializerVarDeclList) Idx1() file.Idx { return self.List[len(self.List)-1].Idx1() } + +func (self *PropertyShort) Idx1() file.Idx { + if self.Initializer != nil { + return self.Initializer.Idx1() + } + return self.Name.Idx1() +} + +func (self *PropertyKeyed) Idx1() file.Idx { return self.Value.Idx1() } + +func (self *ExpressionBody) Idx1() file.Idx { return self.Expression.Idx1() } diff --git a/vendor/github.com/dop251/goja/builtin_array.go 
b/vendor/github.com/dop251/goja/builtin_array.go index fd4eb98df..668292928 100644 --- a/vendor/github.com/dop251/goja/builtin_array.go +++ b/vendor/github.com/dop251/goja/builtin_array.go @@ -347,12 +347,36 @@ func (r *Runtime) arrayproto_sort(call FunctionCall) Value { } } - ctx := arraySortCtx{ - obj: o.self, - compare: compareFn, - } + if r.checkStdArrayObj(o) != nil { + ctx := arraySortCtx{ + obj: o.self, + compare: compareFn, + } + + sort.Stable(&ctx) + } else { + length := toLength(o.self.getStr("length", nil)) + a := make([]Value, 0, length) + for i := int64(0); i < length; i++ { + idx := valueInt(i) + if o.self.hasPropertyIdx(idx) { + a = append(a, nilSafe(o.self.getIdx(idx, nil))) + } + } + ar := r.newArrayValues(a) + ctx := arraySortCtx{ + obj: ar.self, + compare: compareFn, + } - sort.Stable(&ctx) + sort.Stable(&ctx) + for i := 0; i < len(a); i++ { + o.self.setOwnIdx(valueInt(i), a[i], true) + } + for i := int64(len(a)); i < length; i++ { + o.self.deleteIdx(valueInt(i), true) + } + } return o } @@ -380,6 +404,7 @@ func (r *Runtime) arrayproto_splice(call FunctionCall) Value { for k := int64(0); k < actualDeleteCount; k++ { createDataPropertyOrThrow(a, intToValue(k), src.values[k+actualStart]) } + a.self.setOwnStr("length", intToValue(actualDeleteCount), true) } var values []Value if itemCount < actualDeleteCount { @@ -411,7 +436,7 @@ func (r *Runtime) arrayproto_splice(call FunctionCall) Value { for k := int64(0); k < actualDeleteCount; k++ { from := valueInt(k + actualStart) if o.self.hasPropertyIdx(from) { - createDataPropertyOrThrow(a, valueInt(k), o.self.getIdx(from, nil)) + createDataPropertyOrThrow(a, valueInt(k), nilSafe(o.self.getIdx(from, nil))) } } @@ -420,7 +445,7 @@ func (r *Runtime) arrayproto_splice(call FunctionCall) Value { from := valueInt(k + actualDeleteCount) to := valueInt(k + itemCount) if o.self.hasPropertyIdx(from) { - o.self.setOwnIdx(to, o.self.getIdx(from, nil), true) + o.self.setOwnIdx(to, nilSafe(o.self.getIdx(from, nil)), 
true) } else { o.self.deleteIdx(to, true) } @@ -434,7 +459,7 @@ func (r *Runtime) arrayproto_splice(call FunctionCall) Value { from := valueInt(k + actualDeleteCount - 1) to := valueInt(k + itemCount - 1) if o.self.hasPropertyIdx(from) { - o.self.setOwnIdx(to, o.self.getIdx(from, nil), true) + o.self.setOwnIdx(to, nilSafe(o.self.getIdx(from, nil)), true) } else { o.self.deleteIdx(to, true) } @@ -475,7 +500,7 @@ func (r *Runtime) arrayproto_unshift(call FunctionCall) Value { from := valueInt(k) to := valueInt(k + argCount) if o.self.hasPropertyIdx(from) { - o.self.setOwnIdx(to, o.self.getIdx(from, nil), true) + o.self.setOwnIdx(to, nilSafe(o.self.getIdx(from, nil)), true) } else { o.self.deleteIdx(to, true) } @@ -962,7 +987,7 @@ func (r *Runtime) arrayproto_copyWithin(call FunctionCall) Value { } for count > 0 { if o.self.hasPropertyIdx(valueInt(from)) { - o.self.setOwnIdx(valueInt(to), o.self.getIdx(valueInt(from), nil), true) + o.self.setOwnIdx(valueInt(to), nilSafe(o.self.getIdx(valueInt(from), nil)), true) } else { o.self.deleteIdx(valueInt(to), true) } @@ -1059,7 +1084,7 @@ func (r *Runtime) flattenIntoArray(target, source *Object, sourceLen, start, dep for sourceIndex < sourceLen { p := intToValue(sourceIndex) if source.hasProperty(p.toString()) { - element := source.get(p, source) + element := nilSafe(source.get(p, source)) if mapperFunction != nil { element = mapperFunction(FunctionCall{ This: thisArg, @@ -1329,11 +1354,7 @@ func (r *Runtime) createArray(val *Object) objectImpl { o._putProp("from", r.newNativeFunc(r.array_from, nil, "from", nil, 1), true, false, true) o._putProp("isArray", r.newNativeFunc(r.array_isArray, nil, "isArray", nil, 1), true, false, true) o._putProp("of", r.newNativeFunc(r.array_of, nil, "of", nil, 0), true, false, true) - o._putSym(SymSpecies, &valueProperty{ - getterFunc: r.newNativeFunc(r.returnThis, nil, "get [Symbol.species]", nil, 0), - accessor: true, - configurable: true, - }) + r.putSpeciesReturnThis(o) return o } diff 
--git a/vendor/github.com/dop251/goja/builtin_error.go b/vendor/github.com/dop251/goja/builtin_error.go index 5880b88dd..129c80d11 100644 --- a/vendor/github.com/dop251/goja/builtin_error.go +++ b/vendor/github.com/dop251/goja/builtin_error.go @@ -1,5 +1,27 @@ package goja +func (r *Runtime) builtin_Error(args []Value, proto *Object) *Object { + obj := r.newBaseObject(proto, classError) + if len(args) > 0 && args[0] != _undefined { + obj._putProp("message", args[0], true, false, true) + } + return obj.val +} + +func (r *Runtime) builtin_AggregateError(args []Value, proto *Object) *Object { + obj := r.newBaseObject(proto, classAggError) + if len(args) > 1 && args[1] != _undefined { + obj._putProp("message", args[1], true, false, true) + } + var errors []Value + if len(args) > 0 { + errors = r.iterableToList(args[0], nil) + } + obj._putProp("errors", r.newArrayValues(errors), true, false, true) + + return obj.val +} + func (r *Runtime) createErrorPrototype(name valueString) *Object { o := r.newBaseObject(r.global.ErrorPrototype, classObject) o._putProp("message", stringEmpty, true, false, true) @@ -17,6 +39,10 @@ func (r *Runtime) initErrors() { r.global.Error = r.newNativeFuncConstruct(r.builtin_Error, "Error", r.global.ErrorPrototype, 1) r.addToGlobal("Error", r.global.Error) + r.global.AggregateErrorPrototype = r.createErrorPrototype(stringAggregateError) + r.global.AggregateError = r.newNativeFuncConstructProto(r.builtin_AggregateError, "AggregateError", r.global.AggregateErrorPrototype, r.global.Error, 1) + r.addToGlobal("AggregateError", r.global.AggregateError) + r.global.TypeErrorPrototype = r.createErrorPrototype(stringTypeError) r.global.TypeError = r.newNativeFuncConstructProto(r.builtin_Error, "TypeError", r.global.TypeErrorPrototype, r.global.Error, 1) diff --git a/vendor/github.com/dop251/goja/builtin_function.go b/vendor/github.com/dop251/goja/builtin_function.go index e595675fd..7f0227c9f 100644 --- a/vendor/github.com/dop251/goja/builtin_function.go 
+++ b/vendor/github.com/dop251/goja/builtin_function.go @@ -33,10 +33,12 @@ repeat: switch f := obj.self.(type) { case *funcObject: return newStringValue(f.src) + case *arrowFuncObject: + return newStringValue(f.src) case *nativeFuncObject: - return newStringValue(fmt.Sprintf("function %s() { [native code] }", f.nameProp.get(call.This).toString())) + return newStringValue(fmt.Sprintf("function %s() { [native code] }", nilSafe(f.getStr("name", nil)).toString())) case *boundFuncObject: - return newStringValue(fmt.Sprintf("function %s() { [native code] }", f.nameProp.get(call.This).toString())) + return newStringValue(fmt.Sprintf("function %s() { [native code] }", nilSafe(f.getStr("name", nil)).toString())) case *lazyObject: obj.self = f.create(obj) goto repeat @@ -46,10 +48,12 @@ repeat: switch c := f.target.self.(type) { case *funcObject: name = c.src + case *arrowFuncObject: + name = c.src case *nativeFuncObject: - name = nilSafe(c.nameProp.get(call.This)).toString().String() + name = nilSafe(f.getStr("name", nil)).toString().String() case *boundFuncObject: - name = nilSafe(c.nameProp.get(call.This)).toString().String() + name = nilSafe(f.getStr("name", nil)).toString().String() case *lazyObject: f.target.self = c.create(obj) goto repeat2 @@ -81,7 +85,7 @@ func (r *Runtime) createListFromArrayLike(a Value) []Value { l := toLength(o.self.getStr("length", nil)) res := make([]Value, 0, l) for k := int64(0); k < l; k++ { - res = append(res, o.self.getIdx(valueInt(k), nil)) + res = append(res, nilSafe(o.self.getIdx(valueInt(k), nil))) } return res } @@ -153,7 +157,7 @@ func (r *Runtime) functionproto_bind(call FunctionCall) Value { fcall := r.toCallable(call.This) construct := obj.self.assertConstructor() - l := int(toUint32(obj.self.getStr("length", nil))) + l := int(toUint32(nilSafe(obj.self.getStr("length", nil)))) l -= len(call.Arguments) - 1 if l < 0 { l = 0 @@ -183,8 +187,7 @@ func (r *Runtime) functionproto_bind(call FunctionCall) Value { func (r *Runtime) 
initFunction() { o := r.global.FunctionPrototype.self.(*nativeFuncObject) o.prototype = r.global.ObjectPrototype - o.nameProp.value = stringEmpty - + o._putProp("name", stringEmpty, false, false, true) o._putProp("apply", r.newNativeFunc(r.functionproto_apply, nil, "apply", nil, 2), true, false, true) o._putProp("bind", r.newNativeFunc(r.functionproto_bind, nil, "bind", nil, 1), true, false, true) o._putProp("call", r.newNativeFunc(r.functionproto_call, nil, "call", nil, 1), true, false, true) diff --git a/vendor/github.com/dop251/goja/builtin_json.go b/vendor/github.com/dop251/goja/builtin_json.go index dcdb6cc91..ebc8dbdec 100644 --- a/vendor/github.com/dop251/goja/builtin_json.go +++ b/vendor/github.com/dop251/goja/builtin_json.go @@ -132,14 +132,11 @@ func (r *Runtime) builtinJSON_decodeArray(d *json.Decoder) (*Object, error) { } func (r *Runtime) builtinJSON_reviveWalk(reviver func(FunctionCall) Value, holder *Object, name Value) Value { - value := holder.get(name, nil) - if value == nil { - value = _undefined - } + value := nilSafe(holder.get(name, nil)) if object, ok := value.(*Object); ok { if isArray(object) { - length := object.self.getStr("length", nil).ToInteger() + length := toLength(object.self.getStr("length", nil)) for index := int64(0); index < length; index++ { name := intToValue(index) value := r.builtinJSON_reviveWalk(reviver, object, name) @@ -186,7 +183,7 @@ func (r *Runtime) builtinJSON_stringify(call FunctionCall) Value { replacer, _ := call.Argument(1).(*Object) if replacer != nil { if isArray(replacer) { - length := replacer.self.getStr("length", nil).ToInteger() + length := toLength(replacer.self.getStr("length", nil)) seen := map[string]bool{} propertyList := make([]Value, length) length = 0 @@ -264,10 +261,7 @@ func (ctx *_builtinJSON_stringifyContext) do(v Value) bool { } func (ctx *_builtinJSON_stringifyContext) str(key Value, holder *Object) bool { - value := holder.get(key, nil) - if value == nil { - value = _undefined - } + value 
:= nilSafe(holder.get(key, nil)) if object, ok := value.(*Object); ok { if toJSON, ok := object.self.getStr("toJSON", nil).(*Object); ok { @@ -368,7 +362,7 @@ func (ctx *_builtinJSON_stringifyContext) ja(array *Object) { stepback = ctx.indent ctx.indent += ctx.gap } - length := array.self.getStr("length", nil).ToInteger() + length := toLength(array.self.getStr("length", nil)) if length == 0 { ctx.buf.WriteString("[]") return diff --git a/vendor/github.com/dop251/goja/builtin_map.go b/vendor/github.com/dop251/goja/builtin_map.go index 097dbff6b..f15c4677b 100644 --- a/vendor/github.com/dop251/goja/builtin_map.go +++ b/vendor/github.com/dop251/goja/builtin_map.go @@ -243,11 +243,7 @@ func (r *Runtime) createMapProto(val *Object) objectImpl { func (r *Runtime) createMap(val *Object) objectImpl { o := r.newNativeConstructOnly(val, r.builtin_newMap, r.global.MapPrototype, "Map", 0) - o._putSym(SymSpecies, &valueProperty{ - getterFunc: r.newNativeFunc(r.returnThis, nil, "get [Symbol.species]", nil, 0), - accessor: true, - configurable: true, - }) + r.putSpeciesReturnThis(o) return o } diff --git a/vendor/github.com/dop251/goja/builtin_object.go b/vendor/github.com/dop251/goja/builtin_object.go index b7070faac..1e828e0da 100644 --- a/vendor/github.com/dop251/goja/builtin_object.go +++ b/vendor/github.com/dop251/goja/builtin_object.go @@ -377,7 +377,7 @@ func (r *Runtime) object_entries(call FunctionCall) Value { } for item, next := iter.next(); next != nil; item, next = next() { - v := obj.self.getStr(item.name, nil) + v := nilSafe(obj.self.getStr(item.name, nil)) values = append(values, r.newArrayValues([]Value{stringValueFromRaw(item.name), v})) } @@ -393,7 +393,7 @@ func (r *Runtime) object_values(call FunctionCall) Value { } for item, next := iter.next(); next != nil; item, next = next() { - values = append(values, obj.self.getStr(item.name, nil)) + values = append(values, nilSafe(obj.self.getStr(item.name, nil))) } return r.newArrayValues(values) diff --git 
a/vendor/github.com/dop251/goja/builtin_promise.go b/vendor/github.com/dop251/goja/builtin_promise.go new file mode 100644 index 000000000..9d21417cb --- /dev/null +++ b/vendor/github.com/dop251/goja/builtin_promise.go @@ -0,0 +1,597 @@ +package goja + +import ( + "github.com/dop251/goja/unistring" +) + +type PromiseState int +type PromiseRejectionOperation int + +type promiseReactionType int + +const ( + PromiseStatePending PromiseState = iota + PromiseStateFulfilled + PromiseStateRejected +) + +const ( + PromiseRejectionReject PromiseRejectionOperation = iota + PromiseRejectionHandle +) + +const ( + promiseReactionFulfill promiseReactionType = iota + promiseReactionReject +) + +type PromiseRejectionTracker func(p *Promise, operation PromiseRejectionOperation) + +type jobCallback struct { + callback func(FunctionCall) Value +} + +type promiseCapability struct { + promise *Object + resolveObj, rejectObj *Object +} + +type promiseReaction struct { + capability *promiseCapability + typ promiseReactionType + handler *jobCallback +} + +// Promise is a Go wrapper around ECMAScript Promise. Calling Runtime.ToValue() on it +// returns the underlying Object. Calling Export() on a Promise Object returns a Promise. +// +// Use Runtime.NewPromise() to create one. Calling Runtime.ToValue() on a zero object or nil returns null Value. +// +// WARNING: Instances of Promise are not goroutine-safe. See Runtime.NewPromise() for more details. 
+type Promise struct { + baseObject + state PromiseState + result Value + fulfillReactions []*promiseReaction + rejectReactions []*promiseReaction + handled bool +} + +func (p *Promise) State() PromiseState { + return p.state +} + +func (p *Promise) Result() Value { + return p.result +} + +func (p *Promise) toValue(r *Runtime) Value { + if p == nil || p.val == nil { + return _null + } + promise := p.val + if promise.runtime != r { + panic(r.NewTypeError("Illegal runtime transition of a Promise")) + } + return promise +} + +func (p *Promise) createResolvingFunctions() (resolve, reject *Object) { + r := p.val.runtime + alreadyResolved := false + return p.val.runtime.newNativeFunc(func(call FunctionCall) Value { + if alreadyResolved { + return _undefined + } + alreadyResolved = true + resolution := call.Argument(0) + if resolution.SameAs(p.val) { + return p.reject(r.NewTypeError("Promise self-resolution")) + } + if obj, ok := resolution.(*Object); ok { + var thenAction Value + ex := r.vm.try(func() { + thenAction = obj.self.getStr("then", nil) + }) + if ex != nil { + return p.reject(ex.val) + } + if call, ok := assertCallable(thenAction); ok { + job := r.newPromiseResolveThenableJob(p, resolution, &jobCallback{callback: call}) + r.enqueuePromiseJob(job) + return _undefined + } + } + return p.fulfill(resolution) + }, nil, "", nil, 1), + p.val.runtime.newNativeFunc(func(call FunctionCall) Value { + if alreadyResolved { + return _undefined + } + alreadyResolved = true + reason := call.Argument(0) + return p.reject(reason) + }, nil, "", nil, 1) +} + +func (p *Promise) reject(reason Value) Value { + reactions := p.rejectReactions + p.result = reason + p.fulfillReactions, p.rejectReactions = nil, nil + p.state = PromiseStateRejected + r := p.val.runtime + if !p.handled { + r.trackPromiseRejection(p, PromiseRejectionReject) + } + r.triggerPromiseReactions(reactions, reason) + return _undefined +} + +func (p *Promise) fulfill(value Value) Value { + reactions := 
p.fulfillReactions + p.result = value + p.fulfillReactions, p.rejectReactions = nil, nil + p.state = PromiseStateFulfilled + p.val.runtime.triggerPromiseReactions(reactions, value) + return _undefined +} + +func (r *Runtime) newPromiseResolveThenableJob(p *Promise, thenable Value, then *jobCallback) func() { + return func() { + resolve, reject := p.createResolvingFunctions() + ex := r.vm.try(func() { + r.callJobCallback(then, thenable, resolve, reject) + }) + if ex != nil { + if fn, ok := reject.self.assertCallable(); ok { + fn(FunctionCall{Arguments: []Value{ex.val}}) + } + } + } +} + +func (r *Runtime) enqueuePromiseJob(job func()) { + r.jobQueue = append(r.jobQueue, job) +} + +func (r *Runtime) triggerPromiseReactions(reactions []*promiseReaction, argument Value) { + for _, reaction := range reactions { + r.enqueuePromiseJob(r.newPromiseReactionJob(reaction, argument)) + } +} + +func (r *Runtime) newPromiseReactionJob(reaction *promiseReaction, argument Value) func() { + return func() { + var handlerResult Value + fulfill := false + if reaction.handler == nil { + handlerResult = argument + if reaction.typ == promiseReactionFulfill { + fulfill = true + } + } else { + ex := r.vm.try(func() { + handlerResult = r.callJobCallback(reaction.handler, _undefined, argument) + fulfill = true + }) + if ex != nil { + handlerResult = ex.val + } + } + if reaction.capability != nil { + if fulfill { + reaction.capability.resolve(handlerResult) + } else { + reaction.capability.reject(handlerResult) + } + } + } +} + +func (r *Runtime) newPromise(proto *Object) *Promise { + o := &Object{runtime: r} + + po := &Promise{} + po.class = classPromise + po.val = o + po.extensible = true + o.self = po + po.prototype = proto + po.init() + return po +} + +func (r *Runtime) builtin_newPromise(args []Value, newTarget *Object) *Object { + if newTarget == nil { + panic(r.needNew("Promise")) + } + var arg0 Value + if len(args) > 0 { + arg0 = args[0] + } + executor := r.toCallable(arg0) + + proto 
:= r.getPrototypeFromCtor(newTarget, r.global.Promise, r.global.PromisePrototype) + po := r.newPromise(proto) + + resolve, reject := po.createResolvingFunctions() + ex := r.vm.try(func() { + executor(FunctionCall{Arguments: []Value{resolve, reject}}) + }) + if ex != nil { + if fn, ok := reject.self.assertCallable(); ok { + fn(FunctionCall{Arguments: []Value{ex.val}}) + } + } + return po.val +} + +func (r *Runtime) promiseProto_then(call FunctionCall) Value { + thisObj := r.toObject(call.This) + if p, ok := thisObj.self.(*Promise); ok { + c := r.speciesConstructorObj(thisObj, r.global.Promise) + resultCapability := r.newPromiseCapability(c) + return r.performPromiseThen(p, call.Argument(0), call.Argument(1), resultCapability) + } + panic(r.NewTypeError("Method Promise.prototype.then called on incompatible receiver %s", thisObj)) +} + +func (r *Runtime) newPromiseCapability(c *Object) *promiseCapability { + pcap := new(promiseCapability) + if c == r.global.Promise { + p := r.newPromise(r.global.PromisePrototype) + pcap.resolveObj, pcap.rejectObj = p.createResolvingFunctions() + pcap.promise = p.val + } else { + var resolve, reject Value + executor := r.newNativeFunc(func(call FunctionCall) Value { + if resolve != nil { + panic(r.NewTypeError("resolve is already set")) + } + if reject != nil { + panic(r.NewTypeError("reject is already set")) + } + if arg := call.Argument(0); arg != _undefined { + resolve = arg + } + if arg := call.Argument(1); arg != _undefined { + reject = arg + } + return nil + }, nil, "", nil, 2) + pcap.promise = r.toConstructor(c)([]Value{executor}, c) + pcap.resolveObj = r.toObject(resolve) + r.toCallable(pcap.resolveObj) // make sure it's callable + pcap.rejectObj = r.toObject(reject) + r.toCallable(pcap.rejectObj) + } + return pcap +} + +func (r *Runtime) performPromiseThen(p *Promise, onFulfilled, onRejected Value, resultCapability *promiseCapability) Value { + var onFulfilledJobCallback, onRejectedJobCallback *jobCallback + if f, ok := 
assertCallable(onFulfilled); ok { + onFulfilledJobCallback = &jobCallback{callback: f} + } + if f, ok := assertCallable(onRejected); ok { + onRejectedJobCallback = &jobCallback{callback: f} + } + fulfillReaction := &promiseReaction{ + capability: resultCapability, + typ: promiseReactionFulfill, + handler: onFulfilledJobCallback, + } + rejectReaction := &promiseReaction{ + capability: resultCapability, + typ: promiseReactionReject, + handler: onRejectedJobCallback, + } + switch p.state { + case PromiseStatePending: + p.fulfillReactions = append(p.fulfillReactions, fulfillReaction) + p.rejectReactions = append(p.rejectReactions, rejectReaction) + case PromiseStateFulfilled: + r.enqueuePromiseJob(r.newPromiseReactionJob(fulfillReaction, p.result)) + default: + reason := p.result + if !p.handled { + r.trackPromiseRejection(p, PromiseRejectionHandle) + } + r.enqueuePromiseJob(r.newPromiseReactionJob(rejectReaction, reason)) + } + p.handled = true + if resultCapability == nil { + return _undefined + } + return resultCapability.promise +} + +func (r *Runtime) promiseProto_catch(call FunctionCall) Value { + return r.invoke(call.This, "then", _undefined, call.Argument(0)) +} + +func (r *Runtime) promiseResolve(c *Object, x Value) *Object { + if obj, ok := x.(*Object); ok { + xConstructor := nilSafe(obj.self.getStr("constructor", nil)) + if xConstructor.SameAs(c) { + return obj + } + } + pcap := r.newPromiseCapability(c) + pcap.resolve(x) + return pcap.promise +} + +func (r *Runtime) promiseProto_finally(call FunctionCall) Value { + promise := r.toObject(call.This) + c := r.speciesConstructorObj(promise, r.global.Promise) + onFinally := call.Argument(0) + var thenFinally, catchFinally Value + if onFinallyFn, ok := assertCallable(onFinally); !ok { + thenFinally, catchFinally = onFinally, onFinally + } else { + thenFinally = r.newNativeFunc(func(call FunctionCall) Value { + value := call.Argument(0) + result := onFinallyFn(FunctionCall{}) + promise := r.promiseResolve(c, 
result) + valueThunk := r.newNativeFunc(func(call FunctionCall) Value { + return value + }, nil, "", nil, 0) + return r.invoke(promise, "then", valueThunk) + }, nil, "", nil, 1) + + catchFinally = r.newNativeFunc(func(call FunctionCall) Value { + reason := call.Argument(0) + result := onFinallyFn(FunctionCall{}) + promise := r.promiseResolve(c, result) + thrower := r.newNativeFunc(func(call FunctionCall) Value { + panic(reason) + }, nil, "", nil, 0) + return r.invoke(promise, "then", thrower) + }, nil, "", nil, 1) + } + return r.invoke(promise, "then", thenFinally, catchFinally) +} + +func (pcap *promiseCapability) resolve(result Value) { + pcap.promise.runtime.toCallable(pcap.resolveObj)(FunctionCall{Arguments: []Value{result}}) +} + +func (pcap *promiseCapability) reject(reason Value) { + pcap.promise.runtime.toCallable(pcap.rejectObj)(FunctionCall{Arguments: []Value{reason}}) +} + +func (pcap *promiseCapability) try(f func()) bool { + ex := pcap.promise.runtime.vm.try(f) + if ex != nil { + pcap.reject(ex.val) + return false + } + return true +} + +func (r *Runtime) promise_all(call FunctionCall) Value { + c := r.toObject(call.This) + pcap := r.newPromiseCapability(c) + + pcap.try(func() { + promiseResolve := r.toCallable(c.self.getStr("resolve", nil)) + iter := r.getIterator(call.Argument(0), nil) + var values []Value + remainingElementsCount := 1 + r.iterate(iter, func(nextValue Value) { + index := len(values) + values = append(values, _undefined) + nextPromise := promiseResolve(FunctionCall{This: c, Arguments: []Value{nextValue}}) + alreadyCalled := false + onFulfilled := r.newNativeFunc(func(call FunctionCall) Value { + if alreadyCalled { + return _undefined + } + alreadyCalled = true + values[index] = call.Argument(0) + remainingElementsCount-- + if remainingElementsCount == 0 { + pcap.resolve(r.newArrayValues(values)) + } + return _undefined + }, nil, "", nil, 1) + remainingElementsCount++ + r.invoke(nextPromise, "then", onFulfilled, pcap.rejectObj) + }) + 
remainingElementsCount-- + if remainingElementsCount == 0 { + pcap.resolve(r.newArrayValues(values)) + } + }) + return pcap.promise +} + +func (r *Runtime) promise_allSettled(call FunctionCall) Value { + c := r.toObject(call.This) + pcap := r.newPromiseCapability(c) + + pcap.try(func() { + promiseResolve := r.toCallable(c.self.getStr("resolve", nil)) + iter := r.getIterator(call.Argument(0), nil) + var values []Value + remainingElementsCount := 1 + r.iterate(iter, func(nextValue Value) { + index := len(values) + values = append(values, _undefined) + nextPromise := promiseResolve(FunctionCall{This: c, Arguments: []Value{nextValue}}) + alreadyCalled := false + reaction := func(status Value, valueKey unistring.String) *Object { + return r.newNativeFunc(func(call FunctionCall) Value { + if alreadyCalled { + return _undefined + } + alreadyCalled = true + obj := r.NewObject() + obj.self._putProp("status", status, true, true, true) + obj.self._putProp(valueKey, call.Argument(0), true, true, true) + values[index] = obj + remainingElementsCount-- + if remainingElementsCount == 0 { + pcap.resolve(r.newArrayValues(values)) + } + return _undefined + }, nil, "", nil, 1) + } + onFulfilled := reaction(asciiString("fulfilled"), "value") + onRejected := reaction(asciiString("rejected"), "reason") + remainingElementsCount++ + r.invoke(nextPromise, "then", onFulfilled, onRejected) + }) + remainingElementsCount-- + if remainingElementsCount == 0 { + pcap.resolve(r.newArrayValues(values)) + } + }) + return pcap.promise +} + +func (r *Runtime) promise_any(call FunctionCall) Value { + c := r.toObject(call.This) + pcap := r.newPromiseCapability(c) + + pcap.try(func() { + promiseResolve := r.toCallable(c.self.getStr("resolve", nil)) + iter := r.getIterator(call.Argument(0), nil) + var errors []Value + remainingElementsCount := 1 + r.iterate(iter, func(nextValue Value) { + index := len(errors) + errors = append(errors, _undefined) + nextPromise := promiseResolve(FunctionCall{This: c, 
Arguments: []Value{nextValue}}) + alreadyCalled := false + onRejected := r.newNativeFunc(func(call FunctionCall) Value { + if alreadyCalled { + return _undefined + } + alreadyCalled = true + errors[index] = call.Argument(0) + remainingElementsCount-- + if remainingElementsCount == 0 { + _error := r.builtin_new(r.global.AggregateError, nil) + _error.self._putProp("errors", r.newArrayValues(errors), true, false, true) + pcap.reject(_error) + } + return _undefined + }, nil, "", nil, 1) + + remainingElementsCount++ + r.invoke(nextPromise, "then", pcap.resolveObj, onRejected) + }) + remainingElementsCount-- + if remainingElementsCount == 0 { + _error := r.builtin_new(r.global.AggregateError, nil) + _error.self._putProp("errors", r.newArrayValues(errors), true, false, true) + pcap.reject(_error) + } + }) + return pcap.promise +} + +func (r *Runtime) promise_race(call FunctionCall) Value { + c := r.toObject(call.This) + pcap := r.newPromiseCapability(c) + + pcap.try(func() { + promiseResolve := r.toCallable(c.self.getStr("resolve", nil)) + iter := r.getIterator(call.Argument(0), nil) + r.iterate(iter, func(nextValue Value) { + nextPromise := promiseResolve(FunctionCall{This: c, Arguments: []Value{nextValue}}) + r.invoke(nextPromise, "then", pcap.resolveObj, pcap.rejectObj) + }) + }) + return pcap.promise +} + +func (r *Runtime) promise_reject(call FunctionCall) Value { + pcap := r.newPromiseCapability(r.toObject(call.This)) + pcap.reject(call.Argument(0)) + return pcap.promise +} + +func (r *Runtime) promise_resolve(call FunctionCall) Value { + return r.promiseResolve(r.toObject(call.This), call.Argument(0)) +} + +func (r *Runtime) createPromiseProto(val *Object) objectImpl { + o := newBaseObjectObj(val, r.global.ObjectPrototype, classObject) + o._putProp("constructor", r.global.Promise, true, false, true) + + o._putProp("catch", r.newNativeFunc(r.promiseProto_catch, nil, "catch", nil, 1), true, false, true) + o._putProp("finally", r.newNativeFunc(r.promiseProto_finally, 
nil, "finally", nil, 1), true, false, true) + o._putProp("then", r.newNativeFunc(r.promiseProto_then, nil, "then", nil, 2), true, false, true) + + o._putSym(SymToStringTag, valueProp(asciiString(classPromise), false, false, true)) + + return o +} + +func (r *Runtime) createPromise(val *Object) objectImpl { + o := r.newNativeConstructOnly(val, r.builtin_newPromise, r.global.PromisePrototype, "Promise", 1) + + o._putProp("all", r.newNativeFunc(r.promise_all, nil, "all", nil, 1), true, false, true) + o._putProp("allSettled", r.newNativeFunc(r.promise_allSettled, nil, "allSettled", nil, 1), true, false, true) + o._putProp("any", r.newNativeFunc(r.promise_any, nil, "any", nil, 1), true, false, true) + o._putProp("race", r.newNativeFunc(r.promise_race, nil, "race", nil, 1), true, false, true) + o._putProp("reject", r.newNativeFunc(r.promise_reject, nil, "reject", nil, 1), true, false, true) + o._putProp("resolve", r.newNativeFunc(r.promise_resolve, nil, "resolve", nil, 1), true, false, true) + + r.putSpeciesReturnThis(o) + + return o +} + +func (r *Runtime) initPromise() { + r.global.PromisePrototype = r.newLazyObject(r.createPromiseProto) + r.global.Promise = r.newLazyObject(r.createPromise) + + r.addToGlobal("Promise", r.global.Promise) +} + +func (r *Runtime) wrapPromiseReaction(fObj *Object) func(interface{}) { + f, _ := AssertFunction(fObj) + return func(x interface{}) { + _, _ = f(nil, r.ToValue(x)) + } +} + +// NewPromise creates and returns a Promise and resolving functions for it. +// +// WARNING: The returned values are not goroutine-safe and must not be called in parallel with VM running. 
+// In order to make use of this method you need an event loop such as the one in goja_nodejs (https://github.com/dop251/goja_nodejs) +// where it can be used like this: +// +// loop := NewEventLoop() +// loop.Start() +// defer loop.Stop() +// loop.RunOnLoop(func(vm *goja.Runtime) { +// p, resolve, _ := vm.NewPromise() +// vm.Set("p", p) +// go func() { +// time.Sleep(500 * time.Millisecond) // or perform any other blocking operation +// loop.RunOnLoop(func(*goja.Runtime) { // resolve() must be called on the loop, cannot call it here +// resolve(result) +// }) +// }() +// } +func (r *Runtime) NewPromise() (promise *Promise, resolve func(result interface{}), reject func(reason interface{})) { + p := r.newPromise(r.global.PromisePrototype) + resolveF, rejectF := p.createResolvingFunctions() + return p, r.wrapPromiseReaction(resolveF), r.wrapPromiseReaction(rejectF) +} + +// SetPromiseRejectionTracker registers a function that will be called in two scenarios: when a promise is rejected +// without any handlers (with operation argument set to PromiseRejectionReject), and when a handler is added to a +// rejected promise for the first time (with operation argument set to PromiseRejectionHandle). +// +// Setting a tracker replaces any existing one. Setting it to nil disables the functionality. +// +// See https://tc39.es/ecma262/#sec-host-promise-rejection-tracker for more details. 
+func (r *Runtime) SetPromiseRejectionTracker(tracker PromiseRejectionTracker) { + r.promiseRejectionTracker = tracker +} diff --git a/vendor/github.com/dop251/goja/builtin_proxy.go b/vendor/github.com/dop251/goja/builtin_proxy.go index 494547a93..ee62f75e4 100644 --- a/vendor/github.com/dop251/goja/builtin_proxy.go +++ b/vendor/github.com/dop251/goja/builtin_proxy.go @@ -38,7 +38,7 @@ func (h *nativeProxyHandler) preventExtensions(target *Object) (bool, bool) { func (h *nativeProxyHandler) getOwnPropertyDescriptorStr(target *Object, prop unistring.String) (Value, bool) { if trap := h.handler.GetOwnPropertyDescriptorIdx; trap != nil { - if idx, ok := strPropToInt(prop); ok { + if idx, ok := strToInt(prop); ok { desc := trap(target, idx) return desc.toValue(target.runtime), true } @@ -72,7 +72,7 @@ func (h *nativeProxyHandler) getOwnPropertyDescriptorSym(target *Object, prop *S func (h *nativeProxyHandler) definePropertyStr(target *Object, prop unistring.String, desc PropertyDescriptor) (bool, bool) { if trap := h.handler.DefinePropertyIdx; trap != nil { - if idx, ok := strPropToInt(prop); ok { + if idx, ok := strToInt(prop); ok { return trap(target, idx, desc), true } } @@ -101,7 +101,7 @@ func (h *nativeProxyHandler) definePropertySym(target *Object, prop *Symbol, des func (h *nativeProxyHandler) hasStr(target *Object, prop unistring.String) (bool, bool) { if trap := h.handler.HasIdx; trap != nil { - if idx, ok := strPropToInt(prop); ok { + if idx, ok := strToInt(prop); ok { return trap(target, idx), true } } @@ -130,7 +130,7 @@ func (h *nativeProxyHandler) hasSym(target *Object, prop *Symbol) (bool, bool) { func (h *nativeProxyHandler) getStr(target *Object, prop unistring.String, receiver Value) (Value, bool) { if trap := h.handler.GetIdx; trap != nil { - if idx, ok := strPropToInt(prop); ok { + if idx, ok := strToInt(prop); ok { return trap(target, idx, receiver), true } } @@ -159,7 +159,7 @@ func (h *nativeProxyHandler) getSym(target *Object, prop *Symbol, 
receiver Value func (h *nativeProxyHandler) setStr(target *Object, prop unistring.String, value Value, receiver Value) (bool, bool) { if trap := h.handler.SetIdx; trap != nil { - if idx, ok := strPropToInt(prop); ok { + if idx, ok := strToInt(prop); ok { return trap(target, idx, value, receiver), true } } @@ -188,7 +188,7 @@ func (h *nativeProxyHandler) setSym(target *Object, prop *Symbol, value Value, r func (h *nativeProxyHandler) deleteStr(target *Object, prop unistring.String) (bool, bool) { if trap := h.handler.DeletePropertyIdx; trap != nil { - if idx, ok := strPropToInt(prop); ok { + if idx, ok := strToInt(prop); ok { return trap(target, idx), true } } @@ -246,8 +246,12 @@ func (r *Runtime) newNativeProxyHandler(nativeHandler *ProxyTrapConfig) proxyHan // ProxyTrapConfig provides a simplified Go-friendly API for implementing Proxy traps. // If an *Idx trap is defined it gets called for integer property keys, including negative ones. Note that -// this also includes string property keys that can be parsed into an integer. This allows more efficient -// array operations. +// this only includes string property keys that represent a canonical integer +// (i.e. "0", "123", but not "00", "01", " 1" or "-0"). +// For efficiency strings representing integers exceeding 2^53 are not checked to see if they are canonical, +// i.e. the *Idx traps will receive "9007199254740993" as well as "9007199254740994", even though the former is not +// a canonical representation in ECMAScript (Number("9007199254740993") === 9007199254740992). +// See https://262.ecma-international.org/#sec-canonicalnumericindexstring // If an *Idx trap is not set, the corresponding string one is used. 
type ProxyTrapConfig struct { // A trap for Object.getPrototypeOf, Reflect.getPrototypeOf, __proto__, Object.prototype.isPrototypeOf, instanceof diff --git a/vendor/github.com/dop251/goja/builtin_regexp.go b/vendor/github.com/dop251/goja/builtin_regexp.go index 451b87d24..5f9191de2 100644 --- a/vendor/github.com/dop251/goja/builtin_regexp.go +++ b/vendor/github.com/dop251/goja/builtin_regexp.go @@ -900,7 +900,7 @@ func (r *Runtime) regexpproto_stdSplitterGeneric(splitter *Object, s valueString } numberOfCaptures := max(toLength(z.self.getStr("length", nil))-1, 0) for i := int64(1); i <= numberOfCaptures; i++ { - a = append(a, z.self.getIdx(valueInt(i), nil)) + a = append(a, nilSafe(z.self.getIdx(valueInt(i), nil))) if int64(len(a)) == lim { return r.newArrayValues(a) } @@ -1267,10 +1267,6 @@ func (r *Runtime) initRegExp() { r.global.RegExp = r.newNativeFunc(r.builtin_RegExp, r.builtin_newRegExp, "RegExp", r.global.RegExpPrototype, 2) rx := r.global.RegExp.self - rx._putSym(SymSpecies, &valueProperty{ - getterFunc: r.newNativeFunc(r.returnThis, nil, "get [Symbol.species]", nil, 0), - accessor: true, - configurable: true, - }) + r.putSpeciesReturnThis(rx) r.addToGlobal("RegExp", r.global.RegExp) } diff --git a/vendor/github.com/dop251/goja/builtin_set.go b/vendor/github.com/dop251/goja/builtin_set.go index 4a2654058..66ebe5840 100644 --- a/vendor/github.com/dop251/goja/builtin_set.go +++ b/vendor/github.com/dop251/goja/builtin_set.go @@ -218,11 +218,7 @@ func (r *Runtime) createSetProto(val *Object) objectImpl { func (r *Runtime) createSet(val *Object) objectImpl { o := r.newNativeConstructOnly(val, r.builtin_newSet, r.global.SetPrototype, "Set", 0) - o._putSym(SymSpecies, &valueProperty{ - getterFunc: r.newNativeFunc(r.returnThis, nil, "get [Symbol.species]", nil, 0), - accessor: true, - configurable: true, - }) + r.putSpeciesReturnThis(o) return o } diff --git a/vendor/github.com/dop251/goja/builtin_string.go b/vendor/github.com/dop251/goja/builtin_string.go index 
63554ff36..3718a7ece 100644 --- a/vendor/github.com/dop251/goja/builtin_string.go +++ b/vendor/github.com/dop251/goja/builtin_string.go @@ -377,7 +377,7 @@ func (r *Runtime) stringproto_matchAll(call FunctionCall) Value { if regexp != _undefined && regexp != _null { if isRegexp(regexp) { if o, ok := regexp.(*Object); ok { - flags := o.self.getStr("flags", nil) + flags := nilSafe(o.self.getStr("flags", nil)) r.checkObjectCoercible(flags) if !strings.Contains(flags.toString().String(), "g") { panic(r.NewTypeError("RegExp doesn't have global flag set")) diff --git a/vendor/github.com/dop251/goja/builtin_symbol.go b/vendor/github.com/dop251/goja/builtin_symbol.go index 9aa5e771e..00ba2888e 100644 --- a/vendor/github.com/dop251/goja/builtin_symbol.go +++ b/vendor/github.com/dop251/goja/builtin_symbol.go @@ -21,8 +21,6 @@ func (r *Runtime) builtin_symbol(call FunctionCall) Value { var desc valueString if arg := call.Argument(0); !IsUndefined(arg) { desc = arg.toString() - } else { - desc = stringEmpty } return newSymbol(desc) } diff --git a/vendor/github.com/dop251/goja/builtin_typedarrays.go b/vendor/github.com/dop251/goja/builtin_typedarrays.go index 178359775..1e1a93ef6 100644 --- a/vendor/github.com/dop251/goja/builtin_typedarrays.go +++ b/vendor/github.com/dop251/goja/builtin_typedarrays.go @@ -21,7 +21,7 @@ func (ctx *typedArraySortCtx) Len() int { func (ctx *typedArraySortCtx) Less(i, j int) bool { if ctx.needValidate { - ctx.ta.viewedArrayBuf.ensureNotDetached() + ctx.ta.viewedArrayBuf.ensureNotDetached(true) ctx.needValidate = false } offset := ctx.ta.offset @@ -54,7 +54,7 @@ func (ctx *typedArraySortCtx) Less(i, j int) bool { func (ctx *typedArraySortCtx) Swap(i, j int) { if ctx.needValidate { - ctx.ta.viewedArrayBuf.ensureNotDetached() + ctx.ta.viewedArrayBuf.ensureNotDetached(true) ctx.needValidate = false } offset := ctx.ta.offset @@ -88,8 +88,10 @@ func (r *Runtime) builtin_newArrayBuffer(args []Value, newTarget *Object) *Objec func (r *Runtime) 
arrayBufferProto_getByteLength(call FunctionCall) Value { o := r.toObject(call.This) if b, ok := o.self.(*arrayBufferObject); ok { - b.ensureNotDetached() - return intToValue(int64(len(b.data))) + if b.ensureNotDetached(false) { + return intToValue(int64(len(b.data))) + } + return intToValue(0) } panic(r.NewTypeError("Object is not ArrayBuffer: %s", o)) } @@ -109,16 +111,15 @@ func (r *Runtime) arrayBufferProto_slice(call FunctionCall) Value { newLen := max(stop-start, 0) ret := r.speciesConstructor(o, r.global.ArrayBuffer)([]Value{intToValue(newLen)}, nil) if ab, ok := ret.self.(*arrayBufferObject); ok { - ab.ensureNotDetached() - if ret == o { - panic(r.NewTypeError("Species constructor returned the same ArrayBuffer")) - } - if int64(len(ab.data)) < newLen { - panic(r.NewTypeError("Species constructor returned an ArrayBuffer that is too small: %d", len(ab.data))) - } - b.ensureNotDetached() - - if stop > start { + if newLen > 0 { + b.ensureNotDetached(true) + if ret == o { + panic(r.NewTypeError("Species constructor returned the same ArrayBuffer")) + } + if int64(len(ab.data)) < newLen { + panic(r.NewTypeError("Species constructor returned an ArrayBuffer that is too small: %d", len(ab.data))) + } + ab.ensureNotDetached(true) copy(ab.data, b.data[start:stop]) } return ret @@ -162,7 +163,7 @@ func (r *Runtime) newDataView(args []Value, newTarget *Object) *Object { if len(args) > 1 { offsetArg := nilSafe(args[1]) byteOffset = r.toIndex(offsetArg) - buffer.ensureNotDetached() + buffer.ensureNotDetached(true) if byteOffset > len(buffer.data) { panic(r.newError(r.global.RangeError, "Start offset %s is outside the bounds of the buffer", offsetArg.String())) } @@ -201,7 +202,7 @@ func (r *Runtime) dataViewProto_getBuffer(call FunctionCall) Value { func (r *Runtime) dataViewProto_getByteLen(call FunctionCall) Value { if dv, ok := r.toObject(call.This).self.(*dataViewObject); ok { - dv.viewedArrayBuf.ensureNotDetached() + dv.viewedArrayBuf.ensureNotDetached(true) return 
intToValue(int64(dv.byteLen)) } panic(r.NewTypeError("Method get DataView.prototype.byteLength called on incompatible receiver %s", call.This.String())) @@ -209,7 +210,7 @@ func (r *Runtime) dataViewProto_getByteLen(call FunctionCall) Value { func (r *Runtime) dataViewProto_getByteOffset(call FunctionCall) Value { if dv, ok := r.toObject(call.This).self.(*dataViewObject); ok { - dv.viewedArrayBuf.ensureNotDetached() + dv.viewedArrayBuf.ensureNotDetached(true) return intToValue(int64(dv.byteOffset)) } panic(r.NewTypeError("Method get DataView.prototype.byteOffset called on incompatible receiver %s", call.This.String())) @@ -392,7 +393,7 @@ func (r *Runtime) typedArrayProto_getByteOffset(call FunctionCall) Value { func (r *Runtime) typedArrayProto_copyWithin(call FunctionCall) Value { if ta, ok := r.toObject(call.This).self.(*typedArrayObject); ok { - ta.viewedArrayBuf.ensureNotDetached() + ta.viewedArrayBuf.ensureNotDetached(true) l := int64(ta.length) var relEnd int64 to := toIntStrict(relToIdx(call.Argument(0).ToInteger(), l)) @@ -407,7 +408,7 @@ func (r *Runtime) typedArrayProto_copyWithin(call FunctionCall) Value { offset := ta.offset elemSize := ta.elemSize if final > from { - ta.viewedArrayBuf.ensureNotDetached() + ta.viewedArrayBuf.ensureNotDetached(true) copy(data[(offset+to)*elemSize:], data[(offset+from)*elemSize:(offset+final)*elemSize]) } return call.This @@ -417,7 +418,7 @@ func (r *Runtime) typedArrayProto_copyWithin(call FunctionCall) Value { func (r *Runtime) typedArrayProto_entries(call FunctionCall) Value { if ta, ok := r.toObject(call.This).self.(*typedArrayObject); ok { - ta.viewedArrayBuf.ensureNotDetached() + ta.viewedArrayBuf.ensureNotDetached(true) return r.createArrayIterator(ta.val, iterationKindKeyValue) } panic(r.NewTypeError("Method TypedArray.prototype.entries called on incompatible receiver %s", call.This.String())) @@ -425,14 +426,14 @@ func (r *Runtime) typedArrayProto_entries(call FunctionCall) Value { func (r *Runtime) 
typedArrayProto_every(call FunctionCall) Value { if ta, ok := r.toObject(call.This).self.(*typedArrayObject); ok { - ta.viewedArrayBuf.ensureNotDetached() + ta.viewedArrayBuf.ensureNotDetached(true) callbackFn := r.toCallable(call.Argument(0)) fc := FunctionCall{ This: call.Argument(1), Arguments: []Value{nil, nil, call.This}, } for k := 0; k < ta.length; k++ { - ta.viewedArrayBuf.ensureNotDetached() + ta.viewedArrayBuf.ensureNotDetached(true) fc.Arguments[0] = ta.typedArray.get(ta.offset + k) fc.Arguments[1] = intToValue(int64(k)) if !callbackFn(fc).ToBoolean() { @@ -447,7 +448,7 @@ func (r *Runtime) typedArrayProto_every(call FunctionCall) Value { func (r *Runtime) typedArrayProto_fill(call FunctionCall) Value { if ta, ok := r.toObject(call.This).self.(*typedArrayObject); ok { - ta.viewedArrayBuf.ensureNotDetached() + ta.viewedArrayBuf.ensureNotDetached(true) l := int64(ta.length) k := toIntStrict(relToIdx(call.Argument(1).ToInteger(), l)) var relEnd int64 @@ -458,7 +459,7 @@ func (r *Runtime) typedArrayProto_fill(call FunctionCall) Value { } final := toIntStrict(relToIdx(relEnd, l)) value := ta.typedArray.toRaw(call.Argument(0)) - ta.viewedArrayBuf.ensureNotDetached() + ta.viewedArrayBuf.ensureNotDetached(true) for ; k < final; k++ { ta.typedArray.setRaw(ta.offset+k, value) } @@ -470,7 +471,7 @@ func (r *Runtime) typedArrayProto_fill(call FunctionCall) Value { func (r *Runtime) typedArrayProto_filter(call FunctionCall) Value { o := r.toObject(call.This) if ta, ok := o.self.(*typedArrayObject); ok { - ta.viewedArrayBuf.ensureNotDetached() + ta.viewedArrayBuf.ensureNotDetached(true) callbackFn := r.toCallable(call.Argument(0)) fc := FunctionCall{ This: call.Argument(1), @@ -479,7 +480,7 @@ func (r *Runtime) typedArrayProto_filter(call FunctionCall) Value { buf := make([]byte, 0, ta.length*ta.elemSize) captured := 0 for k := 0; k < ta.length; k++ { - ta.viewedArrayBuf.ensureNotDetached() + ta.viewedArrayBuf.ensureNotDetached(true) fc.Arguments[0] = 
ta.typedArray.get(k) fc.Arguments[1] = intToValue(int64(k)) if callbackFn(fc).ToBoolean() { @@ -508,14 +509,14 @@ func (r *Runtime) typedArrayProto_filter(call FunctionCall) Value { func (r *Runtime) typedArrayProto_find(call FunctionCall) Value { if ta, ok := r.toObject(call.This).self.(*typedArrayObject); ok { - ta.viewedArrayBuf.ensureNotDetached() + ta.viewedArrayBuf.ensureNotDetached(true) predicate := r.toCallable(call.Argument(0)) fc := FunctionCall{ This: call.Argument(1), Arguments: []Value{nil, nil, call.This}, } for k := 0; k < ta.length; k++ { - ta.viewedArrayBuf.ensureNotDetached() + ta.viewedArrayBuf.ensureNotDetached(true) val := ta.typedArray.get(ta.offset + k) fc.Arguments[0] = val fc.Arguments[1] = intToValue(int64(k)) @@ -530,14 +531,14 @@ func (r *Runtime) typedArrayProto_find(call FunctionCall) Value { func (r *Runtime) typedArrayProto_findIndex(call FunctionCall) Value { if ta, ok := r.toObject(call.This).self.(*typedArrayObject); ok { - ta.viewedArrayBuf.ensureNotDetached() + ta.viewedArrayBuf.ensureNotDetached(true) predicate := r.toCallable(call.Argument(0)) fc := FunctionCall{ This: call.Argument(1), Arguments: []Value{nil, nil, call.This}, } for k := 0; k < ta.length; k++ { - ta.viewedArrayBuf.ensureNotDetached() + ta.viewedArrayBuf.ensureNotDetached(true) fc.Arguments[0] = ta.typedArray.get(ta.offset + k) fc.Arguments[1] = intToValue(int64(k)) if predicate(fc).ToBoolean() { @@ -551,14 +552,14 @@ func (r *Runtime) typedArrayProto_findIndex(call FunctionCall) Value { func (r *Runtime) typedArrayProto_forEach(call FunctionCall) Value { if ta, ok := r.toObject(call.This).self.(*typedArrayObject); ok { - ta.viewedArrayBuf.ensureNotDetached() + ta.viewedArrayBuf.ensureNotDetached(true) callbackFn := r.toCallable(call.Argument(0)) fc := FunctionCall{ This: call.Argument(1), Arguments: []Value{nil, nil, call.This}, } for k := 0; k < ta.length; k++ { - ta.viewedArrayBuf.ensureNotDetached() + ta.viewedArrayBuf.ensureNotDetached(true) if val := 
ta.typedArray.get(k); val != nil { fc.Arguments[0] = val fc.Arguments[1] = intToValue(int64(k)) @@ -572,7 +573,7 @@ func (r *Runtime) typedArrayProto_forEach(call FunctionCall) Value { func (r *Runtime) typedArrayProto_includes(call FunctionCall) Value { if ta, ok := r.toObject(call.This).self.(*typedArrayObject); ok { - ta.viewedArrayBuf.ensureNotDetached() + ta.viewedArrayBuf.ensureNotDetached(true) length := int64(ta.length) if length == 0 { return valueFalse @@ -587,7 +588,7 @@ func (r *Runtime) typedArrayProto_includes(call FunctionCall) Value { n = max(length+n, 0) } - ta.viewedArrayBuf.ensureNotDetached() + ta.viewedArrayBuf.ensureNotDetached(true) searchElement := call.Argument(0) if searchElement == _negativeZero { searchElement = _positiveZero @@ -607,7 +608,7 @@ func (r *Runtime) typedArrayProto_includes(call FunctionCall) Value { func (r *Runtime) typedArrayProto_indexOf(call FunctionCall) Value { if ta, ok := r.toObject(call.This).self.(*typedArrayObject); ok { - ta.viewedArrayBuf.ensureNotDetached() + ta.viewedArrayBuf.ensureNotDetached(true) length := int64(ta.length) if length == 0 { return intToValue(-1) @@ -622,7 +623,7 @@ func (r *Runtime) typedArrayProto_indexOf(call FunctionCall) Value { n = max(length+n, 0) } - ta.viewedArrayBuf.ensureNotDetached() + ta.viewedArrayBuf.ensureNotDetached(true) searchElement := call.Argument(0) if searchElement == _negativeZero { searchElement = _positiveZero @@ -642,7 +643,7 @@ func (r *Runtime) typedArrayProto_indexOf(call FunctionCall) Value { func (r *Runtime) typedArrayProto_join(call FunctionCall) Value { if ta, ok := r.toObject(call.This).self.(*typedArrayObject); ok { - ta.viewedArrayBuf.ensureNotDetached() + ta.viewedArrayBuf.ensureNotDetached(true) s := call.Argument(0) var sep valueString if s != _undefined { @@ -657,14 +658,14 @@ func (r *Runtime) typedArrayProto_join(call FunctionCall) Value { var buf valueStringBuilder - ta.viewedArrayBuf.ensureNotDetached() + 
ta.viewedArrayBuf.ensureNotDetached(true) element0 := ta.typedArray.get(0) if element0 != nil && element0 != _undefined && element0 != _null { buf.WriteString(element0.toString()) } for i := 1; i < l; i++ { - ta.viewedArrayBuf.ensureNotDetached() + ta.viewedArrayBuf.ensureNotDetached(true) buf.WriteString(sep) element := ta.typedArray.get(i) if element != nil && element != _undefined && element != _null { @@ -679,7 +680,7 @@ func (r *Runtime) typedArrayProto_join(call FunctionCall) Value { func (r *Runtime) typedArrayProto_keys(call FunctionCall) Value { if ta, ok := r.toObject(call.This).self.(*typedArrayObject); ok { - ta.viewedArrayBuf.ensureNotDetached() + ta.viewedArrayBuf.ensureNotDetached(true) return r.createArrayIterator(ta.val, iterationKindKey) } panic(r.NewTypeError("Method TypedArray.prototype.keys called on incompatible receiver %s", call.This.String())) @@ -687,7 +688,7 @@ func (r *Runtime) typedArrayProto_keys(call FunctionCall) Value { func (r *Runtime) typedArrayProto_lastIndexOf(call FunctionCall) Value { if ta, ok := r.toObject(call.This).self.(*typedArrayObject); ok { - ta.viewedArrayBuf.ensureNotDetached() + ta.viewedArrayBuf.ensureNotDetached(true) length := int64(ta.length) if length == 0 { return intToValue(-1) @@ -709,7 +710,7 @@ func (r *Runtime) typedArrayProto_lastIndexOf(call FunctionCall) Value { } } - ta.viewedArrayBuf.ensureNotDetached() + ta.viewedArrayBuf.ensureNotDetached(true) searchElement := call.Argument(0) if searchElement == _negativeZero { searchElement = _positiveZero @@ -730,7 +731,7 @@ func (r *Runtime) typedArrayProto_lastIndexOf(call FunctionCall) Value { func (r *Runtime) typedArrayProto_map(call FunctionCall) Value { if ta, ok := r.toObject(call.This).self.(*typedArrayObject); ok { - ta.viewedArrayBuf.ensureNotDetached() + ta.viewedArrayBuf.ensureNotDetached(true) callbackFn := r.toCallable(call.Argument(0)) fc := FunctionCall{ This: call.Argument(1), @@ -738,7 +739,7 @@ func (r *Runtime) typedArrayProto_map(call 
FunctionCall) Value { } dst := r.typedArraySpeciesCreate(ta, []Value{intToValue(int64(ta.length))}) for i := 0; i < ta.length; i++ { - ta.viewedArrayBuf.ensureNotDetached() + ta.viewedArrayBuf.ensureNotDetached(true) fc.Arguments[0] = ta.typedArray.get(ta.offset + i) fc.Arguments[1] = intToValue(int64(i)) dst.typedArray.set(i, callbackFn(fc)) @@ -750,7 +751,7 @@ func (r *Runtime) typedArrayProto_map(call FunctionCall) Value { func (r *Runtime) typedArrayProto_reduce(call FunctionCall) Value { if ta, ok := r.toObject(call.This).self.(*typedArrayObject); ok { - ta.viewedArrayBuf.ensureNotDetached() + ta.viewedArrayBuf.ensureNotDetached(true) callbackFn := r.toCallable(call.Argument(0)) fc := FunctionCall{ This: _undefined, @@ -769,7 +770,7 @@ func (r *Runtime) typedArrayProto_reduce(call FunctionCall) Value { panic(r.NewTypeError("Reduce of empty array with no initial value")) } for ; k < ta.length; k++ { - ta.viewedArrayBuf.ensureNotDetached() + ta.viewedArrayBuf.ensureNotDetached(true) idx := valueInt(k) fc.Arguments[1] = ta.typedArray.get(ta.offset + k) fc.Arguments[2] = idx @@ -782,7 +783,7 @@ func (r *Runtime) typedArrayProto_reduce(call FunctionCall) Value { func (r *Runtime) typedArrayProto_reduceRight(call FunctionCall) Value { if ta, ok := r.toObject(call.This).self.(*typedArrayObject); ok { - ta.viewedArrayBuf.ensureNotDetached() + ta.viewedArrayBuf.ensureNotDetached(true) callbackFn := r.toCallable(call.Argument(0)) fc := FunctionCall{ This: _undefined, @@ -801,7 +802,7 @@ func (r *Runtime) typedArrayProto_reduceRight(call FunctionCall) Value { panic(r.NewTypeError("Reduce of empty array with no initial value")) } for ; k >= 0; k-- { - ta.viewedArrayBuf.ensureNotDetached() + ta.viewedArrayBuf.ensureNotDetached(true) idx := valueInt(k) fc.Arguments[1] = ta.typedArray.get(ta.offset + k) fc.Arguments[2] = idx @@ -814,7 +815,7 @@ func (r *Runtime) typedArrayProto_reduceRight(call FunctionCall) Value { func (r *Runtime) typedArrayProto_reverse(call 
FunctionCall) Value { if ta, ok := r.toObject(call.This).self.(*typedArrayObject); ok { - ta.viewedArrayBuf.ensureNotDetached() + ta.viewedArrayBuf.ensureNotDetached(true) l := ta.length middle := l / 2 for lower := 0; lower != middle; lower++ { @@ -834,10 +835,10 @@ func (r *Runtime) typedArrayProto_set(call FunctionCall) Value { if targetOffset < 0 { panic(r.newError(r.global.RangeError, "offset should be >= 0")) } - ta.viewedArrayBuf.ensureNotDetached() + ta.viewedArrayBuf.ensureNotDetached(true) targetLen := ta.length if src, ok := srcObj.self.(*typedArrayObject); ok { - src.viewedArrayBuf.ensureNotDetached() + src.viewedArrayBuf.ensureNotDetached(true) srcLen := src.length if x := srcLen + targetOffset; x < 0 || x > targetLen { panic(r.newError(r.global.RangeError, "Source is too large")) @@ -893,7 +894,7 @@ func (r *Runtime) typedArrayProto_set(call FunctionCall) Value { } for i := 0; i < srcLen; i++ { val := nilSafe(srcObj.self.getIdx(valueInt(i), nil)) - ta.viewedArrayBuf.ensureNotDetached() + ta.viewedArrayBuf.ensureNotDetached(true) ta.typedArray.set(targetOffset+i, val) } } @@ -904,7 +905,7 @@ func (r *Runtime) typedArrayProto_set(call FunctionCall) Value { func (r *Runtime) typedArrayProto_slice(call FunctionCall) Value { if ta, ok := r.toObject(call.This).self.(*typedArrayObject); ok { - ta.viewedArrayBuf.ensureNotDetached() + ta.viewedArrayBuf.ensureNotDetached(true) length := int64(ta.length) start := toIntStrict(relToIdx(call.Argument(0).ToInteger(), length)) var e int64 @@ -922,14 +923,14 @@ func (r *Runtime) typedArrayProto_slice(call FunctionCall) Value { dst := r.typedArraySpeciesCreate(ta, []Value{intToValue(int64(count))}) if dst.defaultCtor == ta.defaultCtor { if count > 0 { - ta.viewedArrayBuf.ensureNotDetached() + ta.viewedArrayBuf.ensureNotDetached(true) offset := ta.offset elemSize := ta.elemSize copy(dst.viewedArrayBuf.data, ta.viewedArrayBuf.data[(offset+start)*elemSize:(offset+start+count)*elemSize]) } } else { for i := 0; i < count; 
i++ { - ta.viewedArrayBuf.ensureNotDetached() + ta.viewedArrayBuf.ensureNotDetached(true) dst.typedArray.set(i, ta.typedArray.get(ta.offset+start+i)) } } @@ -940,14 +941,14 @@ func (r *Runtime) typedArrayProto_slice(call FunctionCall) Value { func (r *Runtime) typedArrayProto_some(call FunctionCall) Value { if ta, ok := r.toObject(call.This).self.(*typedArrayObject); ok { - ta.viewedArrayBuf.ensureNotDetached() + ta.viewedArrayBuf.ensureNotDetached(true) callbackFn := r.toCallable(call.Argument(0)) fc := FunctionCall{ This: call.Argument(1), Arguments: []Value{nil, nil, call.This}, } for k := 0; k < ta.length; k++ { - ta.viewedArrayBuf.ensureNotDetached() + ta.viewedArrayBuf.ensureNotDetached(true) fc.Arguments[0] = ta.typedArray.get(ta.offset + k) fc.Arguments[1] = intToValue(int64(k)) if callbackFn(fc).ToBoolean() { @@ -961,7 +962,7 @@ func (r *Runtime) typedArrayProto_some(call FunctionCall) Value { func (r *Runtime) typedArrayProto_sort(call FunctionCall) Value { if ta, ok := r.toObject(call.This).self.(*typedArrayObject); ok { - ta.viewedArrayBuf.ensureNotDetached() + ta.viewedArrayBuf.ensureNotDetached(true) var compareFn func(FunctionCall) Value if arg := call.Argument(0); arg != _undefined { @@ -1004,7 +1005,7 @@ func (r *Runtime) typedArrayProto_toLocaleString(call FunctionCall) Value { length := ta.length var buf valueStringBuilder for i := 0; i < length; i++ { - ta.viewedArrayBuf.ensureNotDetached() + ta.viewedArrayBuf.ensureNotDetached(true) if i > 0 { buf.WriteRune(',') } @@ -1018,7 +1019,7 @@ func (r *Runtime) typedArrayProto_toLocaleString(call FunctionCall) Value { func (r *Runtime) typedArrayProto_values(call FunctionCall) Value { if ta, ok := r.toObject(call.This).self.(*typedArrayObject); ok { - ta.viewedArrayBuf.ensureNotDetached() + ta.viewedArrayBuf.ensureNotDetached(true) return r.createArrayIterator(ta.val, iterationKindValue) } panic(r.NewTypeError("Method TypedArray.prototype.values called on incompatible receiver %s", call.This.String())) 
@@ -1027,7 +1028,7 @@ func (r *Runtime) typedArrayProto_values(call FunctionCall) Value { func (r *Runtime) typedArrayProto_toStringTag(call FunctionCall) Value { if obj, ok := call.This.(*Object); ok { if ta, ok := obj.self.(*typedArrayObject); ok { - return ta.defaultCtor.self.getStr("name", nil) + return nilSafe(ta.defaultCtor.self.getStr("name", nil)) } } @@ -1070,7 +1071,7 @@ func (r *Runtime) typedArraySpeciesCreate(ta *typedArrayObject, args []Value) *t func (r *Runtime) typedArrayCreate(ctor *Object, args []Value) *typedArrayObject { o := r.toConstructor(ctor)(args, ctor) if ta, ok := o.self.(*typedArrayObject); ok { - ta.viewedArrayBuf.ensureNotDetached() + ta.viewedArrayBuf.ensureNotDetached(true) if len(args) == 1 { if l, ok := args[0].(valueInt); ok { if ta.length < int(l) { @@ -1093,11 +1094,7 @@ func (r *Runtime) typedArrayFrom(ctor, items *Object, mapFn, thisValue Value) *O } usingIter := toMethod(items.self.getSym(SymIterator, nil)) if usingIter != nil { - iter := r.getIterator(items, usingIter) - var values []Value - r.iterate(iter, func(item Value) { - values = append(values, item) - }) + values := r.iterableToList(items, usingIter) ta := r.typedArrayCreate(ctor, []Value{intToValue(int64(len(values)))}) if mapFc == nil { for idx, val := range values { @@ -1145,7 +1142,7 @@ func (r *Runtime) _newTypedArrayFromArrayBuffer(ab *arrayBufferObject, args []Va panic(r.newError(r.global.RangeError, "Start offset of %s should be a multiple of %d", newTarget.self.getStr("name", nil), ta.elemSize)) } } - ab.ensureNotDetached() + ab.ensureNotDetached(true) var length int if len(args) > 2 && args[2] != nil && args[2] != _undefined { length = r.toIndex(args[2]) @@ -1165,7 +1162,7 @@ func (r *Runtime) _newTypedArrayFromArrayBuffer(ab *arrayBufferObject, args []Va func (r *Runtime) _newTypedArrayFromTypedArray(src *typedArrayObject, newTarget *Object) *Object { dst := r.typedArrayCreate(newTarget, []Value{_positiveZero}) - src.viewedArrayBuf.ensureNotDetached() + 
src.viewedArrayBuf.ensureNotDetached(true) l := src.length dst.viewedArrayBuf.prototype = r.getPrototypeFromCtor(r.toObject(src.viewedArrayBuf.getStr("constructor", nil)), r.global.ArrayBuffer, r.global.ArrayBufferPrototype) dst.viewedArrayBuf.data = allocByteSlice(toIntStrict(int64(l) * int64(dst.elemSize))) @@ -1259,11 +1256,8 @@ func (r *Runtime) createArrayBufferProto(val *Object) objectImpl { func (r *Runtime) createArrayBuffer(val *Object) objectImpl { o := r.newNativeConstructOnly(val, r.builtin_newArrayBuffer, r.global.ArrayBufferPrototype, "ArrayBuffer", 1) o._putProp("isView", r.newNativeFunc(r.arrayBuffer_isView, nil, "isView", nil, 1), true, false, true) - o._putSym(SymSpecies, &valueProperty{ - getterFunc: r.newNativeFunc(r.returnThis, nil, "get [Symbol.species]", nil, 0), - accessor: true, - configurable: true, - }) + r.putSpeciesReturnThis(o) + return o } @@ -1374,11 +1368,7 @@ func (r *Runtime) createTypedArray(val *Object) objectImpl { o := r.newNativeConstructOnly(val, r.newTypedArray, r.global.TypedArrayPrototype, "TypedArray", 0) o._putProp("from", r.newNativeFunc(r.typedArray_from, nil, "from", nil, 1), true, false, true) o._putProp("of", r.newNativeFunc(r.typedArray_of, nil, "of", nil, 0), true, false, true) - o._putSym(SymSpecies, &valueProperty{ - getterFunc: r.newNativeFunc(r.returnThis, nil, "get [Symbol.species]", nil, 0), - accessor: true, - configurable: true, - }) + r.putSpeciesReturnThis(o) return o } diff --git a/vendor/github.com/dop251/goja/compiler.go b/vendor/github.com/dop251/goja/compiler.go index ed0c10cb7..04a4763a2 100644 --- a/vendor/github.com/dop251/goja/compiler.go +++ b/vendor/github.com/dop251/goja/compiler.go @@ -2,6 +2,7 @@ package goja import ( "fmt" + "github.com/dop251/goja/token" "sort" "github.com/dop251/goja/ast" @@ -25,9 +26,10 @@ const ( const ( maskConst = 1 << 31 maskVar = 1 << 30 - maskDeletable = maskConst + maskDeletable = 1 << 29 + maskStrict = maskDeletable - maskTyp = maskConst | maskVar + maskTyp = 
maskConst | maskVar | maskDeletable ) type varType byte @@ -35,6 +37,7 @@ type varType byte const ( varTypeVar varType = iota varTypeLet + varTypeStrictConst varTypeConst ) @@ -81,6 +84,7 @@ type binding struct { name unistring.String accessPoints map[*scope]*[]int isConst bool + isStrict bool isArg bool isVar bool inStash bool @@ -99,6 +103,17 @@ func (b *binding) getAccessPointsForScope(s *scope) *[]int { return m } +func (b *binding) markAccessPointAt(pos int) { + scope := b.scope.c.scope + m := b.getAccessPointsForScope(scope) + *m = append(*m, pos-scope.base) +} + +func (b *binding) markAccessPointAtScope(scope *scope, pos int) { + m := b.getAccessPointsForScope(scope) + *m = append(*m, pos-scope.base) +} + func (b *binding) markAccessPoint() { scope := b.scope.c.scope m := b.getAccessPointsForScope(scope) @@ -114,6 +129,15 @@ func (b *binding) emitGet() { } } +func (b *binding) emitGetAt(pos int) { + b.markAccessPointAt(pos) + if b.isVar && !b.isArg { + b.scope.c.p.code[pos] = loadStash(0) + } else { + b.scope.c.p.code[pos] = loadStashLex(0) + } +} + func (b *binding) emitGetP() { if b.isVar && !b.isArg { // no-op @@ -126,7 +150,9 @@ func (b *binding) emitGetP() { func (b *binding) emitSet() { if b.isConst { - b.scope.c.emit(throwAssignToConst) + if b.isStrict || b.scope.c.scope.strict { + b.scope.c.emit(throwAssignToConst) + } return } b.markAccessPoint() @@ -139,7 +165,9 @@ func (b *binding) emitSet() { func (b *binding) emitSetP() { if b.isConst { - b.scope.c.emit(throwAssignToConst) + if b.isStrict || b.scope.c.scope.strict { + b.scope.c.emit(throwAssignToConst) + } return } b.markAccessPoint() @@ -171,7 +199,11 @@ func (b *binding) emitResolveVar(strict bool) { } else { var typ varType if b.isConst { - typ = varTypeConst + if b.isStrict { + typ = varTypeStrictConst + } else { + typ = varTypeConst + } } else { typ = varTypeLet } @@ -216,6 +248,10 @@ type scope struct { // is a function or a top-level lexical environment function bool + // is an arrow 
function's top-level lexical environment (functions only) + arrow bool + // is a variable environment, i.e. the target for dynamically created var bindings + variable bool // a function scope that has at least one direct eval() and non-strict, so the variables can be added dynamically dynamic bool // arguments have been marked for placement in stash (functions only) @@ -331,6 +367,8 @@ func (p *Program) _dumpCode(indent string, logger func(format string, args ...in logger("%s %d: %T(%v)", indent, pc, ins, ins) if f, ok := ins.(*newFunc); ok { f.prg._dumpCode(indent+">", logger) + } else if f, ok := ins.(*newArrowFunc); ok { + f.prg._dumpCode(indent+">", logger) } } } @@ -361,7 +399,7 @@ func (s *scope) lookupName(name unistring.String) (binding *binding, noDynamics return } } - if name == "arguments" && curScope.function { + if name == "arguments" && curScope.function && !curScope.arrow { curScope.argsNeeded = true binding, _ = curScope.bindName(name) return @@ -379,28 +417,33 @@ func (s *scope) ensureBoundNamesCreated() { } } -func (s *scope) bindNameLexical(name unistring.String, unique bool, offset int) (*binding, bool) { - if b := s.boundNames[name]; b != nil { - if unique { - s.c.throwSyntaxError(offset, "Identifier '%s' has already been declared", name) - } - return b, false - } +func (s *scope) addBinding(offset int) *binding { if len(s.bindings) >= (1<<24)-1 { s.c.throwSyntaxError(offset, "Too many variables") } b := &binding{ scope: s, - name: name, } s.bindings = append(s.bindings, b) + return b +} + +func (s *scope) bindNameLexical(name unistring.String, unique bool, offset int) (*binding, bool) { + if b := s.boundNames[name]; b != nil { + if unique { + s.c.throwSyntaxError(offset, "Identifier '%s' has already been declared", name) + } + return b, false + } + b := s.addBinding(offset) + b.name = name s.ensureBoundNamesCreated() s.boundNames[name] = b return b, true } func (s *scope) bindName(name unistring.String) (*binding, bool) { - if !s.function && 
s.outer != nil { + if !s.function && !s.variable && s.outer != nil { return s.outer.bindName(name) } b, created := s.bindNameLexical(name, false, 0) @@ -476,6 +519,8 @@ func (s *scope) finaliseVarAlloc(stackOffset int) (stashSize, stackSize int) { *ap = initStash(idx) case *loadMixed: i.idx = idx + case *loadMixedLex: + i.idx = idx case *resolveMixed: i.idx = idx } @@ -592,6 +637,9 @@ func (s *scope) makeNamesMap() map[unistring.String]uint32 { idx := uint32(i) if b.isConst { idx |= maskConst + if b.isStrict { + idx |= maskStrict + } } if b.isVar { idx |= maskVar @@ -629,7 +677,7 @@ func (c *compiler) compile(in *ast.Program, strict, eval, inGlobal bool) { scope.dynamic = true scope.eval = eval if !strict && len(in.Body) > 0 { - strict = c.isStrict(in.Body) + strict = c.isStrict(in.Body) != nil } scope.strict = strict ownVarScope := eval && strict @@ -710,7 +758,7 @@ func (c *compiler) compile(in *ast.Program, strict, eval, inGlobal bool) { func (c *compiler) compileDeclList(v []*ast.VariableDeclaration, inFunc bool) { for _, value := range v { - c.compileVarDecl(value, inFunc) + c.createVarBindings(value, inFunc) } } @@ -744,7 +792,7 @@ func (c *compiler) extractFunctions(list []ast.Statement) (funcs []*ast.Function func (c *compiler) createFunctionBindings(funcs []*ast.FunctionDeclaration) { s := c.scope if s.outer != nil { - unique := !s.function && s.strict + unique := !s.function && !s.variable && s.strict for _, decl := range funcs { s.bindNameLexical(decl.Function.Name.Name, unique, int(decl.Function.Name.Idx1())-1) } @@ -786,14 +834,133 @@ func (c *compiler) compileFunctionsGlobal(list []*ast.FunctionDeclaration) { } } -func (c *compiler) compileVarDecl(v *ast.VariableDeclaration, inFunc bool) { +func (c *compiler) createVarIdBinding(name unistring.String, offset int, inFunc bool) { + if c.scope.strict { + c.checkIdentifierLName(name, offset) + c.checkIdentifierName(name, offset) + } + if !inFunc || name != "arguments" { + c.scope.bindName(name) + } +} + 
+func (c *compiler) createBindings(target ast.Expression, createIdBinding func(name unistring.String, offset int)) { + switch target := target.(type) { + case *ast.Identifier: + createIdBinding(target.Name, int(target.Idx)-1) + case *ast.ObjectPattern: + for _, prop := range target.Properties { + switch prop := prop.(type) { + case *ast.PropertyShort: + createIdBinding(prop.Name.Name, int(prop.Name.Idx)-1) + case *ast.PropertyKeyed: + c.createBindings(prop.Value, createIdBinding) + default: + c.throwSyntaxError(int(target.Idx0()-1), "unsupported property type in ObjectPattern: %T", prop) + } + } + if target.Rest != nil { + c.createBindings(target.Rest, createIdBinding) + } + case *ast.ArrayPattern: + for _, elt := range target.Elements { + if elt != nil { + c.createBindings(elt, createIdBinding) + } + } + if target.Rest != nil { + c.createBindings(target.Rest, createIdBinding) + } + case *ast.AssignExpression: + c.createBindings(target.Left, createIdBinding) + default: + c.throwSyntaxError(int(target.Idx0()-1), "unsupported binding target: %T", target) + } +} + +func (c *compiler) createVarBinding(target ast.Expression, inFunc bool) { + c.createBindings(target, func(name unistring.String, offset int) { + c.createVarIdBinding(name, offset, inFunc) + }) +} + +func (c *compiler) createVarBindings(v *ast.VariableDeclaration, inFunc bool) { for _, item := range v.List { - if c.scope.strict { - c.checkIdentifierLName(item.Name, int(item.Idx)-1) - c.checkIdentifierName(item.Name, int(item.Idx)-1) + c.createVarBinding(item.Target, inFunc) + } +} + +func (c *compiler) createLexicalIdBinding(name unistring.String, isConst bool, offset int) *binding { + if name == "let" { + c.throwSyntaxError(offset, "let is disallowed as a lexically bound name") + } + if c.scope.strict { + c.checkIdentifierLName(name, offset) + c.checkIdentifierName(name, offset) + } + b, _ := c.scope.bindNameLexical(name, true, offset) + if isConst { + b.isConst, b.isStrict = true, true + } + return b +} + 
+func (c *compiler) createLexicalIdBindingFuncBody(name unistring.String, isConst bool, offset int, calleeBinding *binding) *binding { + if name == "let" { + c.throwSyntaxError(offset, "let is disallowed as a lexically bound name") + } + if c.scope.strict { + c.checkIdentifierLName(name, offset) + c.checkIdentifierName(name, offset) + } + paramScope := c.scope.outer + parentBinding := paramScope.boundNames[name] + if parentBinding != nil { + if parentBinding != calleeBinding && (name != "arguments" || !paramScope.argsNeeded) { + c.throwSyntaxError(offset, "Identifier '%s' has already been declared", name) } - if !inFunc || item.Name != "arguments" { - c.scope.bindName(item.Name) + } + b, _ := c.scope.bindNameLexical(name, true, offset) + if isConst { + b.isConst, b.isStrict = true, true + } + return b +} + +func (c *compiler) createLexicalBinding(target ast.Expression, isConst bool) { + c.createBindings(target, func(name unistring.String, offset int) { + c.createLexicalIdBinding(name, isConst, offset) + }) +} + +func (c *compiler) createLexicalBindings(lex *ast.LexicalDeclaration) { + for _, d := range lex.List { + c.createLexicalBinding(d.Target, lex.Token == token.CONST) + } +} + +func (c *compiler) compileLexicalDeclarations(list []ast.Statement, scopeDeclared bool) bool { + for _, st := range list { + if lex, ok := st.(*ast.LexicalDeclaration); ok { + if !scopeDeclared { + c.newBlockScope() + scopeDeclared = true + } + c.createLexicalBindings(lex) + } + } + return scopeDeclared +} + +func (c *compiler) compileLexicalDeclarationsFuncBody(list []ast.Statement, calleeBinding *binding) { + for _, st := range list { + if lex, ok := st.(*ast.LexicalDeclaration); ok { + isConst := lex.Token == token.CONST + for _, d := range lex.List { + c.createBindings(d.Target, func(name unistring.String, offset int) { + c.createLexicalIdBindingFuncBody(name, isConst, offset, calleeBinding) + }) + } } } } @@ -834,12 +1001,12 @@ func (c *compiler) throwSyntaxError(offset int, format 
string, args ...interface }) } -func (c *compiler) isStrict(list []ast.Statement) bool { +func (c *compiler) isStrict(list []ast.Statement) *ast.StringLiteral { for _, st := range list { if st, ok := st.(*ast.ExpressionStatement); ok { if e, ok := st.Expression.(*ast.StringLiteral); ok { if e.Literal == `"use strict"` || e.Literal == `'use strict'` { - return true + return e } } else { break @@ -848,14 +1015,14 @@ func (c *compiler) isStrict(list []ast.Statement) bool { break } } - return false + return nil } -func (c *compiler) isStrictStatement(s ast.Statement) bool { +func (c *compiler) isStrictStatement(s ast.Statement) *ast.StringLiteral { if s, ok := s.(*ast.BlockStatement); ok { return c.isStrict(s.List) } - return false + return nil } func (c *compiler) checkIdentifierName(name unistring.String, offset int) { diff --git a/vendor/github.com/dop251/goja/compiler_expr.go b/vendor/github.com/dop251/goja/compiler_expr.go index 1ef4246ee..669cd3431 100644 --- a/vendor/github.com/dop251/goja/compiler_expr.go +++ b/vendor/github.com/dop251/goja/compiler_expr.go @@ -2,21 +2,16 @@ package goja import ( "fmt" - "regexp" - "github.com/dop251/goja/ast" "github.com/dop251/goja/file" "github.com/dop251/goja/token" "github.com/dop251/goja/unistring" ) -var ( - octalRegexp = regexp.MustCompile(`^0[0-7]`) -) - type compiledExpr interface { emitGetter(putOnStack bool) emitSetter(valueExpr compiledExpr, putOnStack bool) + emitRef() emitUnary(prepare, body func(), postfix, putOnStack bool) deleteExpr() compiledExpr constant() bool @@ -32,6 +27,12 @@ type compiledCallExpr struct { baseCompiledExpr args []compiledExpr callee compiledExpr + + isVariadic bool +} + +type compiledNewExpr struct { + compiledCallExpr } type compiledObjectLiteral struct { @@ -54,12 +55,29 @@ type compiledLiteral struct { val Value } +type compiledTemplateLiteral struct { + baseCompiledExpr + tag compiledExpr + elements []*ast.TemplateElement + expressions []compiledExpr +} + type compiledAssignExpr 
struct { baseCompiledExpr left, right compiledExpr operator token.Token } +type compiledObjectAssignmentPattern struct { + baseCompiledExpr + expr *ast.ObjectPattern +} + +type compiledArrayAssignmentPattern struct { + baseCompiledExpr + expr *ast.ArrayPattern +} + type deleteGlobalExpr struct { baseCompiledExpr name unistring.String @@ -98,10 +116,15 @@ type compiledIdentifierExpr struct { type compiledFunctionLiteral struct { baseCompiledExpr - expr *ast.FunctionLiteral - lhsName unistring.String - isExpr bool - strict bool + name *ast.Identifier + parameterList *ast.ParameterList + body []ast.Statement + source string + declarationList []*ast.VariableDeclaration + lhsName unistring.String + strict *ast.StringLiteral + isExpr bool + isArrow bool } type compiledBracketExpr struct { @@ -113,12 +136,6 @@ type compiledThisExpr struct { baseCompiledExpr } -type compiledNewExpr struct { - baseCompiledExpr - callee compiledExpr - args []compiledExpr -} - type compiledNewTarget struct { baseCompiledExpr } @@ -156,17 +173,16 @@ type compiledBinaryExpr struct { operator token.Token } -type compiledVariableExpr struct { +type compiledEnumGetExpr struct { baseCompiledExpr - name unistring.String - initializer compiledExpr } -type compiledEnumGetExpr struct { +type defaultDeleteExpr struct { baseCompiledExpr + expr compiledExpr } -type defaultDeleteExpr struct { +type compiledSpreadCallArgument struct { baseCompiledExpr expr compiledExpr } @@ -189,6 +205,8 @@ func (c *compiler) compileExpression(v ast.Expression) compiledExpr { return c.compileNumberLiteral(v) case *ast.StringLiteral: return c.compileStringLiteral(v) + case *ast.TemplateLiteral: + return c.compileTemplateLiteral(v) case *ast.BooleanLiteral: return c.compileBooleanLiteral(v) case *ast.NullLiteral: @@ -207,8 +225,6 @@ func (c *compiler) compileExpression(v ast.Expression) compiledExpr { return c.compileArrayLiteral(v) case *ast.RegExpLiteral: return c.compileRegexpLiteral(v) - case *ast.VariableExpression: - 
return c.compileVariableExpression(v) case *ast.BinaryExpression: return c.compileBinaryExpression(v) case *ast.UnaryExpression: @@ -217,6 +233,8 @@ func (c *compiler) compileExpression(v ast.Expression) compiledExpr { return c.compileConditionalExpression(v) case *ast.FunctionLiteral: return c.compileFunctionLiteral(v, true) + case *ast.ArrowFunctionLiteral: + return c.compileArrowFunctionLiteral(v) case *ast.DotExpression: r := &compiledDotExpr{ left: c.compileExpression(v.Left), @@ -241,6 +259,10 @@ func (c *compiler) compileExpression(v ast.Expression) compiledExpr { return c.compileNewExpression(v) case *ast.MetaProperty: return c.compileMetaProperty(v) + case *ast.ObjectPattern: + return c.compileObjectAssignmentPattern(v) + case *ast.ArrayPattern: + return c.compileArrayAssignmentPattern(v) default: panic(fmt.Errorf("Unknown expression type: %T", v)) } @@ -259,6 +281,10 @@ func (e *baseCompiledExpr) emitSetter(compiledExpr, bool) { e.c.throwSyntaxError(e.offset, "Not a valid left-value expression") } +func (e *baseCompiledExpr) emitRef() { + e.c.throwSyntaxError(e.offset, "Cannot emit reference for this type of expression") +} + func (e *baseCompiledExpr) deleteExpr() compiledExpr { r := &constantExpr{ val: valueTrue, @@ -391,8 +417,25 @@ func (c *compiler) emitVarSetter(name unistring.String, offset int, valueExpr co }) } -func (e *compiledVariableExpr) emitSetter(valueExpr compiledExpr, putOnStack bool) { - e.c.emitVarSetter(e.name, e.offset, valueExpr, putOnStack) +func (c *compiler) emitVarRef(name unistring.String, offset int) { + if c.scope.strict { + c.checkIdentifierLName(name, offset) + } + + b, _ := c.scope.lookupName(name) + if b != nil { + b.emitResolveVar(c.scope.strict) + } else { + if c.scope.strict { + c.emit(resolveVar1Strict(name)) + } else { + c.emit(resolveVar1(name)) + } + } +} + +func (e *compiledIdentifierExpr) emitRef() { + e.c.emitVarRef(e.name, e.offset) } func (e *compiledIdentifierExpr) emitSetter(valueExpr compiledExpr, 
putOnStack bool) { @@ -476,6 +519,15 @@ func (e *compiledDotExpr) emitGetter(putOnStack bool) { } } +func (e *compiledDotExpr) emitRef() { + e.left.emitGetter(true) + if e.c.scope.strict { + e.c.emit(getPropRefStrict(e.name)) + } else { + e.c.emit(getPropRef(e.name)) + } +} + func (e *compiledDotExpr) emitSetter(valueExpr compiledExpr, putOnStack bool) { e.left.emitGetter(true) valueExpr.emitGetter(true) @@ -558,6 +610,16 @@ func (e *compiledBracketExpr) emitGetter(putOnStack bool) { } } +func (e *compiledBracketExpr) emitRef() { + e.left.emitGetter(true) + e.member.emitGetter(true) + if e.c.scope.strict { + e.c.emit(getElemRefStrict) + } else { + e.c.emit(getElemRef) + } +} + func (e *compiledBracketExpr) emitSetter(valueExpr compiledExpr, putOnStack bool) { e.left.emitGetter(true) e.member.emitGetter(true) @@ -688,7 +750,7 @@ func (e *compiledAssignExpr) emitGetter(putOnStack bool) { switch e.operator { case token.ASSIGN: if fn, ok := e.right.(*compiledFunctionLiteral); ok { - if fn.expr.Name == nil { + if fn.name == nil { if id, ok := e.left.(*compiledIdentifierExpr); ok { fn.lhsName = id.name } @@ -766,17 +828,104 @@ func (e *compiledLiteral) constant() bool { return true } +func (e *compiledTemplateLiteral) emitGetter(putOnStack bool) { + if e.tag == nil { + if len(e.elements) == 0 { + e.c.emit(loadVal(e.c.p.defineLiteralValue(stringEmpty))) + } else { + tail := e.elements[len(e.elements)-1].Parsed + if len(e.elements) == 1 { + e.c.emit(loadVal(e.c.p.defineLiteralValue(stringValueFromRaw(tail)))) + } else { + stringCount := 0 + if head := e.elements[0].Parsed; head != "" { + e.c.emit(loadVal(e.c.p.defineLiteralValue(stringValueFromRaw(head)))) + stringCount++ + } + e.expressions[0].emitGetter(true) + e.c.emit(_toString{}) + stringCount++ + for i := 1; i < len(e.elements)-1; i++ { + if elt := e.elements[i].Parsed; elt != "" { + e.c.emit(loadVal(e.c.p.defineLiteralValue(stringValueFromRaw(elt)))) + stringCount++ + } + e.expressions[i].emitGetter(true) + 
e.c.emit(_toString{}) + stringCount++ + } + if tail != "" { + e.c.emit(loadVal(e.c.p.defineLiteralValue(stringValueFromRaw(tail)))) + stringCount++ + } + e.c.emit(concatStrings(stringCount)) + } + } + } else { + cooked := make([]Value, len(e.elements)) + raw := make([]Value, len(e.elements)) + for i, elt := range e.elements { + raw[i] = &valueProperty{ + enumerable: true, + value: newStringValue(elt.Literal), + } + var cookedVal Value + if elt.Valid { + cookedVal = stringValueFromRaw(elt.Parsed) + } else { + cookedVal = _undefined + } + cooked[i] = &valueProperty{ + enumerable: true, + value: cookedVal, + } + } + e.c.emitCallee(e.tag) + e.c.emit(&getTaggedTmplObject{ + raw: raw, + cooked: cooked, + }) + for _, expr := range e.expressions { + expr.emitGetter(true) + } + e.c.emit(call(len(e.expressions) + 1)) + } + if !putOnStack { + e.c.emit(pop) + } +} + +func (c *compiler) compileParameterBindingIdentifier(name unistring.String, offset int) (*binding, bool) { + if c.scope.strict { + c.checkIdentifierName(name, offset) + c.checkIdentifierLName(name, offset) + } + return c.scope.bindNameShadow(name) +} + +func (c *compiler) compileParameterPatternIdBinding(name unistring.String, offset int) { + if _, unique := c.compileParameterBindingIdentifier(name, offset); !unique { + c.throwSyntaxError(offset, "Duplicate parameter name not allowed in this context") + } +} + +func (c *compiler) compileParameterPatternBinding(item ast.Expression) { + c.createBindings(item, c.compileParameterPatternIdBinding) +} + func (e *compiledFunctionLiteral) emitGetter(putOnStack bool) { savedPrg := e.c.p e.c.p = &Program{ src: e.c.p.src, } e.c.newScope() - e.c.scope.function = true + s := e.c.scope + s.function = true + s.arrow = e.isArrow var name unistring.String - if e.expr.Name != nil { - name = e.expr.Name.Name + if e.name != nil { + name = e.name.Name } else { name = e.lhsName } @@ -793,48 +942,187 @@ func (e *compiledFunctionLiteral) emitGetter(putOnStack bool) { typ: blockScope, } - 
if !e.c.scope.strict { - e.c.scope.strict = e.strict + if !s.strict { + s.strict = e.strict != nil } - if e.c.scope.strict { - for _, item := range e.expr.ParameterList.List { - e.c.checkIdentifierName(item.Name, int(item.Idx)-1) - e.c.checkIdentifierLName(item.Name, int(item.Idx)-1) - } + hasPatterns := false + hasInits := false + firstDupIdx := -1 + length := 0 + + if e.parameterList.Rest != nil { + hasPatterns = true // strictly speaking not, but we need to activate all the checks } - length := len(e.expr.ParameterList.List) + // First, make sure that the first bindings correspond to the formal parameters + for _, item := range e.parameterList.List { + switch tgt := item.Target.(type) { + case *ast.Identifier: + offset := int(tgt.Idx) - 1 + b, unique := e.c.compileParameterBindingIdentifier(tgt.Name, offset) + if !unique { + firstDupIdx = offset + } + b.isArg = true + case ast.Pattern: + b := s.addBinding(int(item.Idx0()) - 1) + b.isArg = true + hasPatterns = true + default: + e.c.throwSyntaxError(int(item.Idx0())-1, "Unsupported BindingElement type: %T", item) + return + } + if item.Initializer != nil { + hasInits = true + } - for _, item := range e.expr.ParameterList.List { - b, unique := e.c.scope.bindNameShadow(item.Name) - if !unique && e.c.scope.strict { - e.c.throwSyntaxError(int(item.Idx)-1, "Strict mode function may not have duplicate parameter names (%s)", item.Name) + if firstDupIdx >= 0 && (hasPatterns || hasInits || s.strict || e.isArrow) { + e.c.throwSyntaxError(firstDupIdx, "Duplicate parameter name not allowed in this context") return } - b.isArg = true - b.isVar = true + + if (hasPatterns || hasInits) && e.strict != nil { + e.c.throwSyntaxError(int(e.strict.Idx)-1, "Illegal 'use strict' directive in function with non-simple parameter list") + return + } + + if !hasInits { + length++ + } } - paramsCount := len(e.c.scope.bindings) - e.c.scope.numArgs = paramsCount - e.c.compileDeclList(e.expr.DeclarationList, true) - body := e.expr.Body.List - 
funcs := e.c.extractFunctions(body) - e.c.createFunctionBindings(funcs) - s := e.c.scope - e.c.compileLexicalDeclarations(body, true) - var calleeBinding *binding - if e.isExpr && e.expr.Name != nil { - if b, created := s.bindName(e.expr.Name.Name); created { - calleeBinding = b + + // create pattern bindings + if hasPatterns { + for _, item := range e.parameterList.List { + switch tgt := item.Target.(type) { + case *ast.Identifier: + // we already created those in the previous loop, skipping + default: + e.c.compileParameterPatternBinding(tgt) + } + } + if rest := e.parameterList.Rest; rest != nil { + e.c.compileParameterPatternBinding(rest) } } + + paramsCount := len(e.parameterList.List) + + s.numArgs = paramsCount + body := e.body + funcs := e.c.extractFunctions(body) + var calleeBinding *binding preambleLen := 4 // enter, boxThis, createArgs, set e.c.p.code = make([]instruction, preambleLen, 8) - if calleeBinding != nil { - e.c.emit(loadCallee) - calleeBinding.emitSetP() + emitArgsRestMark := -1 + firstForwardRef := -1 + enterFunc2Mark := -1 + + if hasPatterns || hasInits { + if e.isExpr && e.name != nil { + if b, created := s.bindNameLexical(e.name.Name, false, 0); created { + b.isConst = true + calleeBinding = b + } + } + if calleeBinding != nil { + e.c.emit(loadCallee) + calleeBinding.emitInit() + } + for i, item := range e.parameterList.List { + if pattern, ok := item.Target.(ast.Pattern); ok { + i := i + e.c.compilePatternInitExpr(func() { + if firstForwardRef == -1 { + s.bindings[i].emitGet() + } else { + e.c.emit(loadStackLex(-i - 1)) + } + }, item.Initializer, item.Target.Idx0()).emitGetter(true) + e.c.emitPattern(pattern, func(target, init compiledExpr) { + e.c.emitPatternLexicalAssign(target, init, false) + }, false) + } else if item.Initializer != nil { + markGet := len(e.c.p.code) + e.c.emit(nil) + mark := len(e.c.p.code) + e.c.emit(nil) + e.c.compileExpression(item.Initializer).emitGetter(true) + if firstForwardRef == -1 && (s.isDynamic() || 
s.bindings[i].useCount() > 0) { + firstForwardRef = i + } + if firstForwardRef == -1 { + s.bindings[i].emitGetAt(markGet) + } else { + e.c.p.code[markGet] = loadStackLex(-i - 1) + } + s.bindings[i].emitInit() + e.c.p.code[mark] = jdefP(len(e.c.p.code) - mark) + } else { + if firstForwardRef == -1 && s.bindings[i].useCount() > 0 { + firstForwardRef = i + } + if firstForwardRef != -1 { + e.c.emit(loadStackLex(-i - 1)) + s.bindings[i].emitInit() + } + } + } + if rest := e.parameterList.Rest; rest != nil { + e.c.emitAssign(rest, e.c.compileEmitterExpr( + func() { + emitArgsRestMark = len(e.c.p.code) + e.c.emit(createArgsRestStack(paramsCount)) + }, rest.Idx0()), + func(target, init compiledExpr) { + e.c.emitPatternLexicalAssign(target, init, false) + }) + } + if firstForwardRef != -1 { + for _, b := range s.bindings { + b.inStash = true + } + s.argsInStash = true + s.needStash = true + } + + e.c.newBlockScope() + varScope := e.c.scope + varScope.variable = true + enterFunc2Mark = len(e.c.p.code) + e.c.emit(nil) + e.c.compileDeclList(e.declarationList, false) + e.c.createFunctionBindings(funcs) + e.c.compileLexicalDeclarationsFuncBody(body, calleeBinding) + for _, b := range varScope.bindings { + if b.isVar { + if parentBinding := s.boundNames[b.name]; parentBinding != nil && parentBinding != calleeBinding { + parentBinding.emitGet() + b.emitSetP() + } + } + } + } else { + // To avoid triggering variable conflict when binding from non-strict direct eval(). + // Parameters are supposed to be in a parent scope, hence no conflict. 
+ for _, b := range s.bindings[:paramsCount] { + b.isVar = true + } + e.c.compileDeclList(e.declarationList, true) + e.c.createFunctionBindings(funcs) + e.c.compileLexicalDeclarations(body, true) + if e.isExpr && e.name != nil { + if b, created := s.bindNameLexical(e.name.Name, false, 0); created { + b.isConst = true + calleeBinding = b + } + } + if calleeBinding != nil { + e.c.emit(loadCallee) + calleeBinding.emitInit() + } } e.c.compileFunctions(funcs) @@ -856,23 +1144,31 @@ func (e *compiledFunctionLiteral) emitGetter(putOnStack bool) { preambleLen += 2 } - if (s.argsNeeded || s.isDynamic()) && !s.argsInStash { + if !s.argsInStash && (s.argsNeeded || s.isDynamic()) { s.moveArgsToStash() } if s.argsNeeded { - pos := preambleLen - 2 - delta += 2 - if s.strict { - code[pos] = createArgsStrict(length) + b, created := s.bindNameLexical("arguments", false, 0) + if !created && !b.isVar { + s.argsNeeded = false } else { - code[pos] = createArgs(length) + if s.strict { + b.isConst = true + } else { + b.isVar = e.c.scope.function + } + pos := preambleLen - 2 + delta += 2 + if s.strict || hasPatterns || hasInits { + code[pos] = createArgsUnmapped(paramsCount) + } else { + code[pos] = createArgsMapped(paramsCount) + } + pos++ + b.markAccessPointAtScope(s, pos) + code[pos] = storeStashP(0) } - pos++ - b, _ := s.bindName("arguments") - e.c.p.code = code[:pos] - b.emitSetP() - e.c.p.code = code } stashSize, stackSize := s.finaliseVarAlloc(0) @@ -885,22 +1181,60 @@ func (e *compiledFunctionLiteral) emitGetter(putOnStack bool) { delta = preambleLen - delta var enter instruction if stashSize > 0 || s.argsInStash { - enter1 := enterFunc{ - numArgs: uint32(paramsCount), - argsToStash: s.argsInStash, - stashSize: uint32(stashSize), - stackSize: uint32(stackSize), - extensible: s.dynamic, + if firstForwardRef == -1 { + enter1 := enterFunc{ + numArgs: uint32(paramsCount), + argsToStash: s.argsInStash, + stashSize: uint32(stashSize), + stackSize: uint32(stackSize), + extensible: 
s.dynamic, + } + if s.isDynamic() { + enter1.names = s.makeNamesMap() + } + enter = &enter1 + if enterFunc2Mark != -1 { + ef2 := &enterFuncBody{ + extensible: e.c.scope.dynamic, + } + e.c.updateEnterBlock(&ef2.enterBlock) + e.c.p.code[enterFunc2Mark] = ef2 + } + } else { + enter1 := enterFunc1{ + stashSize: uint32(stashSize), + numArgs: uint32(paramsCount), + argsToCopy: uint32(firstForwardRef), + extensible: s.dynamic, + } + if s.isDynamic() { + enter1.names = s.makeNamesMap() + } + enter = &enter1 + if enterFunc2Mark != -1 { + ef2 := &enterFuncBody{ + adjustStack: true, + extensible: e.c.scope.dynamic, + } + e.c.updateEnterBlock(&ef2.enterBlock) + e.c.p.code[enterFunc2Mark] = ef2 + } } - if s.isDynamic() { - enter1.names = s.makeNamesMap() + if emitArgsRestMark != -1 && s.argsInStash { + e.c.p.code[emitArgsRestMark] = createArgsRestStash } - enter = &enter1 } else { enter = &enterFuncStashless{ stackSize: uint32(stackSize), args: uint32(paramsCount), } + if enterFunc2Mark != -1 { + ef2 := &enterFuncBody{ + extensible: e.c.scope.dynamic, + } + e.c.updateEnterBlock(&ef2.enterBlock) + e.c.p.code[enterFunc2Mark] = ef2 + } } code[delta] = enter if delta != 0 { @@ -914,23 +1248,63 @@ func (e *compiledFunctionLiteral) emitGetter(putOnStack bool) { strict := s.strict p := e.c.p // e.c.p.dumpCode() + if enterFunc2Mark != -1 { + e.c.popScope() + } e.c.popScope() e.c.p = savedPrg - e.c.emit(&newFunc{prg: p, length: uint32(length), name: name, srcStart: uint32(e.expr.Idx0() - 1), srcEnd: uint32(e.expr.Idx1() - 1), strict: strict}) + if e.isArrow { + e.c.emit(&newArrowFunc{newFunc: newFunc{prg: p, length: uint32(length), name: name, source: e.source, strict: strict}}) + } else { + e.c.emit(&newFunc{prg: p, length: uint32(length), name: name, source: e.source, strict: strict}) + } if !putOnStack { e.c.emit(pop) } } func (c *compiler) compileFunctionLiteral(v *ast.FunctionLiteral, isExpr bool) *compiledFunctionLiteral { - strict := c.scope.strict || c.isStrictStatement(v.Body) 
- if v.Name != nil && strict { + strictBody := c.isStrictStatement(v.Body) + if v.Name != nil && (c.scope.strict || strictBody != nil) { c.checkIdentifierLName(v.Name.Name, int(v.Name.Idx)-1) } r := &compiledFunctionLiteral{ - expr: v, - isExpr: isExpr, - strict: strict, + name: v.Name, + parameterList: v.ParameterList, + body: v.Body.List, + source: v.Source, + declarationList: v.DeclarationList, + isExpr: isExpr, + strict: strictBody, + } + r.init(c, v.Idx0()) + return r +} + +func (c *compiler) compileArrowFunctionLiteral(v *ast.ArrowFunctionLiteral) *compiledFunctionLiteral { + var strictBody *ast.StringLiteral + var body []ast.Statement + switch b := v.Body.(type) { + case *ast.BlockStatement: + strictBody = c.isStrictStatement(b) + body = b.List + case *ast.ExpressionBody: + body = []ast.Statement{ + &ast.ReturnStatement{ + Argument: b.Expression, + }, + } + default: + c.throwSyntaxError(int(b.Idx0())-1, "Unsupported ConciseBody type: %T", b) + } + r := &compiledFunctionLiteral{ + parameterList: v.ParameterList, + body: body, + source: v.Source, + declarationList: v.DeclarationList, + isExpr: true, + isArrow: true, + strict: strictBody, } r.init(c, v.Idx0()) return r @@ -940,7 +1314,7 @@ func (e *compiledThisExpr) emitGetter(putOnStack bool) { if putOnStack { e.addSrcMap() scope := e.c.scope - for ; scope != nil && !scope.function && !scope.eval; scope = scope.outer { + for ; scope != nil && (scope.arrow || !scope.function && !scope.eval); scope = scope.outer { } if scope != nil { @@ -953,25 +1327,45 @@ func (e *compiledThisExpr) emitGetter(putOnStack bool) { } func (e *compiledNewExpr) emitGetter(putOnStack bool) { + if e.isVariadic { + e.c.emit(startVariadic) + } e.callee.emitGetter(true) for _, expr := range e.args { expr.emitGetter(true) } e.addSrcMap() - e.c.emit(_new(len(e.args))) + if e.isVariadic { + e.c.emit(newVariadic, endVariadic) + } else { + e.c.emit(_new(len(e.args))) + } if !putOnStack { e.c.emit(pop) } } -func (c *compiler) 
compileNewExpression(v *ast.NewExpression) compiledExpr { - args := make([]compiledExpr, len(v.ArgumentList)) - for i, expr := range v.ArgumentList { - args[i] = c.compileExpression(expr) +func (c *compiler) compileCallArgs(list []ast.Expression) (args []compiledExpr, isVariadic bool) { + args = make([]compiledExpr, len(list)) + for i, argExpr := range list { + if spread, ok := argExpr.(*ast.SpreadElement); ok { + args[i] = c.compileSpreadCallArgument(spread) + isVariadic = true + } else { + args[i] = c.compileExpression(argExpr) + } } + return +} + +func (c *compiler) compileNewExpression(v *ast.NewExpression) compiledExpr { + args, isVariadic := c.compileCallArgs(v.ArgumentList) r := &compiledNewExpr{ - callee: c.compileExpression(v.Callee), - args: args, + compiledCallExpr: compiledCallExpr{ + callee: c.compileExpression(v.Callee), + args: args, + isVariadic: isVariadic, + }, } r.init(c, v.Idx0()) return r @@ -1373,64 +1767,84 @@ func (c *compiler) compileLogicalAnd(left, right ast.Expression, idx file.Idx) c return r } -func (e *compiledVariableExpr) emitGetter(putOnStack bool) { - if e.initializer != nil { - idExpr := &compiledIdentifierExpr{ - name: e.name, - } - idExpr.init(e.c, file.Idx(0)) - idExpr.emitSetter(e.initializer, putOnStack) - } else { - if putOnStack { - e.c.emit(loadUndef) - } - } -} - -func (c *compiler) compileVariableExpression(v *ast.VariableExpression) compiledExpr { - r := &compiledVariableExpr{ - name: v.Name, - initializer: c.compileExpression(v.Initializer), - } - if fn, ok := r.initializer.(*compiledFunctionLiteral); ok { - fn.lhsName = v.Name - } - r.init(c, v.Idx0()) - return r -} - func (e *compiledObjectLiteral) emitGetter(putOnStack bool) { e.addSrcMap() e.c.emit(newObject) for _, prop := range e.expr.Value { - keyExpr := e.c.compileExpression(prop.Key) - cl, ok := keyExpr.(*compiledLiteral) - if !ok { - e.c.throwSyntaxError(e.offset, "non-literal properties in object literal are not supported yet") - } - key := cl.val.string() 
- valueExpr := e.c.compileExpression(prop.Value) - if fn, ok := valueExpr.(*compiledFunctionLiteral); ok { - if fn.expr.Name == nil { - fn.lhsName = key + switch prop := prop.(type) { + case *ast.PropertyKeyed: + keyExpr := e.c.compileExpression(prop.Key) + computed := false + var key unistring.String + switch keyExpr := keyExpr.(type) { + case *compiledLiteral: + key = keyExpr.val.string() + default: + keyExpr.emitGetter(true) + computed = true } - } - valueExpr.emitGetter(true) - switch prop.Kind { - case "value": - if key == __proto__ { - e.c.emit(setProto) + valueExpr := e.c.compileExpression(prop.Value) + var anonFn *compiledFunctionLiteral + if fn, ok := valueExpr.(*compiledFunctionLiteral); ok { + if fn.name == nil { + anonFn = fn + fn.lhsName = key + } + } + if computed { + e.c.emit(_toPropertyKey{}) + valueExpr.emitGetter(true) + switch prop.Kind { + case ast.PropertyKindValue, ast.PropertyKindMethod: + if anonFn != nil { + e.c.emit(setElem1Named) + } else { + e.c.emit(setElem1) + } + case ast.PropertyKindGet: + e.c.emit(setPropGetter1) + case ast.PropertyKindSet: + e.c.emit(setPropSetter1) + default: + panic(fmt.Errorf("unknown property kind: %s", prop.Kind)) + } } else { - e.c.emit(setProp1(key)) + if anonFn != nil { + anonFn.lhsName = key + } + valueExpr.emitGetter(true) + switch prop.Kind { + case ast.PropertyKindValue: + if key == __proto__ { + e.c.emit(setProto) + } else { + e.c.emit(setProp1(key)) + } + case ast.PropertyKindMethod: + e.c.emit(setProp1(key)) + case ast.PropertyKindGet: + e.c.emit(setPropGetter(key)) + case ast.PropertyKindSet: + e.c.emit(setPropSetter(key)) + default: + panic(fmt.Errorf("unknown property kind: %s", prop.Kind)) + } } - case "method": + case *ast.PropertyShort: + key := prop.Name.Name + if prop.Initializer != nil { + e.c.throwSyntaxError(int(prop.Initializer.Idx0())-1, "Invalid shorthand property initializer") + } + if e.c.scope.strict && key == "let" { + e.c.throwSyntaxError(e.offset, "'let' cannot be used as a 
shorthand property in strict mode") + } + e.c.compileIdentifierExpression(&prop.Name).emitGetter(true) e.c.emit(setProp1(key)) - case "get": - e.c.emit(setPropGetter(key)) - case "set": - e.c.emit(setPropSetter(key)) + case *ast.SpreadElement: + e.c.compileExpression(prop.Expression).emitGetter(true) + e.c.emit(copySpread) default: - panic(fmt.Errorf("unknown property kind: %s", prop.Kind)) + panic(fmt.Errorf("unknown Property type: %T", prop)) } } if !putOnStack { @@ -1448,23 +1862,28 @@ func (c *compiler) compileObjectLiteral(v *ast.ObjectLiteral) compiledExpr { func (e *compiledArrayLiteral) emitGetter(putOnStack bool) { e.addSrcMap() - objCount := 0 + hasSpread := false + mark := len(e.c.p.code) + e.c.emit(nil) for _, v := range e.expr.Value { - if v != nil { - e.c.compileExpression(v).emitGetter(true) - objCount++ + if spread, ok := v.(*ast.SpreadElement); ok { + hasSpread = true + e.c.compileExpression(spread.Expression).emitGetter(true) + e.c.emit(pushArraySpread) } else { - e.c.emit(loadNil) + if v != nil { + e.c.compileExpression(v).emitGetter(true) + } else { + e.c.emit(loadNil) + } + e.c.emit(pushArrayItem) } } - if objCount == len(e.expr.Value) { - e.c.emit(newArray(objCount)) - } else { - e.c.emit(&newArraySparse{ - l: len(e.expr.Value), - objCount: objCount, - }) + var objCount uint32 + if !hasSpread { + objCount = uint32(len(e.expr.Value)) } + e.c.p.code[mark] = newArray(objCount) if !putOnStack { e.c.emit(pop) } @@ -1497,25 +1916,32 @@ func (c *compiler) compileRegexpLiteral(v *ast.RegExpLiteral) compiledExpr { return r } -func (e *compiledCallExpr) emitGetter(putOnStack bool) { - var calleeName unistring.String - switch callee := e.callee.(type) { +func (c *compiler) emitCallee(callee compiledExpr) (calleeName unistring.String) { + switch callee := callee.(type) { case *compiledDotExpr: callee.left.emitGetter(true) - e.c.emit(dup) - e.c.emit(getPropCallee(callee.name)) + c.emit(dup) + c.emit(getPropCallee(callee.name)) case *compiledBracketExpr: 
callee.left.emitGetter(true) - e.c.emit(dup) + c.emit(dup) callee.member.emitGetter(true) - e.c.emit(getElemCallee) + c.emit(getElemCallee) case *compiledIdentifierExpr: calleeName = callee.name callee.emitGetterAndCallee() default: - e.c.emit(loadUndef) + c.emit(loadUndef) callee.emitGetter(true) } + return +} + +func (e *compiledCallExpr) emitGetter(putOnStack bool) { + if e.isVariadic { + e.c.emit(startVariadic) + } + calleeName := e.c.emitCallee(e.callee) for _, expr := range e.args { expr.emitGetter(true) @@ -1523,11 +1949,14 @@ func (e *compiledCallExpr) emitGetter(putOnStack bool) { e.addSrcMap() if calleeName == "eval" { - foundFunc := false + foundFunc, foundVar := false, false for sc := e.c.scope; sc != nil; sc = sc.outer { - if !foundFunc && sc.function { + if !foundFunc && sc.function && !sc.arrow { foundFunc = true sc.thisNeeded, sc.argsNeeded = true, true + } + if !foundVar && (sc.variable || sc.function) { + foundVar = true if !sc.strict { sc.dynamic = true } @@ -1536,14 +1965,28 @@ func (e *compiledCallExpr) emitGetter(putOnStack bool) { } if e.c.scope.strict { - e.c.emit(callEvalStrict(len(e.args))) + if e.isVariadic { + e.c.emit(callEvalVariadicStrict) + } else { + e.c.emit(callEvalStrict(len(e.args))) + } } else { - e.c.emit(callEval(len(e.args))) + if e.isVariadic { + e.c.emit(callEvalVariadic) + } else { + e.c.emit(callEval(len(e.args))) + } } } else { - e.c.emit(call(len(e.args))) + if e.isVariadic { + e.c.emit(callVariadic) + } else { + e.c.emit(call(len(e.args))) + } + } + if e.isVariadic { + e.c.emit(endVariadic) } - if !putOnStack { e.c.emit(pop) } @@ -1557,16 +2000,31 @@ func (e *compiledCallExpr) deleteExpr() compiledExpr { return r } +func (c *compiler) compileSpreadCallArgument(spread *ast.SpreadElement) compiledExpr { + r := &compiledSpreadCallArgument{ + expr: c.compileExpression(spread.Expression), + } + r.init(c, spread.Idx0()) + return r +} + func (c *compiler) compileCallExpression(v *ast.CallExpression) compiledExpr { args := 
make([]compiledExpr, len(v.ArgumentList)) + isVariadic := false for i, argExpr := range v.ArgumentList { - args[i] = c.compileExpression(argExpr) + if spread, ok := argExpr.(*ast.SpreadElement); ok { + args[i] = c.compileSpreadCallArgument(spread) + isVariadic = true + } else { + args[i] = c.compileExpression(argExpr) + } } r := &compiledCallExpr{ - args: args, - callee: c.compileExpression(v.Callee), + args: args, + callee: c.compileExpression(v.Callee), + isVariadic: isVariadic, } r.init(c, v.LeftParenthesis) return r @@ -1586,7 +2044,7 @@ func (c *compiler) compileIdentifierExpression(v *ast.Identifier) compiledExpr { } func (c *compiler) compileNumberLiteral(v *ast.NumberLiteral) compiledExpr { - if c.scope.strict && octalRegexp.MatchString(v.Literal) { + if c.scope.strict && len(v.Literal) > 1 && v.Literal[0] == '0' && v.Literal[1] <= '7' && v.Literal[1] >= '0' { c.throwSyntaxError(int(v.Idx)-1, "Octal literals are not allowed in strict mode") panic("Unreachable") } @@ -1614,6 +2072,21 @@ func (c *compiler) compileStringLiteral(v *ast.StringLiteral) compiledExpr { return r } +func (c *compiler) compileTemplateLiteral(v *ast.TemplateLiteral) compiledExpr { + r := &compiledTemplateLiteral{} + if v.Tag != nil { + r.tag = c.compileExpression(v.Tag) + } + ce := make([]compiledExpr, len(v.Expressions)) + for i, expr := range v.Expressions { + ce[i] = c.compileExpression(expr) + } + r.expressions = ce + r.elements = v.Elements + r.init(c, v.Idx0()) + return r +} + func (c *compiler) compileBooleanLiteral(v *ast.BooleanLiteral) compiledExpr { var val Value if v.Value { @@ -1647,3 +2120,273 @@ func (e *compiledEnumGetExpr) emitGetter(putOnStack bool) { e.c.emit(pop) } } + +func (c *compiler) compileObjectAssignmentPattern(v *ast.ObjectPattern) compiledExpr { + r := &compiledObjectAssignmentPattern{ + expr: v, + } + r.init(c, v.Idx0()) + return r +} + +func (e *compiledObjectAssignmentPattern) emitGetter(putOnStack bool) { + if putOnStack { + e.c.emit(loadUndef) + } +} 
+ +func (c *compiler) compileArrayAssignmentPattern(v *ast.ArrayPattern) compiledExpr { + r := &compiledArrayAssignmentPattern{ + expr: v, + } + r.init(c, v.Idx0()) + return r +} + +func (e *compiledArrayAssignmentPattern) emitGetter(putOnStack bool) { + if putOnStack { + e.c.emit(loadUndef) + } +} + +func (c *compiler) emitNamed(expr compiledExpr, name unistring.String) { + if en, ok := expr.(interface { + emitNamed(name unistring.String) + }); ok { + en.emitNamed(name) + } else { + expr.emitGetter(true) + } +} + +func (e *compiledFunctionLiteral) emitNamed(name unistring.String) { + e.lhsName = name + e.emitGetter(true) +} + +func (c *compiler) emitPattern(pattern ast.Pattern, emitter func(target, init compiledExpr), putOnStack bool) { + switch pattern := pattern.(type) { + case *ast.ObjectPattern: + c.emitObjectPattern(pattern, emitter, putOnStack) + case *ast.ArrayPattern: + c.emitArrayPattern(pattern, emitter, putOnStack) + default: + panic(fmt.Errorf("unsupported Pattern: %T", pattern)) + } +} + +func (c *compiler) emitAssign(target ast.Expression, init compiledExpr, emitAssignSimple func(target, init compiledExpr)) { + pattern, isPattern := target.(ast.Pattern) + if isPattern { + init.emitGetter(true) + c.emitPattern(pattern, emitAssignSimple, false) + } else { + emitAssignSimple(c.compileExpression(target), init) + } +} + +func (c *compiler) emitObjectPattern(pattern *ast.ObjectPattern, emitAssign func(target, init compiledExpr), putOnStack bool) { + if pattern.Rest != nil { + c.emit(createDestructSrc) + } else { + c.emit(checkObjectCoercible) + } + for _, prop := range pattern.Properties { + switch prop := prop.(type) { + case *ast.PropertyShort: + c.emit(dup) + emitAssign(c.compileIdentifierExpression(&prop.Name), c.compilePatternInitExpr(func() { + c.emit(getProp(prop.Name.Name)) + }, prop.Initializer, prop.Idx0())) + case *ast.PropertyKeyed: + c.emit(dup) + c.compileExpression(prop.Key).emitGetter(true) + c.emit(_toPropertyKey{}) + var target 
ast.Expression + var initializer ast.Expression + if e, ok := prop.Value.(*ast.AssignExpression); ok { + target = e.Left + initializer = e.Right + } else { + target = prop.Value + } + c.emitAssign(target, c.compilePatternInitExpr(func() { + c.emit(getKey) + }, initializer, prop.Idx0()), emitAssign) + default: + c.throwSyntaxError(int(prop.Idx0()-1), "Unsupported AssignmentProperty type: %T", prop) + } + } + if pattern.Rest != nil { + emitAssign(c.compileExpression(pattern.Rest), c.compileEmitterExpr(func() { + c.emit(copyRest) + }, pattern.Rest.Idx0())) + c.emit(pop) + } + if !putOnStack { + c.emit(pop) + } +} + +func (c *compiler) emitArrayPattern(pattern *ast.ArrayPattern, emitAssign func(target, init compiledExpr), putOnStack bool) { + var marks []int + c.emit(iterate) + for _, elt := range pattern.Elements { + switch elt := elt.(type) { + case nil: + marks = append(marks, len(c.p.code)) + c.emit(nil) + case *ast.AssignExpression: + c.emitAssign(elt.Left, c.compilePatternInitExpr(func() { + marks = append(marks, len(c.p.code)) + c.emit(nil, enumGet) + }, elt.Right, elt.Idx0()), emitAssign) + default: + c.emitAssign(elt, c.compileEmitterExpr(func() { + marks = append(marks, len(c.p.code)) + c.emit(nil, enumGet) + }, elt.Idx0()), emitAssign) + } + } + if pattern.Rest != nil { + c.emitAssign(pattern.Rest, c.compileEmitterExpr(func() { + c.emit(newArrayFromIter) + }, pattern.Rest.Idx0()), emitAssign) + } else { + c.emit(enumPopClose) + } + mark1 := len(c.p.code) + c.emit(nil) + + for i, elt := range pattern.Elements { + switch elt := elt.(type) { + case nil: + c.p.code[marks[i]] = iterNext(len(c.p.code) - marks[i]) + case *ast.Identifier: + emitAssign(c.compileIdentifierExpression(elt), c.compileEmitterExpr(func() { + c.p.code[marks[i]] = iterNext(len(c.p.code) - marks[i]) + c.emit(loadUndef) + }, elt.Idx0())) + case *ast.AssignExpression: + c.emitAssign(elt.Left, c.compileNamedEmitterExpr(func(name unistring.String) { + c.p.code[marks[i]] = iterNext(len(c.p.code) - 
marks[i]) + c.emitNamed(c.compileExpression(elt.Right), name) + }, elt.Idx0()), emitAssign) + default: + c.emitAssign(elt, c.compileEmitterExpr( + func() { + c.p.code[marks[i]] = iterNext(len(c.p.code) - marks[i]) + c.emit(loadUndef) + }, elt.Idx0()), emitAssign) + } + } + c.emit(enumPop) + if pattern.Rest != nil { + c.emitAssign(pattern.Rest, c.compileExpression( + &ast.ArrayLiteral{ + LeftBracket: pattern.Rest.Idx0(), + RightBracket: pattern.Rest.Idx0(), + }), emitAssign) + } + c.p.code[mark1] = jump(len(c.p.code) - mark1) + + if !putOnStack { + c.emit(pop) + } +} + +func (e *compiledObjectAssignmentPattern) emitSetter(valueExpr compiledExpr, putOnStack bool) { + valueExpr.emitGetter(true) + e.c.emitObjectPattern(e.expr, e.c.emitPatternAssign, putOnStack) +} + +func (e *compiledArrayAssignmentPattern) emitSetter(valueExpr compiledExpr, putOnStack bool) { + valueExpr.emitGetter(true) + e.c.emitArrayPattern(e.expr, e.c.emitPatternAssign, putOnStack) +} + +type compiledPatternInitExpr struct { + baseCompiledExpr + emitSrc func() + def compiledExpr +} + +func (e *compiledPatternInitExpr) emitGetter(putOnStack bool) { + if !putOnStack { + return + } + e.emitSrc() + if e.def != nil { + mark := len(e.c.p.code) + e.c.emit(nil) + e.def.emitGetter(true) + e.c.p.code[mark] = jdef(len(e.c.p.code) - mark) + } +} + +func (e *compiledPatternInitExpr) emitNamed(name unistring.String) { + e.emitSrc() + if e.def != nil { + mark := len(e.c.p.code) + e.c.emit(nil) + e.c.emitNamed(e.def, name) + e.c.p.code[mark] = jdef(len(e.c.p.code) - mark) + } +} + +func (c *compiler) compilePatternInitExpr(emitSrc func(), def ast.Expression, idx file.Idx) compiledExpr { + r := &compiledPatternInitExpr{ + emitSrc: emitSrc, + def: c.compileExpression(def), + } + r.init(c, idx) + return r +} + +type compiledEmitterExpr struct { + baseCompiledExpr + emitter func() + namedEmitter func(name unistring.String) +} + +func (e *compiledEmitterExpr) emitGetter(putOnStack bool) { + if e.emitter != nil { + 
e.emitter() + } else { + e.namedEmitter("") + } + if !putOnStack { + e.c.emit(pop) + } +} + +func (e *compiledEmitterExpr) emitNamed(name unistring.String) { + if e.namedEmitter != nil { + e.namedEmitter(name) + } else { + e.emitter() + } +} + +func (c *compiler) compileEmitterExpr(emitter func(), idx file.Idx) *compiledEmitterExpr { + r := &compiledEmitterExpr{ + emitter: emitter, + } + r.init(c, idx) + return r +} + +func (c *compiler) compileNamedEmitterExpr(namedEmitter func(unistring.String), idx file.Idx) *compiledEmitterExpr { + r := &compiledEmitterExpr{ + namedEmitter: namedEmitter, + } + r.init(c, idx) + return r +} + +func (e *compiledSpreadCallArgument) emitGetter(putOnStack bool) { + e.expr.emitGetter(putOnStack) + if putOnStack { + e.c.emit(pushSpread) + } +} diff --git a/vendor/github.com/dop251/goja/compiler_stmt.go b/vendor/github.com/dop251/goja/compiler_stmt.go index 9d1a9a1c7..dfc4bc9ad 100644 --- a/vendor/github.com/dop251/goja/compiler_stmt.go +++ b/vendor/github.com/dop251/goja/compiler_stmt.go @@ -102,12 +102,6 @@ func (c *compiler) updateEnterBlock(enter *enterBlock) { } func (c *compiler) compileTryStatement(v *ast.TryStatement, needResult bool) { - if c.scope.strict && v.Catch != nil && v.Catch.Parameter != nil { - switch v.Catch.Parameter.Name { - case "arguments", "eval": - c.throwSyntaxError(int(v.Catch.Parameter.Idx)-1, "Catch variable may not be eval or arguments in strict mode") - } - } c.block = &block{ typ: blockTry, outer: c.block, @@ -146,16 +140,31 @@ func (c *compiler) compileTryStatement(v *ast.TryStatement, needResult bool) { c.newBlockScope() list := v.Catch.Body.List funcs := c.extractFunctions(list) - c.createFunctionBindings(funcs) - c.scope.bindNameLexical(v.Catch.Parameter.Name, true, int(v.Catch.Parameter.Idx)-1) - bindings := c.scope.bindings - if l := len(bindings); l > 1 { - // make sure the catch variable always goes first - bindings[0], bindings[l-1] = bindings[l-1], bindings[0] + if _, ok := 
v.Catch.Parameter.(ast.Pattern); ok { + // add anonymous binding for the catch parameter, note it must be first + c.scope.addBinding(int(v.Catch.Idx0()) - 1) } - c.compileLexicalDeclarations(list, true) + c.createBindings(v.Catch.Parameter, func(name unistring.String, offset int) { + if c.scope.strict { + switch name { + case "arguments", "eval": + c.throwSyntaxError(offset, "Catch variable may not be eval or arguments in strict mode") + } + } + c.scope.bindNameLexical(name, true, offset) + }) enter := &enterBlock{} c.emit(enter) + if pattern, ok := v.Catch.Parameter.(ast.Pattern); ok { + c.scope.bindings[0].emitGet() + c.emitPattern(pattern, func(target, init compiledExpr) { + c.emitPatternLexicalAssign(target, init, false) + }, false) + } + for _, decl := range funcs { + c.scope.bindNameLexical(decl.Function.Name.Name, true, int(decl.Function.Name.Idx1())-1) + } + c.compileLexicalDeclarations(list, true) c.compileFunctions(funcs) c.compileStatements(list, bodyNeedResult) c.leaveScopeBlock(enter) @@ -252,7 +261,7 @@ func (c *compiler) compileLabeledForStatement(v *ast.ForStatement, needResult bo enterIterBlock = c.compileForHeadLexDecl(&init.LexicalDeclaration, needResult) case *ast.ForLoopInitializerVarDeclList: for _, expr := range init.List { - c.compileVariableExpression(expr).emitGetter(false) + c.compileVarBinding(expr) } case *ast.ForLoopInitializerExpression: c.compileExpression(init.Expression).emitGetter(false) @@ -347,10 +356,15 @@ func (c *compiler) compileForInto(into ast.ForInto, needResult bool) (enter *ent if c.scope.strict && into.Binding.Initializer != nil { c.throwSyntaxError(int(into.Binding.Initializer.Idx0())-1, "for-in loop variable declaration may not have an initializer.") } - c.compileIdentifierExpression(&ast.Identifier{ - Name: into.Binding.Name, - Idx: into.Binding.Idx0(), - }).emitSetter(&c.enumGetExpr, false) + switch target := into.Binding.Target.(type) { + case *ast.Identifier: + 
c.compileIdentifierExpression(target).emitSetter(&c.enumGetExpr, false) + case ast.Pattern: + c.emit(enumGet) + c.emitPattern(target, c.emitPatternVarAssign, false) + default: + c.throwSyntaxError(int(target.Idx0()-1), "unsupported for-in var target: %T", target) + } case *ast.ForDeclaration: c.block = &block{ @@ -362,12 +376,19 @@ func (c *compiler) compileForInto(into ast.ForInto, needResult bool) (enter *ent c.newBlockScope() enter = &enterBlock{} c.emit(enter) - if binding, ok := into.Binding.(*ast.BindingIdentifier); ok { - b := c.createLexicalBinding(binding.Name, into.IsConst, int(into.Idx)-1) - c.enumGetExpr.emitGetter(true) + switch target := into.Target.(type) { + case *ast.Identifier: + b := c.createLexicalIdBinding(target.Name, into.IsConst, int(into.Idx)-1) + c.emit(enumGet) b.emitInit() - } else { - c.throwSyntaxError(int(into.Idx)-1, "Unsupported ForBinding: %T", into.Binding) + case ast.Pattern: + c.createLexicalBinding(target, into.IsConst) + c.emit(enumGet) + c.emitPattern(target, func(target, init compiledExpr) { + c.emitPatternLexicalAssign(target, init, into.IsConst) + }, false) + default: + c.throwSyntaxError(int(into.Idx)-1, "Unsupported ForBinding: %T", into.Target) } default: panic(fmt.Sprintf("Unsupported for-into: %T", into)) @@ -385,24 +406,20 @@ func (c *compiler) compileLabeledForInOfStatement(into ast.ForInto, source ast.E } enterPos := -1 if forDecl, ok := into.(*ast.ForDeclaration); ok { - if binding, ok := forDecl.Binding.(*ast.BindingIdentifier); ok { - c.block = &block{ - typ: blockScope, - outer: c.block, - needResult: false, - } - c.newBlockScope() - enterPos = len(c.p.code) - c.emit(jump(1)) - c.createLexicalBinding(binding.Name, forDecl.IsConst, int(forDecl.Idx)-1) - } else { - c.throwSyntaxError(int(forDecl.Idx)-1, "Unsupported ForBinding: %T", forDecl.Binding) + c.block = &block{ + typ: blockScope, + outer: c.block, + needResult: false, } + c.newBlockScope() + enterPos = len(c.p.code) + c.emit(jump(1)) + 
c.createLexicalBinding(forDecl.Target, forDecl.IsConst) } c.compileExpression(source).emitGetter(true) if enterPos != -1 { s := c.scope - used := len(c.block.breaks) > 0 + used := len(c.block.breaks) > 0 || s.isDynamic() if !used { for _, b := range s.bindings { if b.useCount() > 0 { @@ -412,6 +429,11 @@ func (c *compiler) compileLabeledForInOfStatement(into ast.ForInto, source ast.E } } if used { + // We need the stack untouched because it contains the source. + // This is not the most optimal way, but it's an edge case, hopefully quite rare. + for _, b := range s.bindings { + b.moveToStash() + } enter := &enterBlock{} c.p.code[enterPos] = enter c.leaveScopeBlock(enter) @@ -421,7 +443,7 @@ func (c *compiler) compileLabeledForInOfStatement(into ast.ForInto, source ast.E c.popScope() } if iter { - c.emit(iterate) + c.emit(iterateP) } else { c.emit(enumerate) } @@ -711,43 +733,102 @@ func (c *compiler) compileReturnStatement(v *ast.ReturnStatement) { c.emit(ret) } +func (c *compiler) checkVarConflict(name unistring.String, offset int) { + for sc := c.scope; sc != nil; sc = sc.outer { + if b, exists := sc.boundNames[name]; exists && !b.isVar && !(b.isArg && sc != c.scope) { + c.throwSyntaxError(offset, "Identifier '%s' has already been declared", name) + } + if sc.function { + break + } + } +} + +func (c *compiler) emitVarAssign(name unistring.String, offset int, init compiledExpr) { + c.checkVarConflict(name, offset) + if init != nil { + c.emitVarRef(name, offset) + c.emitNamed(init, name) + c.emit(putValueP) + } +} + +func (c *compiler) compileVarBinding(expr *ast.Binding) { + switch target := expr.Target.(type) { + case *ast.Identifier: + c.emitVarAssign(target.Name, int(target.Idx)-1, c.compileExpression(expr.Initializer)) + case ast.Pattern: + c.compileExpression(expr.Initializer).emitGetter(true) + c.emitPattern(target, c.emitPatternVarAssign, false) + default: + c.throwSyntaxError(int(target.Idx0()-1), "unsupported variable binding target: %T", target) + } +} + 
+func (c *compiler) emitLexicalAssign(name unistring.String, offset int, init compiledExpr, isConst bool) { + b := c.scope.boundNames[name] + if b == nil { + panic("Lexical declaration for an unbound name") + } + if init != nil { + c.emitNamed(init, name) + } else { + if isConst { + c.throwSyntaxError(offset, "Missing initializer in const declaration") + } + c.emit(loadUndef) + } + if c.scope.outer != nil { + b.emitInit() + } else { + c.emit(initGlobal(name)) + } +} + +func (c *compiler) emitPatternVarAssign(target, init compiledExpr) { + id := target.(*compiledIdentifierExpr) + c.emitVarAssign(id.name, id.offset, init) +} + +func (c *compiler) emitPatternLexicalAssign(target, init compiledExpr, isConst bool) { + id := target.(*compiledIdentifierExpr) + c.emitLexicalAssign(id.name, id.offset, init, isConst) +} + +func (c *compiler) emitPatternAssign(target, init compiledExpr) { + target.emitRef() + if id, ok := target.(*compiledIdentifierExpr); ok { + c.emitNamed(init, id.name) + } else { + init.emitGetter(true) + } + c.emit(putValueP) +} + +func (c *compiler) compileLexicalBinding(expr *ast.Binding, isConst bool) { + switch target := expr.Target.(type) { + case *ast.Identifier: + c.emitLexicalAssign(target.Name, int(target.Idx)-1, c.compileExpression(expr.Initializer), isConst) + case ast.Pattern: + c.compileExpression(expr.Initializer).emitGetter(true) + c.emitPattern(target, func(target, init compiledExpr) { + c.emitPatternLexicalAssign(target, init, isConst) + }, false) + default: + c.throwSyntaxError(int(target.Idx0()-1), "unsupported lexical binding target: %T", target) + } +} + func (c *compiler) compileVariableStatement(v *ast.VariableStatement) { for _, expr := range v.List { - for sc := c.scope; sc != nil; sc = sc.outer { - if b, exists := sc.boundNames[expr.Name]; exists && !b.isVar { - c.throwSyntaxError(int(expr.Idx)-1, "Identifier '%s' has already been declared", expr.Name) - } - if sc.function { - break - } - } - 
c.compileExpression(expr).emitGetter(false) + c.compileVarBinding(expr) } } func (c *compiler) compileLexicalDeclaration(v *ast.LexicalDeclaration) { + isConst := v.Token == token.CONST for _, e := range v.List { - b := c.scope.boundNames[e.Name] - if b == nil { - panic("Lexical declaration for an unbound name") - } - if e.Initializer != nil { - initializer := c.compileExpression(e.Initializer) - if fn, ok := initializer.(*compiledFunctionLiteral); ok { - fn.lhsName = e.Name - } - initializer.emitGetter(true) - } else { - if v.Token == token.CONST { - c.throwSyntaxError(int(e.Idx1())-1, "Missing initializer in const declaration") - } - c.emit(loadUndef) - } - if c.scope.outer != nil { - b.emitInit() - } else { - c.emit(initGlobal(e.Name)) - } + c.compileLexicalBinding(e, isConst) } } @@ -845,34 +926,6 @@ func (c *compiler) compileGenericLabeledStatement(v ast.Statement, needResult bo c.leaveBlock() } -func (c *compiler) createLexicalBinding(name unistring.String, isConst bool, offset int) *binding { - if name == "let" { - c.throwSyntaxError(offset, "let is disallowed as a lexically bound name") - } - b, _ := c.scope.bindNameLexical(name, true, offset) - b.isConst = isConst - return b -} - -func (c *compiler) createLexicalBindings(lex *ast.LexicalDeclaration) { - for _, d := range lex.List { - c.createLexicalBinding(d.Name, lex.Token == token.CONST, int(d.Idx)-1) - } -} - -func (c *compiler) compileLexicalDeclarations(list []ast.Statement, scopeDeclared bool) bool { - for _, st := range list { - if lex, ok := st.(*ast.LexicalDeclaration); ok { - if !scopeDeclared { - c.newBlockScope() - scopeDeclared = true - } - c.createLexicalBindings(lex) - } - } - return scopeDeclared -} - func (c *compiler) compileBlockStatement(v *ast.BlockStatement, needResult bool) { var scopeDeclared bool funcs := c.extractFunctions(v.List) @@ -978,8 +1031,9 @@ func (c *compiler) compileSwitchStatement(v *ast.SwitchStatement, needResult boo } copy(bb[1:], bindings) db = &binding{ - scope: 
c.scope, - isConst: true, + scope: c.scope, + isConst: true, + isStrict: true, } bb[0] = db c.scope.bindings = bb diff --git a/vendor/github.com/dop251/goja/date.go b/vendor/github.com/dop251/goja/date.go index 66ac80b51..79d50b2de 100644 --- a/vendor/github.com/dop251/goja/date.go +++ b/vendor/github.com/dop251/goja/date.go @@ -2,6 +2,7 @@ package goja import ( "math" + "reflect" "time" ) @@ -24,51 +25,80 @@ type dateObject struct { msec int64 } +type dateLayoutDesc struct { + layout string + dateOnly bool +} + var ( - dateLayoutList = []string{ - "2006-01-02T15:04:05Z0700", - "2006-01-02T15:04:05", - "2006-01-02", - "2006-01-02 15:04:05", - time.RFC1123, - time.RFC1123Z, - dateTimeLayout, - time.UnixDate, - time.ANSIC, - time.RubyDate, - "Mon, 02 Jan 2006 15:04:05 GMT-0700 (MST)", - "Mon, 02 Jan 2006 15:04:05 -0700 (MST)", - - "2006", - "2006-01", - - "2006T15:04", - "2006-01T15:04", - "2006-01-02T15:04", - - "2006T15:04:05", - "2006-01T15:04:05", - - "2006T15:04Z0700", - "2006-01T15:04Z0700", - "2006-01-02T15:04Z0700", - - "2006T15:04:05Z0700", - "2006-01T15:04:05Z0700", + dateLayoutsNumeric = []dateLayoutDesc{ + {layout: "2006-01-02T15:04:05Z0700"}, + {layout: "2006-01-02T15:04:05"}, + {layout: "2006-01-02", dateOnly: true}, + {layout: "2006-01-02 15:04:05"}, + + {layout: "2006", dateOnly: true}, + {layout: "2006-01", dateOnly: true}, + + {layout: "2006T15:04"}, + {layout: "2006-01T15:04"}, + {layout: "2006-01-02T15:04"}, + + {layout: "2006T15:04:05"}, + {layout: "2006-01T15:04:05"}, + + {layout: "2006T15:04Z0700"}, + {layout: "2006-01T15:04Z0700"}, + {layout: "2006-01-02T15:04Z0700"}, + + {layout: "2006T15:04:05Z0700"}, + {layout: "2006-01T15:04:05Z0700"}, + } + + dateLayoutsAlpha = []dateLayoutDesc{ + {layout: time.RFC1123}, + {layout: time.RFC1123Z}, + {layout: dateTimeLayout}, + {layout: time.UnixDate}, + {layout: time.ANSIC}, + {layout: time.RubyDate}, + {layout: "Mon, _2 Jan 2006 15:04:05 GMT-0700 (MST)"}, + {layout: "Mon, _2 Jan 2006 15:04:05 -0700 
(MST)"}, + {layout: "Jan _2, 2006", dateOnly: true}, } ) func dateParse(date string) (time.Time, bool) { var t time.Time var err error - for _, layout := range dateLayoutList { - t, err = parseDate(layout, date, time.UTC) + var layouts []dateLayoutDesc + if len(date) > 0 { + first := date[0] + if first <= '9' && (first >= '0' || first == '-' || first == '+') { + layouts = dateLayoutsNumeric + } else { + layouts = dateLayoutsAlpha + } + } else { + return time.Time{}, false + } + for _, desc := range layouts { + var defLoc *time.Location + if desc.dateOnly { + defLoc = time.UTC + } else { + defLoc = time.Local + } + t, err = parseDate(desc.layout, date, defLoc) if err == nil { break } } + if err != nil { + return time.Time{}, false + } unix := timeToMsec(t) - return t, err == nil && unix >= -maxTime && unix <= maxTime + return t, unix >= -maxTime && unix <= maxTime } func (r *Runtime) newDateObject(t time.Time, isSet bool, proto *Object) *Object { @@ -106,6 +136,10 @@ func (d *dateObject) toPrimitive() Value { return d.toPrimitiveString() } +func (d *dateObject) exportType() reflect.Type { + return typeTime +} + func (d *dateObject) export(*objectExportCtx) interface{} { if d.isSet() { return d.time() diff --git a/vendor/github.com/dop251/goja/date_parser.go b/vendor/github.com/dop251/goja/date_parser.go index 0841cf40e..f83605329 100644 --- a/vendor/github.com/dop251/goja/date_parser.go +++ b/vendor/github.com/dop251/goja/date_parser.go @@ -5,6 +5,8 @@ package goja // - 6-digit extended years are supported in place of long year (2006) in the form of +123456 // - Timezone formats tolerate colons, e.g. -0700 will parse -07:00 // - Short week day will also parse long week day +// - Short month ("Jan") will also parse long month ("January") +// - Long day ("02") will also parse short day ("2"). // - Timezone in brackets, "(MST)", will match any string in brackets (e.g. 
"(GMT Standard Time)") // - If offset is not set and timezone name is unknown, an error is returned // - If offset and timezone name are both set the offset takes precedence and the resulting Location will be FixedZone("", offset) @@ -133,7 +135,10 @@ func parseDate(layout, value string, defaultLocation *time.Location) (time.Time, } case stdMonth: - month, value, err = lookup(shortMonthNames, value) + month, value, err = lookup(longMonthNames, value) + if err != nil { + month, value, err = lookup(shortMonthNames, value) + } month++ case stdLongMonth: month, value, err = lookup(longMonthNames, value) @@ -155,7 +160,7 @@ func parseDate(layout, value string, defaultLocation *time.Location) (time.Time, if std == stdUnderDay && len(value) > 0 && value[0] == ' ' { value = value[1:] } - day, value, err = getnum(value, std == stdZeroDay) + day, value, err = getnum(value, false) if day < 0 { // Note that we allow any one- or two-digit day here. rangeErrString = "day" diff --git a/vendor/github.com/dop251/goja/destruct.go b/vendor/github.com/dop251/goja/destruct.go new file mode 100644 index 000000000..54b99b750 --- /dev/null +++ b/vendor/github.com/dop251/goja/destruct.go @@ -0,0 +1,277 @@ +package goja + +import ( + "github.com/dop251/goja/unistring" + "reflect" +) + +type destructKeyedSource struct { + r *Runtime + wrapped Value + usedKeys map[Value]struct{} +} + +func newDestructKeyedSource(r *Runtime, wrapped Value) *destructKeyedSource { + return &destructKeyedSource{ + r: r, + wrapped: wrapped, + } +} + +func (r *Runtime) newDestructKeyedSource(wrapped Value) *Object { + return &Object{ + runtime: r, + self: newDestructKeyedSource(r, wrapped), + } +} + +func (d *destructKeyedSource) w() objectImpl { + return d.wrapped.ToObject(d.r).self +} + +func (d *destructKeyedSource) recordKey(key Value) { + if d.usedKeys == nil { + d.usedKeys = make(map[Value]struct{}) + } + d.usedKeys[key] = struct{}{} +} + +func (d *destructKeyedSource) sortLen() int64 { + return 
d.w().sortLen() +} + +func (d *destructKeyedSource) sortGet(i int64) Value { + return d.w().sortGet(i) +} + +func (d *destructKeyedSource) swap(i int64, i2 int64) { + d.w().swap(i, i2) +} + +func (d *destructKeyedSource) className() string { + return d.w().className() +} + +func (d *destructKeyedSource) getStr(p unistring.String, receiver Value) Value { + d.recordKey(stringValueFromRaw(p)) + return d.w().getStr(p, receiver) +} + +func (d *destructKeyedSource) getIdx(p valueInt, receiver Value) Value { + d.recordKey(p.toString()) + return d.w().getIdx(p, receiver) +} + +func (d *destructKeyedSource) getSym(p *Symbol, receiver Value) Value { + d.recordKey(p) + return d.w().getSym(p, receiver) +} + +func (d *destructKeyedSource) getOwnPropStr(u unistring.String) Value { + d.recordKey(stringValueFromRaw(u)) + return d.w().getOwnPropStr(u) +} + +func (d *destructKeyedSource) getOwnPropIdx(v valueInt) Value { + d.recordKey(v.toString()) + return d.w().getOwnPropIdx(v) +} + +func (d *destructKeyedSource) getOwnPropSym(symbol *Symbol) Value { + d.recordKey(symbol) + return d.w().getOwnPropSym(symbol) +} + +func (d *destructKeyedSource) setOwnStr(p unistring.String, v Value, throw bool) bool { + return d.w().setOwnStr(p, v, throw) +} + +func (d *destructKeyedSource) setOwnIdx(p valueInt, v Value, throw bool) bool { + return d.w().setOwnIdx(p, v, throw) +} + +func (d *destructKeyedSource) setOwnSym(p *Symbol, v Value, throw bool) bool { + return d.w().setOwnSym(p, v, throw) +} + +func (d *destructKeyedSource) setForeignStr(p unistring.String, v, receiver Value, throw bool) (res bool, handled bool) { + return d.w().setForeignStr(p, v, receiver, throw) +} + +func (d *destructKeyedSource) setForeignIdx(p valueInt, v, receiver Value, throw bool) (res bool, handled bool) { + return d.w().setForeignIdx(p, v, receiver, throw) +} + +func (d *destructKeyedSource) setForeignSym(p *Symbol, v, receiver Value, throw bool) (res bool, handled bool) { + return d.w().setForeignSym(p, v, 
receiver, throw) +} + +func (d *destructKeyedSource) hasPropertyStr(u unistring.String) bool { + return d.w().hasPropertyStr(u) +} + +func (d *destructKeyedSource) hasPropertyIdx(idx valueInt) bool { + return d.w().hasPropertyIdx(idx) +} + +func (d *destructKeyedSource) hasPropertySym(s *Symbol) bool { + return d.w().hasPropertySym(s) +} + +func (d *destructKeyedSource) hasOwnPropertyStr(u unistring.String) bool { + return d.w().hasOwnPropertyStr(u) +} + +func (d *destructKeyedSource) hasOwnPropertyIdx(v valueInt) bool { + return d.w().hasOwnPropertyIdx(v) +} + +func (d *destructKeyedSource) hasOwnPropertySym(s *Symbol) bool { + return d.w().hasOwnPropertySym(s) +} + +func (d *destructKeyedSource) defineOwnPropertyStr(name unistring.String, desc PropertyDescriptor, throw bool) bool { + return d.w().defineOwnPropertyStr(name, desc, throw) +} + +func (d *destructKeyedSource) defineOwnPropertyIdx(name valueInt, desc PropertyDescriptor, throw bool) bool { + return d.w().defineOwnPropertyIdx(name, desc, throw) +} + +func (d *destructKeyedSource) defineOwnPropertySym(name *Symbol, desc PropertyDescriptor, throw bool) bool { + return d.w().defineOwnPropertySym(name, desc, throw) +} + +func (d *destructKeyedSource) deleteStr(name unistring.String, throw bool) bool { + return d.w().deleteStr(name, throw) +} + +func (d *destructKeyedSource) deleteIdx(idx valueInt, throw bool) bool { + return d.w().deleteIdx(idx, throw) +} + +func (d *destructKeyedSource) deleteSym(s *Symbol, throw bool) bool { + return d.w().deleteSym(s, throw) +} + +func (d *destructKeyedSource) toPrimitiveNumber() Value { + return d.w().toPrimitiveNumber() +} + +func (d *destructKeyedSource) toPrimitiveString() Value { + return d.w().toPrimitiveString() +} + +func (d *destructKeyedSource) toPrimitive() Value { + return d.w().toPrimitive() +} + +func (d *destructKeyedSource) assertCallable() (call func(FunctionCall) Value, ok bool) { + return d.w().assertCallable() +} + +func (d *destructKeyedSource) 
assertConstructor() func(args []Value, newTarget *Object) *Object { + return d.w().assertConstructor() +} + +func (d *destructKeyedSource) proto() *Object { + return d.w().proto() +} + +func (d *destructKeyedSource) setProto(proto *Object, throw bool) bool { + return d.w().setProto(proto, throw) +} + +func (d *destructKeyedSource) hasInstance(v Value) bool { + return d.w().hasInstance(v) +} + +func (d *destructKeyedSource) isExtensible() bool { + return d.w().isExtensible() +} + +func (d *destructKeyedSource) preventExtensions(throw bool) bool { + return d.w().preventExtensions(throw) +} + +type destructKeyedSourceIter struct { + d *destructKeyedSource + wrapped iterNextFunc +} + +func (i *destructKeyedSourceIter) next() (propIterItem, iterNextFunc) { + for { + item, next := i.wrapped() + if next == nil { + return item, nil + } + i.wrapped = next + if _, exists := i.d.usedKeys[stringValueFromRaw(item.name)]; !exists { + return item, i.next + } + } +} + +func (d *destructKeyedSource) enumerateOwnKeys() iterNextFunc { + return (&destructKeyedSourceIter{ + d: d, + wrapped: d.w().enumerateOwnKeys(), + }).next +} + +func (d *destructKeyedSource) export(ctx *objectExportCtx) interface{} { + return d.w().export(ctx) +} + +func (d *destructKeyedSource) exportType() reflect.Type { + return d.w().exportType() +} + +func (d *destructKeyedSource) equal(impl objectImpl) bool { + return d.w().equal(impl) +} + +func (d *destructKeyedSource) ownKeys(all bool, accum []Value) []Value { + var next iterNextFunc + if all { + next = d.enumerateOwnKeys() + } else { + next = (&enumerableIter{ + wrapped: d.enumerateOwnKeys(), + }).next + } + for item, next := next(); next != nil; item, next = next() { + accum = append(accum, stringValueFromRaw(item.name)) + } + return accum +} + +func (d *destructKeyedSource) filterUsedKeys(keys []Value) []Value { + k := 0 + for i, key := range keys { + if _, exists := d.usedKeys[key]; exists { + continue + } + if k != i { + keys[k] = key + } + k++ + } + 
return keys[:k] +} + +func (d *destructKeyedSource) ownSymbols(all bool, accum []Value) []Value { + return d.filterUsedKeys(d.w().ownSymbols(all, accum)) +} + +func (d *destructKeyedSource) ownPropertyKeys(all bool, accum []Value) []Value { + return d.filterUsedKeys(d.w().ownPropertyKeys(all, accum)) +} + +func (d *destructKeyedSource) _putProp(name unistring.String, value Value, writable, enumerable, configurable bool) Value { + return d.w()._putProp(name, value, writable, enumerable, configurable) +} + +func (d *destructKeyedSource) _putSym(s *Symbol, prop Value) { + d.w()._putSym(s, prop) +} diff --git a/vendor/github.com/dop251/goja/file/file.go b/vendor/github.com/dop251/goja/file/file.go index 78ae1ad90..84cedb9d9 100644 --- a/vendor/github.com/dop251/goja/file/file.go +++ b/vendor/github.com/dop251/goja/file/file.go @@ -4,6 +4,7 @@ package file import ( "fmt" + "net/url" "path" "sort" "sync" @@ -159,11 +160,8 @@ func (fl *File) Position(offset int) Position { if fl.sourceMap != nil { if source, _, row, col, ok := fl.sourceMap.Source(row, col); ok { - if !path.IsAbs(source) { - source = path.Join(path.Dir(fl.name), source) - } return Position{ - Filename: source, + Filename: ResolveSourcemapURL(fl.Name(), source).String(), Line: row, Column: col, } @@ -177,6 +175,22 @@ func (fl *File) Position(offset int) Position { } } +func ResolveSourcemapURL(basename, source string) *url.URL { + // if the url is absolute(has scheme) there is nothing to do + smURL, err := url.Parse(source) + if err == nil && !smURL.IsAbs() { + baseURL, err1 := url.Parse(basename) + if err1 == nil && path.IsAbs(baseURL.Path) { + smURL = baseURL.ResolveReference(smURL) + } else { + // pathological case where both are not absolute paths and using Resolve as above will produce an absolute + // one + smURL, _ = url.Parse(path.Join(path.Dir(basename), smURL.Path)) + } + } + return smURL +} + func findNextLineStart(s string) int { for pos, ch := range s { switch ch { diff --git 
a/vendor/github.com/dop251/goja/func.go b/vendor/github.com/dop251/goja/func.go index 7b3a89992..3611c0068 100644 --- a/vendor/github.com/dop251/goja/func.go +++ b/vendor/github.com/dop251/goja/func.go @@ -9,15 +9,26 @@ import ( type baseFuncObject struct { baseObject - nameProp, lenProp valueProperty + lenProp valueProperty } -type funcObject struct { +type baseJsFuncObject struct { baseFuncObject - stash *stash - prg *Program - src string + stash *stash + prg *Program + src string + strict bool +} + +type funcObject struct { + baseJsFuncObject +} + +type arrowFuncObject struct { + baseJsFuncObject + this Value + newTarget Value } type nativeFuncObject struct { @@ -129,18 +140,18 @@ func (f *funcObject) Call(call FunctionCall) Value { return f.call(call, nil) } -func (f *funcObject) call(call FunctionCall, newTarget Value) Value { +func (f *arrowFuncObject) Call(call FunctionCall) Value { + return f._call(call, f.newTarget, f.this) +} + +func (f *baseJsFuncObject) _call(call FunctionCall, newTarget, this Value) Value { vm := f.val.runtime.vm pc := vm.pc vm.stack.expand(vm.sp + len(call.Arguments) + 1) vm.stack[vm.sp] = f.val vm.sp++ - if call.This != nil { - vm.stack[vm.sp] = call.This - } else { - vm.stack[vm.sp] = _undefined - } + vm.stack[vm.sp] = this vm.sp++ for _, arg := range call.Arguments { if arg != nil { @@ -162,6 +173,11 @@ func (f *funcObject) call(call FunctionCall, newTarget Value) Value { vm.pc = pc vm.halt = false return vm.pop() + +} + +func (f *funcObject) call(call FunctionCall, newTarget Value) Value { + return f._call(call, newTarget, nilSafe(call.This)) } func (f *funcObject) export(*objectExportCtx) interface{} { @@ -180,14 +196,18 @@ func (f *funcObject) assertConstructor() func(args []Value, newTarget *Object) * return f.construct } +func (f *arrowFuncObject) exportType() reflect.Type { + return reflect.TypeOf(f.Call) +} + +func (f *arrowFuncObject) assertCallable() (func(FunctionCall) Value, bool) { + return f.Call, true +} + func (f 
*baseFuncObject) init(name unistring.String, length int) { f.baseObject.init() - if name != "" { - f.nameProp.configurable = true - f.nameProp.value = stringValueFromRaw(name) - f._put("name", &f.nameProp) - } + f._putProp("name", stringValueFromRaw(name), false, false, true) f.lenProp.configurable = true f.lenProp.value = valueInt(length) diff --git a/vendor/github.com/dop251/goja/go.mod b/vendor/github.com/dop251/goja/go.mod deleted file mode 100644 index 64dae78b2..000000000 --- a/vendor/github.com/dop251/goja/go.mod +++ /dev/null @@ -1,13 +0,0 @@ -module github.com/dop251/goja - -go 1.14 - -require ( - github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91 - github.com/dop251/goja_nodejs v0.0.0-20210225215109-d91c329300e7 - github.com/go-sourcemap/sourcemap v2.1.3+incompatible - github.com/kr/text v0.2.0 // indirect - golang.org/x/text v0.3.6 - gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect - gopkg.in/yaml.v2 v2.4.0 -) diff --git a/vendor/github.com/dop251/goja/go.sum b/vendor/github.com/dop251/goja/go.sum deleted file mode 100644 index 34f3c442b..000000000 --- a/vendor/github.com/dop251/goja/go.sum +++ /dev/null @@ -1,23 +0,0 @@ -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91 h1:Izz0+t1Z5nI16/II7vuEo/nHjodOg0p7+OiDpjX5t1E= -github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc= -github.com/dop251/goja_nodejs v0.0.0-20210225215109-d91c329300e7 h1:tYwu/z8Y0NkkzGEh3z21mSWggMg4LwLRFucLS7TjARg= -github.com/dop251/goja_nodejs v0.0.0-20210225215109-d91c329300e7/go.mod h1:hn7BA7c8pLvoGndExHudxTDKZ84Pyvv+90pbBjbTz0Y= -github.com/go-sourcemap/sourcemap v2.1.3+incompatible h1:W1iEw64niKVGogNgBN3ePyLFfuisuzeidWPMPWmECqU= -github.com/go-sourcemap/sourcemap v2.1.3+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg= -github.com/kr/pretty v0.2.1 
h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= -github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= -github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= -golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= diff --git a/vendor/github.com/dop251/goja/object.go b/vendor/github.com/dop251/goja/object.go index 3504afac9..049f456c9 100644 --- a/vendor/github.com/dop251/goja/object.go +++ b/vendor/github.com/dop251/goja/object.go @@ -22,10 +22,12 @@ const ( classString = "String" classBoolean = "Boolean" classError = "Error" + classAggError = "AggregateError" classRegExp = "RegExp" classDate = "Date" classJSON = "JSON" classGlobal = "global" + classPromise = "Promise" classArrayIterator = "Array Iterator" classMapIterator = "Map Iterator" @@ -65,6 +67,18 @@ func (p *PropertyDescriptor) Empty() bool { return *p == empty } +func (p 
*PropertyDescriptor) IsAccessor() bool { + return p.Setter != nil || p.Getter != nil +} + +func (p *PropertyDescriptor) IsData() bool { + return p.Value != nil || p.Writable != FLAG_NOT_SET +} + +func (p *PropertyDescriptor) IsGeneric() bool { + return !p.IsAccessor() && !p.IsData() +} + func (p *PropertyDescriptor) toValue(r *Runtime) Value { if p.jsDescriptor != nil { return p.jsDescriptor @@ -399,7 +413,7 @@ func (o *baseObject) deleteIdx(idx valueInt, throw bool) bool { func (o *baseObject) deleteSym(s *Symbol, throw bool) bool { if o.symValues != nil { if val := o.symValues.get(s); val != nil { - if !o.checkDelete(s.desc.string(), val, throw) { + if !o.checkDelete(s.descriptiveString().string(), val, throw) { return false } o.symValues.remove(s) @@ -744,7 +758,7 @@ func (o *baseObject) defineOwnPropertySym(s *Symbol, descr PropertyDescriptor, t if o.symValues != nil { existingVal = o.symValues.get(s) } - if v, ok := o._defineOwnProperty(s.desc.string(), existingVal, descr, throw); ok { + if v, ok := o._defineOwnProperty(s.descriptiveString().string(), existingVal, descr, throw); ok { if o.symValues == nil { o.symValues = newOrderedMap(nil) } @@ -1121,9 +1135,9 @@ func (o *baseObject) fixPropOrder() { names := o.propNames for i := o.lastSortedPropLen; i < len(names); i++ { name := names[i] - if idx := strToIdx(name); idx != math.MaxUint32 { + if idx := strToArrayIdx(name); idx != math.MaxUint32 { k := sort.Search(o.idxPropCount, func(j int) bool { - return strToIdx(names[j]) >= idx + return strToArrayIdx(names[j]) >= idx }) if k < i { if namesMarkedForCopy(names) { diff --git a/vendor/github.com/dop251/goja/object_dynamic.go b/vendor/github.com/dop251/goja/object_dynamic.go index c0bbf996d..6cf97d67f 100644 --- a/vendor/github.com/dop251/goja/object_dynamic.go +++ b/vendor/github.com/dop251/goja/object_dynamic.go @@ -531,7 +531,7 @@ func (a *dynamicArray) getStr(p unistring.String, receiver Value) Value { if p == "length" { return intToValue(int64(a.a.Len())) } 
- if idx, ok := strPropToInt(p); ok { + if idx, ok := strToInt(p); ok { return a.a.Get(idx) } return a.getParentStr(p, receiver) @@ -551,7 +551,7 @@ func (a *dynamicArray) getOwnPropStr(u unistring.String) Value { writable: true, } } - if idx, ok := strPropToInt(u); ok { + if idx, ok := strToInt(u); ok { return a.a.Get(idx) } return nil @@ -573,7 +573,7 @@ func (a *dynamicArray) setOwnStr(p unistring.String, v Value, throw bool) bool { if p == "length" { return a._setLen(v, throw) } - if idx, ok := strPropToInt(p); ok { + if idx, ok := strToInt(p); ok { return a._setIdx(idx, v, throw) } a.val.runtime.typeErrorResult(throw, "Cannot set property %q on a dynamic array", p.String()) @@ -628,7 +628,7 @@ func (a *dynamicArray) hasOwnPropertyStr(u unistring.String) bool { if u == "length" { return true } - if idx, ok := strPropToInt(u); ok { + if idx, ok := strToInt(u); ok { return a._has(idx) } return false @@ -640,7 +640,7 @@ func (a *dynamicArray) hasOwnPropertyIdx(v valueInt) bool { func (a *dynamicArray) defineOwnPropertyStr(name unistring.String, desc PropertyDescriptor, throw bool) bool { if a.checkDynamicObjectPropertyDescr(name, desc, throw) { - if idx, ok := strPropToInt(name); ok { + if idx, ok := strToInt(name); ok { return a._setIdx(idx, desc.Value, throw) } a.val.runtime.typeErrorResult(throw, "Cannot define property %q on a dynamic array", name.String()) @@ -663,7 +663,7 @@ func (a *dynamicArray) _delete(idx int, throw bool) bool { } func (a *dynamicArray) deleteStr(name unistring.String, throw bool) bool { - if idx, ok := strPropToInt(name); ok { + if idx, ok := strToInt(name); ok { return a._delete(idx, throw) } if a.hasOwnPropertyStr(name) { diff --git a/vendor/github.com/dop251/goja/object_goslice.go b/vendor/github.com/dop251/goja/object_goslice.go index 0c2b2322b..47d392b95 100644 --- a/vendor/github.com/dop251/goja/object_goslice.go +++ b/vendor/github.com/dop251/goja/object_goslice.go @@ -187,7 +187,7 @@ func (o *objectGoSlice) 
hasOwnPropertyStr(name unistring.String) bool { if idx := strToIdx64(name); idx >= 0 { return idx < int64(len(*o.data)) } - return false + return name == "length" } func (o *objectGoSlice) defineOwnPropertyIdx(idx valueInt, descr PropertyDescriptor, throw bool) bool { diff --git a/vendor/github.com/dop251/goja/object_goslice_reflect.go b/vendor/github.com/dop251/goja/object_goslice_reflect.go index 658c65f9b..9c9ea1f46 100644 --- a/vendor/github.com/dop251/goja/object_goslice_reflect.go +++ b/vendor/github.com/dop251/goja/object_goslice_reflect.go @@ -196,7 +196,7 @@ func (o *objectGoSliceReflect) setForeignIdx(idx valueInt, val, receiver Value, } func (o *objectGoSliceReflect) setForeignStr(name unistring.String, val, receiver Value, throw bool) (bool, bool) { - return o._setForeignStr(name, trueValIfPresent(o._hasStr(name)), val, receiver, throw) + return o._setForeignStr(name, trueValIfPresent(o.hasOwnPropertyStr(name)), val, receiver, throw) } func (o *objectGoSliceReflect) hasOwnPropertyIdx(idx valueInt) bool { @@ -204,7 +204,7 @@ func (o *objectGoSliceReflect) hasOwnPropertyIdx(idx valueInt) bool { } func (o *objectGoSliceReflect) hasOwnPropertyStr(name unistring.String) bool { - if o._hasStr(name) { + if o._hasStr(name) || name == "length" { return true } return o.objectGoReflect._has(name.String()) diff --git a/vendor/github.com/dop251/goja/parser/expression.go b/vendor/github.com/dop251/goja/parser/expression.go index b6279c615..4d425876b 100644 --- a/vendor/github.com/dop251/goja/parser/expression.go +++ b/vendor/github.com/dop251/goja/parser/expression.go @@ -1,6 +1,8 @@ package parser import ( + "strings" + "github.com/dop251/goja/ast" "github.com/dop251/goja/file" "github.com/dop251/goja/token" @@ -83,10 +85,9 @@ func (self *_parser) parsePrimaryExpression() ast.Expression { case token.LEFT_BRACKET: return self.parseArrayLiteral() case token.LEFT_PARENTHESIS: - self.expect(token.LEFT_PARENTHESIS) - expression := self.parseExpression() - 
self.expect(token.RIGHT_PARENTHESIS) - return expression + return self.parseParenthesisedExpression() + case token.BACKTICK: + return self.parseTemplateLiteral(false) case token.THIS: self.next() return &ast.ThisExpression{ @@ -101,6 +102,76 @@ func (self *_parser) parsePrimaryExpression() ast.Expression { return &ast.BadExpression{From: idx, To: self.idx} } +func (self *_parser) reinterpretSequenceAsArrowFuncParams(seq *ast.SequenceExpression) *ast.ParameterList { + firstRestIdx := -1 + params := make([]*ast.Binding, 0, len(seq.Sequence)) + for i, item := range seq.Sequence { + if _, ok := item.(*ast.SpreadElement); ok { + if firstRestIdx == -1 { + firstRestIdx = i + continue + } + } + if firstRestIdx != -1 { + self.error(seq.Sequence[firstRestIdx].Idx0(), "Rest parameter must be last formal parameter") + return &ast.ParameterList{} + } + params = append(params, self.reinterpretAsBinding(item)) + } + var rest ast.Expression + if firstRestIdx != -1 { + rest = self.reinterpretAsBindingRestElement(seq.Sequence[firstRestIdx]) + } + return &ast.ParameterList{ + List: params, + Rest: rest, + } +} + +func (self *_parser) parseParenthesisedExpression() ast.Expression { + opening := self.idx + self.expect(token.LEFT_PARENTHESIS) + var list []ast.Expression + if self.token != token.RIGHT_PARENTHESIS { + for { + if self.token == token.ELLIPSIS { + start := self.idx + self.errorUnexpectedToken(token.ELLIPSIS) + self.next() + expr := self.parseAssignmentExpression() + list = append(list, &ast.BadExpression{ + From: start, + To: expr.Idx1(), + }) + } else { + list = append(list, self.parseAssignmentExpression()) + } + if self.token != token.COMMA { + break + } + self.next() + if self.token == token.RIGHT_PARENTHESIS { + self.errorUnexpectedToken(token.RIGHT_PARENTHESIS) + break + } + } + } + self.expect(token.RIGHT_PARENTHESIS) + if len(list) == 1 && len(self.errors) == 0 { + return list[0] + } + if len(list) == 0 { + self.errorUnexpectedToken(token.RIGHT_PARENTHESIS) + return 
&ast.BadExpression{ + From: opening, + To: self.idx, + } + } + return &ast.SequenceExpression{ + Sequence: list, + } +} + func (self *_parser) parseRegExpLiteral() *ast.RegExpLiteral { offset := self.chrOffset - 1 // Opening slash already gotten @@ -140,22 +211,33 @@ func (self *_parser) parseRegExpLiteral() *ast.RegExpLiteral { } } -func (self *_parser) parseVariableDeclaration(declarationList *[]*ast.VariableExpression) ast.Expression { +func (self *_parser) parseBindingTarget() (target ast.BindingTarget) { if self.token == token.LET { self.token = token.IDENTIFIER } - if self.token != token.IDENTIFIER { + switch self.token { + case token.IDENTIFIER: + target = &ast.Identifier{ + Name: self.parsedLiteral, + Idx: self.idx, + } + self.next() + case token.LEFT_BRACKET: + target = self.parseArrayBindingPattern() + case token.LEFT_BRACE: + target = self.parseObjectBindingPattern() + default: idx := self.expect(token.IDENTIFIER) self.nextStatement() - return &ast.BadExpression{From: idx, To: self.idx} + target = &ast.BadExpression{From: idx, To: self.idx} } - name := self.parsedLiteral - idx := self.idx - self.next() - node := &ast.VariableExpression{ - Name: name, - Idx: idx, + return +} + +func (self *_parser) parseVariableDeclaration(declarationList *[]*ast.Binding) ast.Expression { + node := &ast.Binding{ + Target: self.parseBindingTarget(), } if declarationList != nil { @@ -170,7 +252,7 @@ func (self *_parser) parseVariableDeclaration(declarationList *[]*ast.VariableEx return node } -func (self *_parser) parseVariableDeclarationList() (declarationList []*ast.VariableExpression) { +func (self *_parser) parseVariableDeclarationList() (declarationList []*ast.Binding) { for { self.parseVariableDeclaration(&declarationList) if self.token != token.COMMA { @@ -181,7 +263,7 @@ func (self *_parser) parseVariableDeclarationList() (declarationList []*ast.Vari return } -func (self *_parser) parseVarDeclarationList(var_ file.Idx) []*ast.VariableExpression { +func (self 
*_parser) parseVarDeclarationList(var_ file.Idx) []*ast.Binding { declarationList := self.parseVariableDeclarationList() self.scope.declare(&ast.VariableDeclaration{ @@ -193,6 +275,12 @@ func (self *_parser) parseVarDeclarationList(var_ file.Idx) []*ast.VariableExpre } func (self *_parser) parseObjectPropertyKey() (unistring.String, ast.Expression, token.Token) { + if self.token == token.LEFT_BRACKET { + self.next() + expr := self.parseAssignmentExpression() + self.expect(token.RIGHT_BRACKET) + return "", expr, token.ILLEGAL + } idx, tkn, literal, parsedLiteral := self.idx, self.token, self.literal, self.parsedLiteral var value ast.Expression self.next() @@ -228,15 +316,21 @@ func (self *_parser) parseObjectPropertyKey() (unistring.String, ast.Expression, Literal: literal, Value: unistring.String(literal), } - tkn = token.STRING + tkn = token.KEYWORD } } return parsedLiteral, value, tkn } func (self *_parser) parseObjectProperty() ast.Property { + if self.token == token.ELLIPSIS { + self.next() + return &ast.SpreadElement{ + Expression: self.parseAssignmentExpression(), + } + } literal, value, tkn := self.parseObjectPropertyKey() - if tkn == token.IDENTIFIER || tkn == token.STRING { + if tkn == token.IDENTIFIER || tkn == token.STRING || tkn == token.KEYWORD || tkn == token.ILLEGAL { switch { case self.token == token.LEFT_PARENTHESIS: idx := self.idx @@ -246,35 +340,46 @@ func (self *_parser) parseObjectProperty() ast.Property { Function: idx, ParameterList: parameterList, } - self.parseFunctionBlock(node) + node.Body, node.DeclarationList = self.parseFunctionBlock() - return ast.Property{ + return &ast.PropertyKeyed{ Key: value, - Kind: "method", + Kind: ast.PropertyKindMethod, Value: node, } - case self.token == token.COMMA || self.token == token.RIGHT_BRACE: // shorthand property - return ast.Property{ - Key: value, - Kind: "value", - Value: &ast.Identifier{ - Name: literal, - Idx: self.idx, - }, + case self.token == token.COMMA || self.token == token.RIGHT_BRACE 
|| self.token == token.ASSIGN: // shorthand property + if tkn == token.IDENTIFIER || tkn == token.KEYWORD && literal == "let" { + var initializer ast.Expression + if self.token == token.ASSIGN { + // allow the initializer syntax here in case the object literal + // needs to be reinterpreted as an assignment pattern, enforce later if it doesn't. + self.next() + initializer = self.parseAssignmentExpression() + } + return &ast.PropertyShort{ + Name: ast.Identifier{ + Name: literal, + Idx: value.Idx0(), + }, + Initializer: initializer, + } } case literal == "get" && self.token != token.COLON: idx := self.idx _, value, _ := self.parseObjectPropertyKey() + idx1 := self.idx parameterList := self.parseFunctionParameterList() - + if len(parameterList.List) > 0 || parameterList.Rest != nil { + self.error(idx1, "Getter must not have any formal parameters.") + } node := &ast.FunctionLiteral{ Function: idx, ParameterList: parameterList, } - self.parseFunctionBlock(node) - return ast.Property{ + node.Body, node.DeclarationList = self.parseFunctionBlock() + return &ast.PropertyKeyed{ Key: value, - Kind: "get", + Kind: ast.PropertyKindGet, Value: node, } case literal == "set" && self.token != token.COLON: @@ -287,11 +392,11 @@ func (self *_parser) parseObjectProperty() ast.Property { ParameterList: parameterList, } - self.parseFunctionBlock(node) + node.Body, node.DeclarationList = self.parseFunctionBlock() - return ast.Property{ + return &ast.PropertyKeyed{ Key: value, - Kind: "set", + Kind: ast.PropertyKindSet, Value: node, } } @@ -299,14 +404,14 @@ func (self *_parser) parseObjectProperty() ast.Property { self.expect(token.COLON) - return ast.Property{ + return &ast.PropertyKeyed{ Key: value, - Kind: "value", + Kind: ast.PropertyKindValue, Value: self.parseAssignmentExpression(), } } -func (self *_parser) parseObjectLiteral() ast.Expression { +func (self *_parser) parseObjectLiteral() *ast.ObjectLiteral { var value []ast.Property idx0 := self.expect(token.LEFT_BRACE) for 
self.token != token.RIGHT_BRACE && self.token != token.EOF { @@ -327,7 +432,7 @@ func (self *_parser) parseObjectLiteral() ast.Expression { } } -func (self *_parser) parseArrayLiteral() ast.Expression { +func (self *_parser) parseArrayLiteral() *ast.ArrayLiteral { idx0 := self.expect(token.LEFT_BRACKET) var value []ast.Expression @@ -337,7 +442,14 @@ func (self *_parser) parseArrayLiteral() ast.Expression { value = append(value, nil) continue } - value = append(value, self.parseAssignmentExpression()) + if self.token == token.ELLIPSIS { + self.next() + value = append(value, &ast.SpreadElement{ + Expression: self.parseAssignmentExpression(), + }) + } else { + value = append(value, self.parseAssignmentExpression()) + } if self.token != token.RIGHT_BRACKET { self.expect(token.COMMA) } @@ -351,11 +463,60 @@ func (self *_parser) parseArrayLiteral() ast.Expression { } } +func (self *_parser) parseTemplateLiteral(tagged bool) *ast.TemplateLiteral { + res := &ast.TemplateLiteral{ + OpenQuote: self.idx, + } + for self.chr != -1 { + start := self.idx + 1 + literal, parsed, finished, parseErr, err := self.parseTemplateCharacters() + if err != nil { + self.error(self.idx, err.Error()) + } + res.Elements = append(res.Elements, &ast.TemplateElement{ + Idx: start, + Literal: literal, + Parsed: parsed, + Valid: parseErr == nil, + }) + if !tagged && parseErr != nil { + self.error(self.idx, parseErr.Error()) + } + end := self.idx + 1 + self.next() + if finished { + res.CloseQuote = end + break + } + expr := self.parseExpression() + res.Expressions = append(res.Expressions, expr) + if self.token != token.RIGHT_BRACE { + self.errorUnexpectedToken(self.token) + } + } + return res +} + +func (self *_parser) parseTaggedTemplateLiteral(tag ast.Expression) *ast.TemplateLiteral { + l := self.parseTemplateLiteral(true) + l.Tag = tag + return l +} + func (self *_parser) parseArgumentList() (argumentList []ast.Expression, idx0, idx1 file.Idx) { idx0 = self.expect(token.LEFT_PARENTHESIS) if 
self.token != token.RIGHT_PARENTHESIS { for { - argumentList = append(argumentList, self.parseAssignmentExpression()) + var item ast.Expression + if self.token == token.ELLIPSIS { + self.next() + item = &ast.SpreadElement{ + Expression: self.parseAssignmentExpression(), + } + } else { + item = self.parseAssignmentExpression() + } + argumentList = append(argumentList, item) if self.token != token.COMMA { break } @@ -452,14 +613,17 @@ func (self *_parser) parseLeftHandSideExpression() ast.Expression { } else { left = self.parsePrimaryExpression() } - +L: for { - if self.token == token.PERIOD { + switch self.token { + case token.PERIOD: left = self.parseDotMember(left) - } else if self.token == token.LEFT_BRACKET { + case token.LEFT_BRACKET: left = self.parseBracketMember(left) - } else { - break + case token.BACKTICK: + left = self.parseTaggedTemplateLiteral(left) + default: + break L } } @@ -480,16 +644,19 @@ func (self *_parser) parseLeftHandSideExpressionAllowCall() ast.Expression { } else { left = self.parsePrimaryExpression() } - +L: for { - if self.token == token.PERIOD { + switch self.token { + case token.PERIOD: left = self.parseDotMember(left) - } else if self.token == token.LEFT_BRACKET { + case token.LEFT_BRACKET: left = self.parseBracketMember(left) - } else if self.token == token.LEFT_PARENTHESIS { + case token.LEFT_PARENTHESIS: left = self.parseCallExpression(left) - } else { - break + case token.BACKTICK: + left = self.parseTaggedTemplateLiteral(left) + default: + break L } } @@ -763,7 +930,7 @@ func (self *_parser) parseLogicalOrExpression() ast.Expression { return left } -func (self *_parser) parseConditionlExpression() ast.Expression { +func (self *_parser) parseConditionalExpression() ast.Expression { left := self.parseLogicalOrExpression() if self.token == token.QUESTION_MARK { @@ -781,10 +948,16 @@ func (self *_parser) parseConditionlExpression() ast.Expression { } func (self *_parser) parseAssignmentExpression() ast.Expression { + start := 
self.idx + parenthesis := false + var state parserState if self.token == token.LET { self.token = token.IDENTIFIER + } else if self.token == token.LEFT_PARENTHESIS { + self.mark(&state) + parenthesis = true } - left := self.parseConditionlExpression() + left := self.parseConditionalExpression() var operator token.Token switch self.token { case token.ASSIGN: @@ -811,23 +984,65 @@ func (self *_parser) parseAssignmentExpression() ast.Expression { operator = token.SHIFT_RIGHT case token.UNSIGNED_SHIFT_RIGHT_ASSIGN: operator = token.UNSIGNED_SHIFT_RIGHT + case token.ARROW: + var paramList *ast.ParameterList + if id, ok := left.(*ast.Identifier); ok { + paramList = &ast.ParameterList{ + Opening: id.Idx, + Closing: id.Idx1(), + List: []*ast.Binding{{ + Target: id, + }}, + } + } else if parenthesis { + if seq, ok := left.(*ast.SequenceExpression); ok && len(self.errors) == 0 { + paramList = self.reinterpretSequenceAsArrowFuncParams(seq) + } else { + self.restore(&state) + paramList = self.parseFunctionParameterList() + } + } else { + self.error(left.Idx0(), "Malformed arrow function parameter list") + return &ast.BadExpression{From: left.Idx0(), To: left.Idx1()} + } + self.expect(token.ARROW) + node := &ast.ArrowFunctionLiteral{ + Start: start, + ParameterList: paramList, + } + node.Body, node.DeclarationList = self.parseArrowFunctionBody() + node.Source = self.slice(node.Start, node.Body.Idx1()) + return node } if operator != 0 { idx := self.idx self.next() - switch left.(type) { + ok := false + switch l := left.(type) { case *ast.Identifier, *ast.DotExpression, *ast.BracketExpression: - default: - self.error(left.Idx0(), "Invalid left-hand side in assignment") - self.nextStatement() - return &ast.BadExpression{From: idx, To: self.idx} + ok = true + case *ast.ArrayLiteral: + if !parenthesis && operator == token.ASSIGN { + left = self.reinterpretAsArrayAssignmentPattern(l) + ok = true + } + case *ast.ObjectLiteral: + if !parenthesis && operator == token.ASSIGN { + left = 
self.reinterpretAsObjectAssignmentPattern(l) + ok = true + } } - return &ast.AssignExpression{ - Left: left, - Operator: operator, - Right: self.parseAssignmentExpression(), + if ok { + return &ast.AssignExpression{ + Left: left, + Operator: operator, + Right: self.parseAssignmentExpression(), + } } + self.error(left.Idx0(), "Invalid left-hand side in assignment") + self.nextStatement() + return &ast.BadExpression{From: idx, To: self.idx} } return left @@ -856,3 +1071,252 @@ func (self *_parser) parseExpression() ast.Expression { return left } + +func (self *_parser) checkComma(from, to file.Idx) { + if pos := strings.IndexByte(self.str[int(from)-self.base:int(to)-self.base], ','); pos >= 0 { + self.error(from+file.Idx(pos), "Comma is not allowed here") + } +} + +func (self *_parser) reinterpretAsArrayAssignmentPattern(left *ast.ArrayLiteral) ast.Expression { + value := left.Value + var rest ast.Expression + for i, item := range value { + if spread, ok := item.(*ast.SpreadElement); ok { + if i != len(value)-1 { + self.error(item.Idx0(), "Rest element must be last element") + return &ast.BadExpression{From: left.Idx0(), To: left.Idx1()} + } + self.checkComma(spread.Expression.Idx1(), left.RightBracket) + rest = self.reinterpretAsDestructAssignTarget(spread.Expression) + value = value[:len(value)-1] + } else { + value[i] = self.reinterpretAsAssignmentElement(item) + } + } + return &ast.ArrayPattern{ + LeftBracket: left.LeftBracket, + RightBracket: left.RightBracket, + Elements: value, + Rest: rest, + } +} + +func (self *_parser) reinterpretArrayAssignPatternAsBinding(pattern *ast.ArrayPattern) *ast.ArrayPattern { + for i, item := range pattern.Elements { + pattern.Elements[i] = self.reinterpretAsDestructBindingTarget(item) + } + if pattern.Rest != nil { + pattern.Rest = self.reinterpretAsDestructBindingTarget(pattern.Rest) + } + return pattern +} + +func (self *_parser) reinterpretAsArrayBindingPattern(left *ast.ArrayLiteral) ast.BindingTarget { + value := left.Value 
+ var rest ast.Expression + for i, item := range value { + if spread, ok := item.(*ast.SpreadElement); ok { + if i != len(value)-1 { + self.error(item.Idx0(), "Rest element must be last element") + return &ast.BadExpression{From: left.Idx0(), To: left.Idx1()} + } + self.checkComma(spread.Expression.Idx1(), left.RightBracket) + rest = self.reinterpretAsDestructBindingTarget(spread.Expression) + value = value[:len(value)-1] + } else { + value[i] = self.reinterpretAsBindingElement(item) + } + } + return &ast.ArrayPattern{ + LeftBracket: left.LeftBracket, + RightBracket: left.RightBracket, + Elements: value, + Rest: rest, + } +} + +func (self *_parser) parseArrayBindingPattern() ast.BindingTarget { + return self.reinterpretAsArrayBindingPattern(self.parseArrayLiteral()) +} + +func (self *_parser) parseObjectBindingPattern() ast.BindingTarget { + return self.reinterpretAsObjectBindingPattern(self.parseObjectLiteral()) +} + +func (self *_parser) reinterpretArrayObjectPatternAsBinding(pattern *ast.ObjectPattern) *ast.ObjectPattern { + for _, prop := range pattern.Properties { + if keyed, ok := prop.(*ast.PropertyKeyed); ok { + keyed.Value = self.reinterpretAsBindingElement(keyed.Value) + } + } + if pattern.Rest != nil { + pattern.Rest = self.reinterpretAsBindingRestElement(pattern.Rest) + } + return pattern +} + +func (self *_parser) reinterpretAsObjectBindingPattern(expr *ast.ObjectLiteral) ast.BindingTarget { + var rest ast.Expression + value := expr.Value + for i, prop := range value { + ok := false + switch prop := prop.(type) { + case *ast.PropertyKeyed: + if prop.Kind == ast.PropertyKindValue { + prop.Value = self.reinterpretAsBindingElement(prop.Value) + ok = true + } + case *ast.PropertyShort: + ok = true + case *ast.SpreadElement: + if i != len(expr.Value)-1 { + self.error(prop.Idx0(), "Rest element must be last element") + return &ast.BadExpression{From: expr.Idx0(), To: expr.Idx1()} + } + // TODO make sure there is no trailing comma + rest = 
self.reinterpretAsBindingRestElement(prop.Expression) + value = value[:i] + ok = true + } + if !ok { + self.error(prop.Idx0(), "Invalid destructuring binding target") + return &ast.BadExpression{From: expr.Idx0(), To: expr.Idx1()} + } + } + return &ast.ObjectPattern{ + LeftBrace: expr.LeftBrace, + RightBrace: expr.RightBrace, + Properties: value, + Rest: rest, + } +} + +func (self *_parser) reinterpretAsObjectAssignmentPattern(l *ast.ObjectLiteral) ast.Expression { + var rest ast.Expression + value := l.Value + for i, prop := range value { + ok := false + switch prop := prop.(type) { + case *ast.PropertyKeyed: + if prop.Kind == ast.PropertyKindValue { + prop.Value = self.reinterpretAsAssignmentElement(prop.Value) + ok = true + } + case *ast.PropertyShort: + ok = true + case *ast.SpreadElement: + if i != len(l.Value)-1 { + self.error(prop.Idx0(), "Rest element must be last element") + return &ast.BadExpression{From: l.Idx0(), To: l.Idx1()} + } + // TODO make sure there is no trailing comma + rest = prop.Expression + value = value[:i] + ok = true + } + if !ok { + self.error(prop.Idx0(), "Invalid destructuring assignment target") + return &ast.BadExpression{From: l.Idx0(), To: l.Idx1()} + } + } + return &ast.ObjectPattern{ + LeftBrace: l.LeftBrace, + RightBrace: l.RightBrace, + Properties: value, + Rest: rest, + } +} + +func (self *_parser) reinterpretAsAssignmentElement(expr ast.Expression) ast.Expression { + switch expr := expr.(type) { + case *ast.AssignExpression: + if expr.Operator == token.ASSIGN { + expr.Left = self.reinterpretAsDestructAssignTarget(expr.Left) + return expr + } else { + self.error(expr.Idx0(), "Invalid destructuring assignment target") + return &ast.BadExpression{From: expr.Idx0(), To: expr.Idx1()} + } + default: + return self.reinterpretAsDestructAssignTarget(expr) + } +} + +func (self *_parser) reinterpretAsBindingElement(expr ast.Expression) ast.Expression { + switch expr := expr.(type) { + case *ast.AssignExpression: + if expr.Operator == 
token.ASSIGN { + expr.Left = self.reinterpretAsDestructBindingTarget(expr.Left) + return expr + } else { + self.error(expr.Idx0(), "Invalid destructuring assignment target") + return &ast.BadExpression{From: expr.Idx0(), To: expr.Idx1()} + } + default: + return self.reinterpretAsDestructBindingTarget(expr) + } +} + +func (self *_parser) reinterpretAsBinding(expr ast.Expression) *ast.Binding { + switch expr := expr.(type) { + case *ast.AssignExpression: + if expr.Operator == token.ASSIGN { + return &ast.Binding{ + Target: self.reinterpretAsDestructBindingTarget(expr.Left), + Initializer: expr.Right, + } + } else { + self.error(expr.Idx0(), "Invalid destructuring assignment target") + return &ast.Binding{ + Target: &ast.BadExpression{From: expr.Idx0(), To: expr.Idx1()}, + } + } + default: + return &ast.Binding{ + Target: self.reinterpretAsDestructBindingTarget(expr), + } + } +} + +func (self *_parser) reinterpretAsDestructAssignTarget(item ast.Expression) ast.Expression { + switch item := item.(type) { + case nil: + return nil + case *ast.ArrayLiteral: + return self.reinterpretAsArrayAssignmentPattern(item) + case *ast.ObjectLiteral: + return self.reinterpretAsObjectAssignmentPattern(item) + case ast.Pattern, *ast.Identifier, *ast.DotExpression, *ast.BracketExpression: + return item + } + self.error(item.Idx0(), "Invalid destructuring assignment target") + return &ast.BadExpression{From: item.Idx0(), To: item.Idx1()} +} + +func (self *_parser) reinterpretAsDestructBindingTarget(item ast.Expression) ast.BindingTarget { + switch item := item.(type) { + case nil: + return nil + case *ast.ArrayPattern: + return self.reinterpretArrayAssignPatternAsBinding(item) + case *ast.ObjectPattern: + return self.reinterpretArrayObjectPatternAsBinding(item) + case *ast.ArrayLiteral: + return self.reinterpretAsArrayBindingPattern(item) + case *ast.ObjectLiteral: + return self.reinterpretAsObjectBindingPattern(item) + case *ast.Identifier: + return item + } + self.error(item.Idx0(), 
"Invalid destructuring binding target") + return &ast.BadExpression{From: item.Idx0(), To: item.Idx1()} +} + +func (self *_parser) reinterpretAsBindingRestElement(expr ast.Expression) ast.Expression { + if _, ok := expr.(*ast.Identifier); ok { + return expr + } + self.error(expr.Idx0(), "Invalid binding rest") + return &ast.BadExpression{From: expr.Idx0(), To: expr.Idx1()} +} diff --git a/vendor/github.com/dop251/goja/parser/lexer.go b/vendor/github.com/dop251/goja/parser/lexer.go index ce81cfc07..c76953c8c 100644 --- a/vendor/github.com/dop251/goja/parser/lexer.go +++ b/vendor/github.com/dop251/goja/parser/lexer.go @@ -104,7 +104,8 @@ func (self *_parser) scanIdentifier() (string, unistring.String, bool, error) { var parsed unistring.String if hasEscape || isUnicode { var err error - parsed, err = parseStringLiteral1(literal, length, isUnicode) + // TODO strict + parsed, err = parseStringLiteral(literal, length, isUnicode, false) if err != nil { return "", "", false, err } @@ -184,6 +185,33 @@ func isId(tkn token.Token) bool { return false } +type parserState struct { + tok token.Token + literal string + parsedLiteral unistring.String + implicitSemicolon, insertSemicolon bool + chr rune + chrOffset, offset int + errorCount int +} + +func (self *_parser) mark(state *parserState) *parserState { + if state == nil { + state = &parserState{} + } + state.tok, state.literal, state.parsedLiteral, state.implicitSemicolon, state.insertSemicolon, state.chr, state.chrOffset, state.offset = + self.token, self.literal, self.parsedLiteral, self.implicitSemicolon, self.insertSemicolon, self.chr, self.chrOffset, self.offset + + state.errorCount = len(self.errors) + return state +} + +func (self *_parser) restore(state *parserState) { + self.token, self.literal, self.parsedLiteral, self.implicitSemicolon, self.insertSemicolon, self.chr, self.chrOffset, self.offset = + state.tok, state.literal, state.parsedLiteral, state.implicitSemicolon, state.insertSemicolon, state.chr, 
state.chrOffset, state.offset + self.errors = self.errors[:state.errorCount] +} + func (self *_parser) peek() token.Token { implicitSemicolon, insertSemicolon, chr, chrOffset, offset := self.implicitSemicolon, self.insertSemicolon, self.chr, self.chrOffset, self.offset tok, _, _, _ := self.scan() @@ -297,7 +325,17 @@ func (self *_parser) scan() (tkn token.Token, literal string, parsedLiteral unis insertSemicolon = true tkn, literal = self.scanNumericLiteral(true) } else { - tkn = token.PERIOD + if self.chr == '.' { + self.read() + if self.chr == '.' { + self.read() + tkn = token.ELLIPSIS + } else { + tkn = token.ILLEGAL + } + } else { + tkn = token.PERIOD + } } case ',': tkn = token.COMMA @@ -351,10 +389,19 @@ func (self *_parser) scan() (tkn token.Token, literal string, parsedLiteral unis case '>': tkn = self.switch6(token.GREATER, token.GREATER_OR_EQUAL, '>', token.SHIFT_RIGHT, token.SHIFT_RIGHT_ASSIGN, '>', token.UNSIGNED_SHIFT_RIGHT, token.UNSIGNED_SHIFT_RIGHT_ASSIGN) case '=': - tkn = self.switch2(token.ASSIGN, token.EQUAL) - if tkn == token.EQUAL && self.chr == '=' { + if self.chr == '>' { self.read() - tkn = token.STRICT_EQUAL + if self.implicitSemicolon { + tkn = token.ILLEGAL + } else { + tkn = token.ARROW + } + } else { + tkn = self.switch2(token.ASSIGN, token.EQUAL) + if tkn == token.EQUAL && self.chr == '=' { + self.read() + tkn = token.STRICT_EQUAL + } } case '!': tkn = self.switch2(token.NOT, token.NOT_EQUAL) @@ -378,6 +425,8 @@ func (self *_parser) scan() (tkn token.Token, literal string, parsedLiteral unis if err != nil { tkn = token.ILLEGAL } + case '`': + tkn = token.BACKTICK default: self.errorUnexpected(idx, chr) tkn = token.ILLEGAL @@ -565,20 +614,40 @@ func (self *_parser) scanEscape(quote rune) (int, bool) { length, base = 2, 16 case 'u': self.read() - length, base = 4, 16 + if self.chr == '{' { + self.read() + length, base = 0, 16 + } else { + length, base = 4, 16 + } default: self.read() // Always make progress } - if length > 0 { + if base 
> 0 { var value uint32 - for ; length > 0 && self.chr != quote && self.chr >= 0; length-- { - digit := uint32(digitValue(self.chr)) - if digit >= base { - break + if length > 0 { + for ; length > 0 && self.chr != quote && self.chr >= 0; length-- { + digit := uint32(digitValue(self.chr)) + if digit >= base { + break + } + value = value*base + digit + self.read() + } + } else { + for self.chr != quote && self.chr >= 0 && value < utf8.MaxRune { + if self.chr == '}' { + self.read() + break + } + digit := uint32(digitValue(self.chr)) + if digit >= base { + break + } + value = value*base + digit + self.read() } - value = value*base + digit - self.read() } chr = rune(value) } @@ -636,7 +705,8 @@ func (self *_parser) scanString(offset int, parse bool) (literal string, parsed self.read() literal = self.str[offset:self.chrOffset] if parse { - parsed, err = parseStringLiteral1(literal[1:len(literal)-1], length, isUnicode) + // TODO strict + parsed, err = parseStringLiteral(literal[1:len(literal)-1], length, isUnicode, false) } return @@ -660,6 +730,84 @@ func (self *_parser) scanNewline() { self.read() } +func (self *_parser) parseTemplateCharacters() (literal string, parsed unistring.String, finished bool, parseErr, err error) { + offset := self.chrOffset + var end int + length := 0 + isUnicode := false + hasCR := false + for { + chr := self.chr + if chr < 0 { + goto unterminated + } + self.read() + if chr == '`' { + finished = true + end = self.chrOffset - 1 + break + } + if chr == '\\' { + if self.chr == '\n' || self.chr == '\r' || self.chr == '\u2028' || self.chr == '\u2029' || self.chr < 0 { + if self.chr == '\r' { + hasCR = true + } + self.scanNewline() + } else { + l, u := self.scanEscape('`') + length += l + if u { + isUnicode = true + } + } + continue + } + if chr == '$' && self.chr == '{' { + self.read() + end = self.chrOffset - 2 + break + } + if chr >= utf8.RuneSelf { + isUnicode = true + if chr > 0xFFFF { + length++ + } + } else if chr == '\r' { + hasCR = true + 
if self.chr == '\n' { + length-- + } + } + length++ + } + literal = self.str[offset:end] + if hasCR { + literal = normaliseCRLF(literal) + } + parsed, parseErr = parseStringLiteral(literal, length, isUnicode, true) + self.insertSemicolon = true + return +unterminated: + err = errors.New(err_UnexpectedEndOfInput) + return +} + +func normaliseCRLF(s string) string { + var buf strings.Builder + buf.Grow(len(s)) + for i := 0; i < len(s); i++ { + if s[i] == '\r' { + buf.WriteByte('\n') + if i < len(s)-1 && s[i+1] == '\n' { + i++ + } + } else { + buf.WriteByte(s[i]) + } + } + return buf.String() +} + func hex2decimal(chr byte) (value rune, ok bool) { { chr := rune(chr) @@ -714,7 +862,7 @@ error: return nil, errors.New("Illegal numeric literal") } -func parseStringLiteral1(literal string, length int, unicode bool) (unistring.String, error) { +func parseStringLiteral(literal string, length int, unicode, strict bool) (unistring.String, error) { var sb strings.Builder var chars []uint16 if unicode { @@ -783,17 +931,46 @@ func parseStringLiteral1(literal string, length int, unicode bool) (unistring.St case 'x': size = 2 case 'u': - size = 4 - } - if len(str) < size { - return "", fmt.Errorf("invalid escape: \\%s: len(%q) != %d", string(chr), str, size) + if str == "" || str[0] != '{' { + size = 4 + } } - for j := 0; j < size; j++ { - decimal, ok := hex2decimal(str[j]) - if !ok { - return "", fmt.Errorf("invalid escape: \\%s: %q", string(chr), str[:size]) + if size > 0 { + if len(str) < size { + return "", fmt.Errorf("invalid escape: \\%s: len(%q) != %d", string(chr), str, size) + } + for j := 0; j < size; j++ { + decimal, ok := hex2decimal(str[j]) + if !ok { + return "", fmt.Errorf("invalid escape: \\%s: %q", string(chr), str[:size]) + } + value = value<<4 | decimal + } + } else { + str = str[1:] + var val rune + value = -1 + for ; size < len(str); size++ { + if str[size] == '}' { + if size == 0 { + return "", fmt.Errorf("invalid escape: \\%s", string(chr)) + } + size++ + 
value = val + break + } + decimal, ok := hex2decimal(str[size]) + if !ok { + return "", fmt.Errorf("invalid escape: \\%s: %q", string(chr), str[:size+1]) + } + val = val<<4 | decimal + if val > utf8.MaxRune { + return "", fmt.Errorf("undefined Unicode code-point: %q", str[:size+1]) + } + } + if value == -1 { + return "", fmt.Errorf("unterminated \\u{: %q", str) } - value = value<<4 | decimal } str = str[size:] if chr == 'x' { @@ -809,7 +986,9 @@ func parseStringLiteral1(literal string, length int, unicode bool) (unistring.St } fallthrough case '1', '2', '3', '4', '5', '6', '7': - // TODO strict + if strict { + return "", errors.New("Octal escape sequences are not allowed in this context") + } value = rune(chr) - '0' j := 0 for ; j < 2; j++ { @@ -876,53 +1055,41 @@ func (self *_parser) scanNumericLiteral(decimalPoint bool) (token.Token, string) if decimalPoint { offset-- self.scanMantissa(10) - goto exponent - } - - if self.chr == '0' { - offset := self.chrOffset - self.read() - if self.chr == 'x' || self.chr == 'X' { - // Hexadecimal + } else { + if self.chr == '0' { self.read() - if isDigit(self.chr, 16) { - self.read() - } else { - return token.ILLEGAL, self.str[offset:self.chrOffset] + base := 0 + switch self.chr { + case 'x', 'X': + base = 16 + case 'o', 'O': + base = 8 + case 'b', 'B': + base = 2 + case '.', 'e', 'E': + // no-op + default: + // legacy octal + self.scanMantissa(8) + goto end } - self.scanMantissa(16) - - if self.chrOffset-offset <= 2 { - // Only "0x" or "0X" - self.error(0, "Illegal hexadecimal number") + if base > 0 { + self.read() + if !isDigit(self.chr, base) { + return token.ILLEGAL, self.str[offset:self.chrOffset] + } + self.scanMantissa(base) + goto end } - - goto hexadecimal - } else if self.chr == '.' 
{ - // Float - goto float } else { - // Octal, Float - if self.chr == 'e' || self.chr == 'E' { - goto exponent - } - self.scanMantissa(8) - if self.chr == '8' || self.chr == '9' { - return token.ILLEGAL, self.str[offset:self.chrOffset] - } - goto octal + self.scanMantissa(10) + } + if self.chr == '.' { + self.read() + self.scanMantissa(10) } } - self.scanMantissa(10) - -float: - if self.chr == '.' { - self.read() - self.scanMantissa(10) - } - -exponent: if self.chr == 'e' || self.chr == 'E' { self.read() if self.chr == '-' || self.chr == '+' { @@ -935,9 +1102,7 @@ exponent: return token.ILLEGAL, self.str[offset:self.chrOffset] } } - -hexadecimal: -octal: +end: if isIdentifierStart(self.chr) || isDecimalDigit(self.chr) { return token.ILLEGAL, self.str[offset:self.chrOffset] } diff --git a/vendor/github.com/dop251/goja/parser/regexp.go b/vendor/github.com/dop251/goja/parser/regexp.go index 12f0cb8b3..db2e4aa80 100644 --- a/vendor/github.com/dop251/goja/parser/regexp.go +++ b/vendor/github.com/dop251/goja/parser/regexp.go @@ -284,7 +284,12 @@ func (self *_RegExp_parser) scanEscape(inClass bool) { case 'u': self.read() - length, base = 4, 16 + if self.chr == '{' { + self.read() + length, base = 0, 16 + } else { + length, base = 4, 16 + } case 'b': if inClass { @@ -365,31 +370,36 @@ func (self *_RegExp_parser) scanEscape(inClass bool) { // Otherwise, we're a \u.... or \x... 
valueOffset := self.chrOffset - var value uint32 - { - length := length - for ; length > 0; length-- { + if length > 0 { + for length := length; length > 0; length-- { + digit := uint32(digitValue(self.chr)) + if digit >= base { + // Not a valid digit + goto skip + } + self.read() + } + } else { + for self.chr != '}' && self.chr != -1 { digit := uint32(digitValue(self.chr)) if digit >= base { // Not a valid digit goto skip } - value = value*base + digit self.read() } } - if length == 4 { + if length == 4 || length == 0 { self.write([]byte{ '\\', 'x', '{', - self.str[valueOffset+0], - self.str[valueOffset+1], - self.str[valueOffset+2], - self.str[valueOffset+3], - '}', }) + self.passString(valueOffset, self.chrOffset) + if length != 0 { + self.writeByte('}') + } } else if length == 2 { self.passString(offset-1, valueOffset+2) } else { diff --git a/vendor/github.com/dop251/goja/parser/statement.go b/vendor/github.com/dop251/goja/parser/statement.go index 740a85065..3aded937a 100644 --- a/vendor/github.com/dop251/goja/parser/statement.go +++ b/vendor/github.com/dop251/goja/parser/statement.go @@ -3,14 +3,13 @@ package parser import ( "encoding/base64" "fmt" + "io/ioutil" + "strings" + "github.com/dop251/goja/ast" "github.com/dop251/goja/file" "github.com/dop251/goja/token" "github.com/go-sourcemap/sourcemap" - "io/ioutil" - "net/url" - "path" - "strings" ) func (self *_parser) parseBlockStatement() *ast.BlockStatement { @@ -128,17 +127,11 @@ func (self *_parser) parseTryStatement() ast.Statement { if self.token == token.CATCH { catch := self.idx self.next() - var parameter *ast.Identifier + var parameter ast.BindingTarget if self.token == token.LEFT_PARENTHESIS { self.next() - if self.token != token.IDENTIFIER { - self.expect(token.IDENTIFIER) - self.nextStatement() - return &ast.BadStatement{From: catch, To: self.idx} - } else { - parameter = self.parseIdentifier() - self.expect(token.RIGHT_PARENTHESIS) - } + parameter = self.parseBindingTarget() + 
self.expect(token.RIGHT_PARENTHESIS) } node.Catch = &ast.CatchStatement{ Catch: catch, @@ -162,13 +155,15 @@ func (self *_parser) parseTryStatement() ast.Statement { func (self *_parser) parseFunctionParameterList() *ast.ParameterList { opening := self.expect(token.LEFT_PARENTHESIS) - var list []*ast.Identifier + var list []*ast.Binding + var rest ast.Expression for self.token != token.RIGHT_PARENTHESIS && self.token != token.EOF { - if self.token != token.IDENTIFIER { - self.expect(token.IDENTIFIER) - } else { - list = append(list, self.parseIdentifier()) + if self.token == token.ELLIPSIS { + self.next() + rest = self.reinterpretAsDestructBindingTarget(self.parseAssignmentExpression()) + break } + self.parseVariableDeclaration(&list) if self.token != token.RIGHT_PARENTHESIS { self.expect(token.COMMA) } @@ -178,24 +173,11 @@ func (self *_parser) parseFunctionParameterList() *ast.ParameterList { return &ast.ParameterList{ Opening: opening, List: list, + Rest: rest, Closing: closing, } } -func (self *_parser) parseParameterList() (list []string) { - for self.token != token.EOF { - if self.token != token.IDENTIFIER { - self.expect(token.IDENTIFIER) - } - list = append(list, self.literal) - self.next() - if self.token != token.EOF { - self.expect(token.COMMA) - } - } - return -} - func (self *_parser) parseFunction(declaration bool) *ast.FunctionLiteral { node := &ast.FunctionLiteral{ @@ -211,24 +193,32 @@ func (self *_parser) parseFunction(declaration bool) *ast.FunctionLiteral { } node.Name = name node.ParameterList = self.parseFunctionParameterList() - self.parseFunctionBlock(node) + node.Body, node.DeclarationList = self.parseFunctionBlock() node.Source = self.slice(node.Idx0(), node.Idx1()) return node } -func (self *_parser) parseFunctionBlock(node *ast.FunctionLiteral) { - { - self.openScope() - inFunction := self.scope.inFunction - self.scope.inFunction = true - defer func() { - self.scope.inFunction = inFunction - self.closeScope() - }() - node.Body = 
self.parseBlockStatement() - node.DeclarationList = self.scope.declarationList +func (self *_parser) parseFunctionBlock() (body *ast.BlockStatement, declarationList []*ast.VariableDeclaration) { + self.openScope() + inFunction := self.scope.inFunction + self.scope.inFunction = true + defer func() { + self.scope.inFunction = inFunction + self.closeScope() + }() + body = self.parseBlockStatement() + declarationList = self.scope.declarationList + return +} + +func (self *_parser) parseArrowFunctionBody() (ast.ConciseBody, []*ast.VariableDeclaration) { + if self.token == token.LEFT_BRACE { + return self.parseFunctionBlock() } + return &ast.ExpressionBody{ + Expression: self.parseAssignmentExpression(), + }, nil } func (self *_parser) parseDebuggerStatement() ast.Statement { @@ -452,7 +442,7 @@ func (self *_parser) parseForOrForInStatement() ast.Statement { if tok == token.VAR || tok == token.LET || tok == token.CONST { idx := self.idx self.next() - var list []*ast.VariableExpression + var list []*ast.Binding if tok == token.VAR { list = self.parseVarDeclarationList(idx) } else { @@ -479,13 +469,11 @@ func (self *_parser) parseForOrForInStatement() ast.Statement { into = &ast.ForDeclaration{ Idx: idx, IsConst: tok == token.CONST, - Binding: &ast.BindingIdentifier{ - Name: list[0].Name, - Idx: list[0].Idx, - }, + Target: list[0].Target, } } } else { + self.ensurePatternInit(list) if tok == token.VAR { initializer = &ast.ForLoopInitializerVarDeclList{ List: list, @@ -510,9 +498,13 @@ func (self *_parser) parseForOrForInStatement() ast.Statement { forOf = true } if forIn || forOf { - switch expr.(type) { - case *ast.Identifier, *ast.DotExpression, *ast.BracketExpression, *ast.VariableExpression: + switch e := expr.(type) { + case *ast.Identifier, *ast.DotExpression, *ast.BracketExpression, *ast.Binding: // These are all acceptable + case *ast.ObjectLiteral: + expr = self.reinterpretAsObjectAssignmentPattern(e) + case *ast.ArrayLiteral: + expr = 
self.reinterpretAsArrayAssignmentPattern(e) default: self.error(idx, "Invalid left-hand side in for-in or for-of") self.nextStatement() @@ -541,11 +533,23 @@ func (self *_parser) parseForOrForInStatement() ast.Statement { return self.parseFor(idx, initializer) } +func (self *_parser) ensurePatternInit(list []*ast.Binding) { + for _, item := range list { + if _, ok := item.Target.(ast.Pattern); ok { + if item.Initializer == nil { + self.error(item.Idx1(), "Missing initializer in destructuring declaration") + break + } + } + } +} + func (self *_parser) parseVariableStatement() *ast.VariableStatement { idx := self.expect(token.VAR) list := self.parseVarDeclarationList(idx) + self.ensurePatternInit(list) self.semicolon() return &ast.VariableStatement{ @@ -561,6 +565,7 @@ func (self *_parser) parseLexicalDeclaration(tok token.Token) *ast.LexicalDeclar } list := self.parseVariableDeclarationList() + self.ensurePatternInit(list) self.semicolon() return &ast.LexicalDeclaration{ @@ -688,26 +693,14 @@ func (self *_parser) parseSourceMap() *sourcemap.Consumer { b64 := urlStr[b64Index+1:] data, err = base64.StdEncoding.DecodeString(b64) } else { - var smUrl *url.URL - if smUrl, err = url.Parse(urlStr); err == nil { - p := smUrl.Path - if !path.IsAbs(p) { - baseName := self.file.Name() - baseUrl, err1 := url.Parse(baseName) - if err1 == nil && baseUrl.Scheme != "" { - baseUrl.Path = path.Join(path.Dir(baseUrl.Path), p) - p = baseUrl.String() - } else { - p = path.Join(path.Dir(baseName), p) - } - } + if sourceURL := file.ResolveSourcemapURL(self.file.Name(), urlStr); sourceURL != nil { if self.opts.sourceMapLoader != nil { - data, err = self.opts.sourceMapLoader(p) + data, err = self.opts.sourceMapLoader(sourceURL.String()) } else { - if smUrl.Scheme == "" || smUrl.Scheme == "file" { - data, err = ioutil.ReadFile(p) + if sourceURL.Scheme == "" || sourceURL.Scheme == "file" { + data, err = ioutil.ReadFile(sourceURL.Path) } else { - err = fmt.Errorf("unsupported source map URL 
scheme: %s", smUrl.Scheme) + err = fmt.Errorf("unsupported source map URL scheme: %s", sourceURL.Scheme) } } } diff --git a/vendor/github.com/dop251/goja/proxy.go b/vendor/github.com/dop251/goja/proxy.go index a7c2aafa9..de4901315 100644 --- a/vendor/github.com/dop251/goja/proxy.go +++ b/vendor/github.com/dop251/goja/proxy.go @@ -774,6 +774,23 @@ func (p *proxyObject) deleteSym(s *Symbol, throw bool) bool { func (p *proxyObject) ownPropertyKeys(all bool, _ []Value) []Value { if v, ok := p.proxyOwnKeys(); ok { + if !all { + k := 0 + for i, key := range v { + prop := p.val.getOwnProp(key) + if prop == nil { + continue + } + if prop, ok := prop.(*valueProperty); ok && !prop.enumerable { + continue + } + if k != i { + v[k] = v[i] + } + k++ + } + v = v[:k] + } return v } return p.target.self.ownPropertyKeys(all, nil) @@ -848,7 +865,7 @@ func (p *proxyObject) assertConstructor() func(args []Value, newTarget *Object) func (p *proxyObject) apply(call FunctionCall) Value { if p.call == nil { - p.val.runtime.NewTypeError("proxy target is not a function") + panic(p.val.runtime.NewTypeError("proxy target is not a function")) } if v, ok := p.checkHandler().apply(p.target, nilSafe(call.This), call.Arguments); ok { return v @@ -891,15 +908,15 @@ func (p *proxyObject) __isCompatibleDescriptor(extensible bool, desc *PropertyDe return false } - if p.__isGenericDescriptor(desc) { + if desc.IsGeneric() { return true } - if p.__isDataDescriptor(desc) != !current.accessor { + if desc.IsData() != !current.accessor { return desc.Configurable != FLAG_FALSE } - if p.__isDataDescriptor(desc) && !current.accessor { + if desc.IsData() && !current.accessor { if !current.configurable { if desc.Writable == FLAG_TRUE && !current.writable { return false @@ -912,7 +929,7 @@ func (p *proxyObject) __isCompatibleDescriptor(extensible bool, desc *PropertyDe } return true } - if p.__isAccessorDescriptor(desc) && current.accessor { + if desc.IsAccessor() && current.accessor { if !current.configurable { if 
desc.Setter != nil && desc.Setter.SameAs(current.setterFunc) { return false @@ -926,18 +943,6 @@ func (p *proxyObject) __isCompatibleDescriptor(extensible bool, desc *PropertyDe return true } -func (p *proxyObject) __isAccessorDescriptor(desc *PropertyDescriptor) bool { - return desc.Setter != nil || desc.Getter != nil -} - -func (p *proxyObject) __isDataDescriptor(desc *PropertyDescriptor) bool { - return desc.Value != nil || desc.Writable != FLAG_NOT_SET -} - -func (p *proxyObject) __isGenericDescriptor(desc *PropertyDescriptor) bool { - return !p.__isAccessorDescriptor(desc) && !p.__isDataDescriptor(desc) -} - func (p *proxyObject) __sameValue(val1, val2 Value) bool { if val1 == nil && val2 == nil { return true @@ -1004,7 +1009,7 @@ func (p *proxyObject) ownKeys(all bool, _ []Value) []Value { // we can assume ac func (p *proxyObject) ownSymbols(all bool, accum []Value) []Value { if vals, ok := p.proxyOwnKeys(); ok { - res := p.filterKeys(vals, true, true) + res := p.filterKeys(vals, all, true) if accum == nil { return res } diff --git a/vendor/github.com/dop251/goja/runtime.go b/vendor/github.com/dop251/goja/runtime.go index 3097ef17d..e087a7e21 100644 --- a/vendor/github.com/dop251/goja/runtime.go +++ b/vendor/github.com/dop251/goja/runtime.go @@ -33,6 +33,7 @@ var ( typeValue = reflect.TypeOf((*Value)(nil)).Elem() typeObject = reflect.TypeOf((*Object)(nil)) typeTime = reflect.TypeOf(time.Time{}) + typeBytes = reflect.TypeOf(([]byte)(nil)) ) type iterationKind int @@ -57,6 +58,7 @@ type global struct { Date *Object Symbol *Object Proxy *Object + Promise *Object ArrayBuffer *Object DataView *Object @@ -77,6 +79,7 @@ type global struct { Set *Object Error *Object + AggregateError *Object TypeError *Object ReferenceError *Object SyntaxError *Object @@ -103,6 +106,7 @@ type global struct { WeakMapPrototype *Object MapPrototype *Object SetPrototype *Object + PromisePrototype *Object IteratorPrototype *Object ArrayIteratorPrototype *Object @@ -112,6 +116,7 @@ type 
global struct { RegExpStringIteratorPrototype *Object ErrorPrototype *Object + AggregateErrorPrototype *Object TypeErrorPrototype *Object SyntaxErrorPrototype *Object RangeErrorPrototype *Object @@ -176,6 +181,10 @@ type Runtime struct { vm *vm hash *maphash.Hash idSeq uint64 + + jobQueue []func() + + promiseRejectionTracker PromiseRejectionTracker } type StackFrame struct { @@ -352,9 +361,11 @@ func (r *Runtime) init() { } r.vm.init() - r.global.FunctionPrototype = r.newNativeFunc(func(FunctionCall) Value { + funcProto := r.newNativeFunc(func(FunctionCall) Value { return _undefined }, nil, " ", nil, 0) + r.global.FunctionPrototype = funcProto + funcProtoObj := funcProto.self.(*nativeFuncObject) r.global.IteratorPrototype = r.newLazyObject(r.createIterProto) @@ -384,6 +395,7 @@ func (r *Runtime) init() { r.initWeakMap() r.initMap() r.initSet() + r.initPromise() r.global.thrower = r.newNativeFunc(r.builtin_thrower, nil, "thrower", nil, 0) r.global.throwerProperty = &valueProperty{ @@ -391,6 +403,9 @@ func (r *Runtime) init() { setterFunc: r.global.thrower, accessor: true, } + + funcProtoObj._put("caller", r.global.throwerProperty) + funcProtoObj._put("arguments", r.global.throwerProperty) } func (r *Runtime) typeErrorResult(throw bool, args ...interface{}) { @@ -487,13 +502,35 @@ func (r *Runtime) newFunc(name unistring.String, len int, strict bool) (f *funcO f.class = classFunction f.val = v f.extensible = true + f.strict = strict v.self = f f.prototype = r.global.FunctionPrototype f.init(name, len) - if strict { - f._put("caller", r.global.throwerProperty) - f._put("arguments", r.global.throwerProperty) + return +} + +func (r *Runtime) newArrowFunc(name unistring.String, len int, strict bool) (f *arrowFuncObject) { + v := &Object{runtime: r} + + f = &arrowFuncObject{} + f.class = classFunction + f.val = v + f.extensible = true + f.strict = strict + + vm := r.vm + var this Value + if vm.sb >= 0 { + this = vm.stack[vm.sb] + } else { + this = vm.r.globalObject } + + 
f.this = this + f.newTarget = vm.newTarget + v.self = f + f.prototype = r.global.FunctionPrototype + f.init(name, len) return } @@ -748,14 +785,6 @@ func (r *Runtime) error_toString(call FunctionCall) Value { return sb.String() } -func (r *Runtime) builtin_Error(args []Value, proto *Object) *Object { - obj := r.newBaseObject(proto, classError) - if len(args) > 0 && args[0] != _undefined { - obj._putProp("message", args[0], true, false, true) - } - return obj.val -} - func (r *Runtime) builtin_new(construct *Object, args []Value) *Object { return r.toConstructor(construct)(args, nil) } @@ -764,15 +793,30 @@ func (r *Runtime) throw(e Value) { panic(e) } -func (r *Runtime) builtin_thrower(FunctionCall) Value { - r.typeErrorResult(true, "'caller', 'callee', and 'arguments' properties may not be accessed on strict mode functions or the arguments objects for calls to them") +func (r *Runtime) builtin_thrower(call FunctionCall) Value { + obj := r.toObject(call.This) + strict := true + switch fn := obj.self.(type) { + case *funcObject: + strict = fn.strict + } + r.typeErrorResult(strict, "'caller', 'callee', and 'arguments' properties may not be accessed on strict mode functions or the arguments objects for calls to them") return nil } func (r *Runtime) eval(srcVal valueString, direct, strict bool, this Value) Value { src := escapeInvalidUtf16(srcVal) vm := r.vm - p, err := r.compile("", src, strict, true, !direct || vm.stash == &r.global.stash) + inGlobal := true + if direct { + for s := vm.stash; s != nil; s = s.outer { + if s.variable { + inGlobal = false + break + } + } + } + p, err := r.compile("", src, strict, true, inGlobal) if err != nil { panic(err) } @@ -1060,6 +1104,18 @@ func toIntStrict(i int64) int { return int(i) } +func toIntClamp(i int64) int { + if bits.UintSize == 32 { + if i > math.MaxInt32 { + return math.MaxInt32 + } + if i < math.MinInt32 { + return math.MinInt32 + } + } + return int(i) +} + func (r *Runtime) toIndex(v Value) int { intIdx := 
v.ToInteger() if intIdx >= 0 && intIdx < maxInt { @@ -1734,7 +1790,76 @@ func (r *Runtime) wrapReflectFunc(value reflect.Value) func(FunctionCall) Value func (r *Runtime) toReflectValue(v Value, dst reflect.Value, ctx *objectExportCtx) error { typ := dst.Type() - switch typ.Kind() { + + if typ == typeValue { + dst.Set(reflect.ValueOf(v)) + return nil + } + + if typ == typeObject { + if obj, ok := v.(*Object); ok { + dst.Set(reflect.ValueOf(obj)) + return nil + } + } + + if typ == typeCallable { + if fn, ok := AssertFunction(v); ok { + dst.Set(reflect.ValueOf(fn)) + return nil + } + } + + et := v.ExportType() + if et == nil || et == reflectTypeNil { + dst.Set(reflect.Zero(typ)) + return nil + } + + kind := typ.Kind() + for i := 0; ; i++ { + if et.AssignableTo(typ) { + ev := reflect.ValueOf(exportValue(v, ctx)) + for ; i > 0; i-- { + ev = ev.Elem() + } + dst.Set(ev) + return nil + } + expKind := et.Kind() + if expKind == kind && et.ConvertibleTo(typ) || expKind == reflect.String && typ == typeBytes { + ev := reflect.ValueOf(exportValue(v, ctx)) + for ; i > 0; i-- { + ev = ev.Elem() + } + dst.Set(ev.Convert(typ)) + return nil + } + if expKind == reflect.Ptr { + et = et.Elem() + } else { + break + } + } + + if typ == typeTime { + if obj, ok := v.(*Object); ok { + if d, ok := obj.self.(*dateObject); ok { + dst.Set(reflect.ValueOf(d.time())) + return nil + } + } + if et.Kind() == reflect.String { + tme, ok := dateParse(v.String()) + if !ok { + return fmt.Errorf("could not convert string %v to %v", v, typ) + } + dst.Set(reflect.ValueOf(tme)) + return nil + } + } + + switch kind { case reflect.String: dst.Set(reflect.ValueOf(v.String()).Convert(typ)) return nil @@ -1777,59 +1902,6 @@ func (r *Runtime) toReflectValue(v Value, dst reflect.Value, ctx *objectExportCt case reflect.Float32: dst.Set(reflect.ValueOf(toFloat32(v)).Convert(typ)) return nil - } - - if typ == typeCallable { - if fn, ok := AssertFunction(v); ok { - dst.Set(reflect.ValueOf(fn)) - return nil - } - } - - 
if typ == typeValue { - dst.Set(reflect.ValueOf(v)) - return nil - } - - if typ == typeObject { - if obj, ok := v.(*Object); ok { - dst.Set(reflect.ValueOf(obj)) - return nil - } - } - - { - et := v.ExportType() - if et == nil || et == reflectTypeNil { - dst.Set(reflect.Zero(typ)) - return nil - } - if et.AssignableTo(typ) { - dst.Set(reflect.ValueOf(exportValue(v, ctx))) - return nil - } else if et.ConvertibleTo(typ) { - dst.Set(reflect.ValueOf(exportValue(v, ctx)).Convert(typ)) - return nil - } - if typ == typeTime { - if obj, ok := v.(*Object); ok { - if d, ok := obj.self.(*dateObject); ok { - dst.Set(reflect.ValueOf(d.time())) - return nil - } - } - if et.Kind() == reflect.String { - tme, ok := dateParse(v.String()) - if !ok { - return fmt.Errorf("could not convert string %v to %v", v, typ) - } - dst.Set(reflect.ValueOf(tme)) - return nil - } - } - } - - switch typ.Kind() { case reflect.Slice: if o, ok := v.(*Object); ok { if o.self.className() == classArray { @@ -2324,7 +2396,16 @@ func (r *Runtime) getHash() *maphash.Hash { // called when the top level function returns (i.e. control is passed outside the Runtime). func (r *Runtime) leave() { - // run jobs, etc... 
+ for { + jobs := r.jobQueue + r.jobQueue = nil + if len(jobs) == 0 { + break + } + for _, job := range jobs { + job() + } + } } func nilSafe(v Value) Value { @@ -2434,9 +2515,253 @@ func (r *Runtime) setGlobal(name unistring.String, v Value, strict bool) { } } -func strPropToInt(s unistring.String) (int, bool) { - if res, err := strconv.Atoi(string(s)); err == nil { - return res, true +func (r *Runtime) trackPromiseRejection(p *Promise, operation PromiseRejectionOperation) { + if r.promiseRejectionTracker != nil { + r.promiseRejectionTracker(p, operation) + } +} + +func (r *Runtime) callJobCallback(job *jobCallback, this Value, args ...Value) Value { + return job.callback(FunctionCall{This: this, Arguments: args}) +} + +func (r *Runtime) invoke(v Value, p unistring.String, args ...Value) Value { + o := v.ToObject(r) + return r.toCallable(o.self.getStr(p, nil))(FunctionCall{This: v, Arguments: args}) +} + +func (r *Runtime) iterableToList(items Value, method func(FunctionCall) Value) []Value { + iter := r.getIterator(items, method) + var values []Value + r.iterate(iter, func(item Value) { + values = append(values, item) + }) + return values +} + +func (r *Runtime) putSpeciesReturnThis(o objectImpl) { + o._putSym(SymSpecies, &valueProperty{ + getterFunc: r.newNativeFunc(r.returnThis, nil, "get [Symbol.species]", nil, 0), + accessor: true, + configurable: true, + }) +} + +func strToArrayIdx(s unistring.String) uint32 { + if s == "" { + return math.MaxUint32 + } + l := len(s) + if s[0] == '0' { + if l == 1 { + return 0 + } + return math.MaxUint32 + } + var n uint32 + if l < 10 { + // guaranteed not to overflow + for i := 0; i < len(s); i++ { + c := s[i] + if c < '0' || c > '9' { + return math.MaxUint32 + } + n = n*10 + uint32(c-'0') + } + return n + } + if l > 10 { + // guaranteed to overflow + return math.MaxUint32 + } + c9 := s[9] + if c9 < '0' || c9 > '9' { + return math.MaxUint32 + } + for i := 0; i < 9; i++ { + c := s[i] + if c < '0' || c > '9' { + return 
math.MaxUint32 + } + n = n*10 + uint32(c-'0') + } + if n >= math.MaxUint32/10+1 { + return math.MaxUint32 + } + n *= 10 + n1 := n + uint32(c9-'0') + if n1 < n { + return math.MaxUint32 + } + + return n1 +} + +func strToInt32(s unistring.String) (int32, bool) { + if s == "" { + return -1, false + } + neg := s[0] == '-' + if neg { + s = s[1:] + } + l := len(s) + if s[0] == '0' { + if l == 1 { + return 0, !neg + } + return -1, false + } + var n uint32 + if l < 10 { + // guaranteed not to overflow + for i := 0; i < len(s); i++ { + c := s[i] + if c < '0' || c > '9' { + return -1, false + } + n = n*10 + uint32(c-'0') + } + } else if l > 10 { + // guaranteed to overflow + return -1, false + } else { + c9 := s[9] + if c9 >= '0' { + if !neg && c9 > '7' || c9 > '8' { + // guaranteed to overflow + return -1, false + } + for i := 0; i < 9; i++ { + c := s[i] + if c < '0' || c > '9' { + return -1, false + } + n = n*10 + uint32(c-'0') + } + if n >= math.MaxInt32/10+1 { + // valid number, but it overflows integer + return 0, false + } + n = n*10 + uint32(c9-'0') + } else { + return -1, false + } + } + if neg { + return int32(-n), true + } + return int32(n), true +} + +func strToInt64(s unistring.String) (int64, bool) { + if s == "" { + return -1, false + } + neg := s[0] == '-' + if neg { + s = s[1:] + } + l := len(s) + if s[0] == '0' { + if l == 1 { + return 0, !neg + } + return -1, false } - return 0, false + var n uint64 + if l < 19 { + // guaranteed not to overflow + for i := 0; i < len(s); i++ { + c := s[i] + if c < '0' || c > '9' { + return -1, false + } + n = n*10 + uint64(c-'0') + } + } else if l > 19 { + // guaranteed to overflow + return -1, false + } else { + c18 := s[18] + if c18 >= '0' { + if !neg && c18 > '7' || c18 > '8' { + // guaranteed to overflow + return -1, false + } + for i := 0; i < 18; i++ { + c := s[i] + if c < '0' || c > '9' { + return -1, false + } + n = n*10 + uint64(c-'0') + } + if n >= math.MaxInt64/10+1 { + // valid number, but it overflows integer + 
return 0, false + } + n = n*10 + uint64(c18-'0') + } else { + return -1, false + } + } + if neg { + return int64(-n), true + } + return int64(n), true +} + +func strToInt(s unistring.String) (int, bool) { + if bits.UintSize == 32 { + n, ok := strToInt32(s) + return int(n), ok + } + n, ok := strToInt64(s) + return int(n), ok +} + +// Attempts to convert a string into a canonical integer. +// On success returns (number, true). +// If it was a canonical number, but not an integer returns (0, false). This includes -0 and overflows. +// In all other cases returns (-1, false). +// See https://262.ecma-international.org/#sec-canonicalnumericindexstring +func strToIntNum(s unistring.String) (int, bool) { + n, ok := strToInt64(s) + if n == 0 { + return 0, ok + } + if ok && n >= -maxInt && n <= maxInt { + if bits.UintSize == 32 { + if n > math.MaxInt32 || n < math.MinInt32 { + return 0, false + } + } + return int(n), true + } + str := stringValueFromRaw(s) + if str.ToNumber().toString().SameAs(str) { + return 0, false + } + return -1, false +} + +func strToGoIdx(s unistring.String) int { + if n, ok := strToInt(s); ok { + return n + } + return -1 +} + +func strToIdx64(s unistring.String) int64 { + if n, ok := strToInt64(s); ok { + return n + } + return -1 +} + +func assertCallable(v Value) (func(FunctionCall) Value, bool) { + if obj, ok := v.(*Object); ok { + return obj.self.assertCallable() + } + return nil, false } diff --git a/vendor/github.com/dop251/goja/string.go b/vendor/github.com/dop251/goja/string.go index 8df5e53f5..85687eddc 100644 --- a/vendor/github.com/dop251/goja/string.go +++ b/vendor/github.com/dop251/goja/string.go @@ -33,6 +33,7 @@ var ( stringEmpty valueString = asciiString("") stringError valueString = asciiString("Error") + stringAggregateError valueString = asciiString("AggregateError") stringTypeError valueString = asciiString("TypeError") stringReferenceError valueString = asciiString("ReferenceError") stringSyntaxError valueString = 
asciiString("SyntaxError") diff --git a/vendor/github.com/dop251/goja/string_ascii.go b/vendor/github.com/dop251/goja/string_ascii.go index b265da33c..26231f579 100644 --- a/vendor/github.com/dop251/goja/string_ascii.go +++ b/vendor/github.com/dop251/goja/string_ascii.go @@ -49,7 +49,7 @@ func (s asciiString) utf16Runes() []rune { } // ss must be trimmed -func strToInt(ss string) (int64, error) { +func stringToInt(ss string) (int64, error) { if ss == "" { return 0, nil } @@ -70,7 +70,7 @@ func strToInt(ss string) (int64, error) { } func (s asciiString) _toInt() (int64, error) { - return strToInt(strings.TrimSpace(string(s))) + return stringToInt(strings.TrimSpace(string(s))) } func isRangeErr(err error) bool { diff --git a/vendor/github.com/dop251/goja/string_unicode.go b/vendor/github.com/dop251/goja/string_unicode.go index 96b00ed01..71e387bc2 100644 --- a/vendor/github.com/dop251/goja/string_unicode.go +++ b/vendor/github.com/dop251/goja/string_unicode.go @@ -252,8 +252,10 @@ func (b *valueStringBuilder) WriteSubstring(source valueString, start int, end i if ascii, ok := source.(asciiString); ok { if b.ascii() { b.asciiBuilder.WriteString(string(ascii[start:end])) - return + } else { + b.unicodeBuilder.writeASCIIString(string(ascii[start:end])) } + return } us := source.(unicodeString) if b.ascii() { diff --git a/vendor/github.com/dop251/goja/token/token_const.go b/vendor/github.com/dop251/goja/token/token_const.go index bf6e26a74..bdbc15e0d 100644 --- a/vendor/github.com/dop251/goja/token/token_const.go +++ b/vendor/github.com/dop251/goja/token/token_const.go @@ -71,6 +71,9 @@ const ( SEMICOLON // ; COLON // : QUESTION_MARK // ? + ARROW // => + ELLIPSIS // ... 
+ BACKTICK // ` firstKeyword IF @@ -170,6 +173,9 @@ var token2string = [...]string{ SEMICOLON: ";", COLON: ":", QUESTION_MARK: "?", + ARROW: "=>", + ELLIPSIS: "...", + BACKTICK: "`", IF: "if", IN: "in", OF: "of", diff --git a/vendor/github.com/dop251/goja/typedarrays.go b/vendor/github.com/dop251/goja/typedarrays.go index 7a2e30d8e..632607268 100644 --- a/vendor/github.com/dop251/goja/typedarrays.go +++ b/vendor/github.com/dop251/goja/typedarrays.go @@ -448,94 +448,100 @@ func (a *float64Array) typeMatch(v Value) bool { } func (a *typedArrayObject) _getIdx(idx int) Value { - a.viewedArrayBuf.ensureNotDetached() if 0 <= idx && idx < a.length { + if !a.viewedArrayBuf.ensureNotDetached(false) { + return nil + } return a.typedArray.get(idx + a.offset) } return nil } func (a *typedArrayObject) getOwnPropStr(name unistring.String) Value { - if idx, ok := strPropToInt(name); ok { + idx, ok := strToIntNum(name) + if ok { v := a._getIdx(idx) if v != nil { return &valueProperty{ - value: v, - writable: true, - enumerable: true, + value: v, + writable: true, + enumerable: true, + configurable: true, } } return nil } + if idx == 0 { + return nil + } return a.baseObject.getOwnPropStr(name) } func (a *typedArrayObject) getOwnPropIdx(idx valueInt) Value { - v := a._getIdx(toIntStrict(int64(idx))) + v := a._getIdx(toIntClamp(int64(idx))) if v != nil { return &valueProperty{ - value: v, - writable: true, - enumerable: true, + value: v, + writable: true, + enumerable: true, + configurable: true, } } return nil } func (a *typedArrayObject) getStr(name unistring.String, receiver Value) Value { - if idx, ok := strPropToInt(name); ok { - prop := a._getIdx(idx) - if prop == nil { - if a.prototype != nil { - if receiver == nil { - return a.prototype.self.getStr(name, a.val) - } - return a.prototype.self.getStr(name, receiver) - } - } - return prop + idx, ok := strToIntNum(name) + if ok { + return a._getIdx(idx) + } + if idx == 0 { + return nil } return a.baseObject.getStr(name, receiver) 
} func (a *typedArrayObject) getIdx(idx valueInt, receiver Value) Value { - prop := a._getIdx(toIntStrict(int64(idx))) - if prop == nil { - if a.prototype != nil { - if receiver == nil { - return a.prototype.self.getIdx(idx, a.val) - } - return a.prototype.self.getIdx(idx, receiver) + return a._getIdx(toIntClamp(int64(idx))) +} + +func (a *typedArrayObject) isValidIntegerIndex(idx int, throw bool) bool { + if a.viewedArrayBuf.ensureNotDetached(throw) { + if idx >= 0 && idx < a.length { + return true } + a.val.runtime.typeErrorResult(throw, "Invalid typed array index") } - return prop + return false } -func (a *typedArrayObject) _putIdx(idx int, v Value, throw bool) bool { +func (a *typedArrayObject) _putIdx(idx int, v Value) { v = v.ToNumber() - a.viewedArrayBuf.ensureNotDetached() - if idx >= 0 && idx < a.length { + if a.isValidIntegerIndex(idx, false) { a.typedArray.set(idx+a.offset, v) - return true } - // As far as I understand the specification this should throw, but neither V8 nor SpiderMonkey does - return false } func (a *typedArrayObject) _hasIdx(idx int) bool { - a.viewedArrayBuf.ensureNotDetached() - return idx >= 0 && idx < a.length + return a.isValidIntegerIndex(idx, false) } func (a *typedArrayObject) setOwnStr(p unistring.String, v Value, throw bool) bool { - if idx, ok := strPropToInt(p); ok { - return a._putIdx(idx, v, throw) + idx, ok := strToIntNum(p) + if ok { + a._putIdx(idx, v) + return true + } + if idx == 0 { + v.ToNumber() // make sure it throws + return false } return a.baseObject.setOwnStr(p, v, throw) } func (a *typedArrayObject) setOwnIdx(p valueInt, v Value, throw bool) bool { - return a._putIdx(toIntStrict(int64(p)), v, throw) + a._putIdx(toIntClamp(int64(p)), v) + return true } func (a *typedArrayObject) setForeignStr(p unistring.String, v, receiver Value, throw bool) (res bool, handled bool) { @@ -547,50 +553,72 @@ func (a *typedArrayObject) setForeignIdx(p valueInt, v, receiver Value, throw bo } func (a *typedArrayObject) 
hasOwnPropertyStr(name unistring.String) bool { - if idx, ok := strPropToInt(name); ok { - a.viewedArrayBuf.ensureNotDetached() - return idx < a.length + idx, ok := strToIntNum(name) + if ok { + return a._hasIdx(idx) + } + if idx == 0 { + return false } - return a.baseObject.hasOwnPropertyStr(name) } func (a *typedArrayObject) hasOwnPropertyIdx(idx valueInt) bool { - return a._hasIdx(toIntStrict(int64(idx))) + return a._hasIdx(toIntClamp(int64(idx))) } func (a *typedArrayObject) _defineIdxProperty(idx int, desc PropertyDescriptor, throw bool) bool { - prop, ok := a._defineOwnProperty(unistring.String(strconv.Itoa(idx)), a.getOwnPropIdx(valueInt(idx)), desc, throw) + if desc.Configurable == FLAG_FALSE || desc.Enumerable == FLAG_FALSE || desc.IsAccessor() || desc.Writable == FLAG_FALSE { + a.val.runtime.typeErrorResult(throw, "Cannot redefine property: %d", idx) + return false + } + _, ok := a._defineOwnProperty(unistring.String(strconv.Itoa(idx)), a.getOwnPropIdx(valueInt(idx)), desc, throw) if ok { - return a._putIdx(idx, prop, throw) + if !a.isValidIntegerIndex(idx, throw) { + return false + } + a._putIdx(idx, desc.Value) + return true } return ok } func (a *typedArrayObject) defineOwnPropertyStr(name unistring.String, desc PropertyDescriptor, throw bool) bool { - if idx, ok := strPropToInt(name); ok { + idx, ok := strToIntNum(name) + if ok { return a._defineIdxProperty(idx, desc, throw) } + if idx == 0 { + a.viewedArrayBuf.ensureNotDetached(throw) + a.val.runtime.typeErrorResult(throw, "Invalid typed array index") + return false + } return a.baseObject.defineOwnPropertyStr(name, desc, throw) } func (a *typedArrayObject) defineOwnPropertyIdx(name valueInt, desc PropertyDescriptor, throw bool) bool { - return a._defineIdxProperty(toIntStrict(int64(name)), desc, throw) + return a._defineIdxProperty(toIntClamp(int64(name)), desc, throw) } func (a *typedArrayObject) deleteStr(name unistring.String, throw bool) bool { - if idx, ok := strPropToInt(name); ok { - if idx < 
a.length { + idx, ok := strToIntNum(name) + if ok { + if !a.isValidIntegerIndex(idx, false) { a.val.runtime.typeErrorResult(throw, "Cannot delete property '%d' of %s", idx, a.val.String()) + return false } + return true + } + if idx == 0 { + return true } - return a.baseObject.deleteStr(name, throw) } func (a *typedArrayObject) deleteIdx(idx valueInt, throw bool) bool { - if idx >= 0 && int64(idx) < int64(a.length) { + if a.viewedArrayBuf.ensureNotDetached(throw) && idx >= 0 && int64(idx) < int64(a.length) { a.val.runtime.typeErrorResult(throw, "Cannot delete property '%d' of %s", idx, a.val.String()) + return false } return true @@ -688,7 +716,7 @@ func (r *Runtime) newFloat64ArrayObject(buf *arrayBufferObject, offset, length i func (o *dataViewObject) getIdxAndByteOrder(idxVal, littleEndianVal Value, size int) (int, byteOrder) { getIdx := o.val.runtime.toIndex(idxVal) - o.viewedArrayBuf.ensureNotDetached() + o.viewedArrayBuf.ensureNotDetached(true) if getIdx+size > o.byteLen { panic(o.val.runtime.newError(o.val.runtime.global.RangeError, "Index %d is out of bounds", getIdx)) } @@ -706,10 +734,12 @@ func (o *dataViewObject) getIdxAndByteOrder(idxVal, littleEndianVal Value, size return getIdx, bo } -func (o *arrayBufferObject) ensureNotDetached() { +func (o *arrayBufferObject) ensureNotDetached(throw bool) bool { if o.detached { - panic(o.val.runtime.NewTypeError("ArrayBuffer is detached")) + o.val.runtime.typeErrorResult(throw, "ArrayBuffer is detached") + return false } + return true } func (o *arrayBufferObject) getFloat32(idx int, byteOrder byteOrder) float32 { diff --git a/vendor/github.com/dop251/goja/value.go b/vendor/github.com/dop251/goja/value.go index dd492148d..b42233039 100644 --- a/vendor/github.com/dop251/goja/value.go +++ b/vendor/github.com/dop251/goja/value.go @@ -996,11 +996,17 @@ func (s *Symbol) ToString() Value { } func (s *Symbol) String() string { - return s.desc.String() + if s.desc != nil { + return s.desc.String() + } + return "" } func 
(s *Symbol) string() unistring.String { - return s.desc.string() + if s.desc != nil { + return s.desc.string() + } + return "" } func (s *Symbol) ToFloat() float64 { @@ -1078,10 +1084,26 @@ func NewSymbol(s string) *Symbol { } func (s *Symbol) descriptiveString() valueString { - if s.desc == nil { - return stringEmpty + desc := s.desc + if desc == nil { + desc = stringEmpty + } + return asciiString("Symbol(").concat(desc).concat(asciiString(")")) +} + +func funcName(prefix string, n Value) valueString { + var b valueStringBuilder + b.WriteString(asciiString(prefix)) + if sym, ok := n.(*Symbol); ok { + if sym.desc != nil { + b.WriteRune('[') + b.WriteString(sym.desc) + b.WriteRune(']') + } + } else { + b.WriteString(n.toString()) } - return asciiString("Symbol(").concat(s.desc).concat(asciiString(")")) + return b.String() } func init() { diff --git a/vendor/github.com/dop251/goja/vm.go b/vendor/github.com/dop251/goja/vm.go index 2e400222a..524cc808a 100644 --- a/vendor/github.com/dop251/goja/vm.go +++ b/vendor/github.com/dop251/goja/vm.go @@ -5,6 +5,7 @@ import ( "math" "runtime" "strconv" + "strings" "sync" "sync/atomic" @@ -25,7 +26,9 @@ type stash struct { outer *stash - function bool + // true if this stash is a VariableEnvironment, i.e. dynamic var declarations created + // by direct eval go here. 
+ variable bool } type context struct { @@ -90,10 +93,13 @@ func (r *stashRefLex) set(v Value) { type stashRefConst struct { stashRefLex + strictConst bool } func (r *stashRefConst) set(v Value) { - panic(errAssignToConst) + if r.strictConst { + panic(errAssignToConst) + } } type objRef struct { @@ -325,6 +331,7 @@ func (s *stash) getRefByName(name unistring.String, strict bool) ref { idx: int(idx &^ maskTyp), }, }, + strictConst: strict || (idx&maskStrict != 0), } } } else { @@ -360,7 +367,7 @@ func (s *stash) createLexBinding(name unistring.String, isConst bool) { if _, exists := s.names[name]; !exists { idx := uint32(len(s.names)) if isConst { - idx |= maskConst + idx |= maskConst | maskStrict } s.names[name] = idx s.values = append(s.values, nil) @@ -771,9 +778,10 @@ func (vm *vm) storeStack1Lex(s int) { func (vm *vm) initStack(s int) { if s <= 0 { - panic("Illegal stack var index") + vm.stack[vm.sb-s] = vm.stack[vm.sp-1] + } else { + vm.stack[vm.sb+vm.args+s] = vm.stack[vm.sp-1] } - vm.stack[vm.sb+vm.args+s] = vm.stack[vm.sp-1] vm.pc++ } @@ -1227,6 +1235,53 @@ func (j jump) exec(vm *vm) { vm.pc += int(j) } +type _toPropertyKey struct{} + +func (_toPropertyKey) exec(vm *vm) { + p := vm.sp - 1 + vm.stack[p] = toPropertyKey(vm.stack[p]) + vm.pc++ +} + +type _toString struct{} + +func (_toString) exec(vm *vm) { + p := vm.sp - 1 + vm.stack[p] = vm.stack[p].toString() + vm.pc++ +} + +type _getElemRef struct{} + +var getElemRef _getElemRef + +func (_getElemRef) exec(vm *vm) { + obj := vm.stack[vm.sp-2].ToObject(vm.r) + propName := toPropertyKey(vm.stack[vm.sp-1]) + vm.refStack = append(vm.refStack, &objRef{ + base: obj.self, + name: propName.string(), + }) + vm.sp -= 2 + vm.pc++ +} + +type _getElemRefStrict struct{} + +var getElemRefStrict _getElemRefStrict + +func (_getElemRefStrict) exec(vm *vm) { + obj := vm.stack[vm.sp-2].ToObject(vm.r) + propName := toPropertyKey(vm.stack[vm.sp-1]) + vm.refStack = append(vm.refStack, &objRef{ + base: obj.self, + name: 
propName.string(), + strict: true, + }) + vm.sp -= 2 + vm.pc++ +} + type _setElem struct{} var setElem _setElem @@ -1243,6 +1298,39 @@ func (_setElem) exec(vm *vm) { vm.pc++ } +type _setElem1 struct{} + +var setElem1 _setElem1 + +func (_setElem1) exec(vm *vm) { + obj := vm.stack[vm.sp-3].ToObject(vm.r) + propName := vm.stack[vm.sp-2] + val := vm.stack[vm.sp-1] + + obj.setOwn(propName, val, true) + + vm.sp -= 2 + vm.pc++ +} + +type _setElem1Named struct{} + +var setElem1Named _setElem1Named + +func (_setElem1Named) exec(vm *vm) { + obj := vm.stack[vm.sp-3].ToObject(vm.r) + propName := vm.stack[vm.sp-2] + val := vm.stack[vm.sp-1] + vm.r.toObject(val).self.defineOwnPropertyStr("name", PropertyDescriptor{ + Value: funcName("", propName), + Configurable: FLAG_TRUE, + }, true) + obj.setOwn(propName, val, true) + + vm.sp -= 2 + vm.pc++ +} + type _setElemP struct{} var setElemP _setElemP @@ -1339,6 +1427,29 @@ func (d deletePropStrict) exec(vm *vm) { vm.pc++ } +type getPropRef unistring.String + +func (p getPropRef) exec(vm *vm) { + vm.refStack = append(vm.refStack, &objRef{ + base: vm.stack[vm.sp-1].ToObject(vm.r).self, + name: unistring.String(p), + }) + vm.sp-- + vm.pc++ +} + +type getPropRefStrict unistring.String + +func (p getPropRefStrict) exec(vm *vm) { + vm.refStack = append(vm.refStack, &objRef{ + base: vm.stack[vm.sp-1].ToObject(vm.r).self, + name: unistring.String(p), + strict: true, + }) + vm.sp-- + vm.pc++ +} + type setProp unistring.String func (p setProp) exec(vm *vm) { @@ -1407,6 +1518,10 @@ type setPropGetter unistring.String func (s setPropGetter) exec(vm *vm) { obj := vm.r.toObject(vm.stack[vm.sp-2]) val := vm.stack[vm.sp-1] + vm.r.toObject(val).self.defineOwnPropertyStr("name", PropertyDescriptor{ + Value: asciiString("get ").concat(stringValueFromRaw(unistring.String(s))), + Configurable: FLAG_TRUE, + }, true) descr := PropertyDescriptor{ Getter: val, @@ -1426,6 +1541,11 @@ func (s setPropSetter) exec(vm *vm) { obj := vm.r.toObject(vm.stack[vm.sp-2]) 
val := vm.stack[vm.sp-1] + vm.r.toObject(val).self.defineOwnPropertyStr("name", PropertyDescriptor{ + Value: asciiString("set ").concat(stringValueFromRaw(unistring.String(s))), + Configurable: FLAG_TRUE, + }, true) + descr := PropertyDescriptor{ Setter: val, Configurable: FLAG_TRUE, @@ -1438,6 +1558,57 @@ func (s setPropSetter) exec(vm *vm) { vm.pc++ } +type _setPropGetter1 struct{} + +var setPropGetter1 _setPropGetter1 + +func (s _setPropGetter1) exec(vm *vm) { + obj := vm.r.toObject(vm.stack[vm.sp-3]) + propName := vm.stack[vm.sp-2] + val := vm.stack[vm.sp-1] + vm.r.toObject(val).self.defineOwnPropertyStr("name", PropertyDescriptor{ + Value: funcName("get ", propName), + Configurable: FLAG_TRUE, + }, true) + + descr := PropertyDescriptor{ + Getter: val, + Configurable: FLAG_TRUE, + Enumerable: FLAG_TRUE, + } + + obj.defineOwnProperty(propName, descr, false) + + vm.sp -= 2 + vm.pc++ +} + +type _setPropSetter1 struct{} + +var setPropSetter1 _setPropSetter1 + +func (s _setPropSetter1) exec(vm *vm) { + obj := vm.r.toObject(vm.stack[vm.sp-3]) + propName := vm.stack[vm.sp-2] + val := vm.stack[vm.sp-1] + + vm.r.toObject(val).self.defineOwnPropertyStr("name", PropertyDescriptor{ + Value: funcName("set ", propName), + Configurable: FLAG_TRUE, + }, true) + + descr := PropertyDescriptor{ + Setter: val, + Configurable: FLAG_TRUE, + Enumerable: FLAG_TRUE, + } + + obj.defineOwnProperty(propName, descr, false) + + vm.sp -= 2 + vm.pc++ +} + type getProp unistring.String func (g getProp) exec(vm *vm) { @@ -1487,6 +1658,24 @@ func (_getElem) exec(vm *vm) { vm.pc++ } +type _getKey struct{} + +var getKey _getKey + +func (_getKey) exec(vm *vm) { + v := vm.stack[vm.sp-2] + obj := v.baseObject(vm.r) + propName := vm.stack[vm.sp-1] + if obj == nil { + panic(vm.r.NewTypeError("Cannot read property '%s' of undefined", propName.String())) + } + + vm.stack[vm.sp-2] = nilSafe(obj.get(propName, v)) + + vm.sp-- + vm.pc++ +} + type _getElemCallee struct{} var getElemCallee _getElemCallee @@ 
-1544,32 +1733,77 @@ func (_newObject) exec(vm *vm) { type newArray uint32 func (l newArray) exec(vm *vm) { - values := make([]Value, l) - if l > 0 { - copy(values, vm.stack[vm.sp-int(l):vm.sp]) - } - obj := vm.r.newArrayValues(values) - if l > 0 { - vm.sp -= int(l) - 1 - vm.stack[vm.sp-1] = obj + values := make([]Value, 0, l) + vm.push(vm.r.newArrayValues(values)) + vm.pc++ +} + +type _pushArrayItem struct{} + +var pushArrayItem _pushArrayItem + +func (_pushArrayItem) exec(vm *vm) { + arr := vm.stack[vm.sp-2].(*Object).self.(*arrayObject) + if arr.length < math.MaxUint32 { + arr.length++ } else { - vm.push(obj) + panic(vm.r.newError(vm.r.global.RangeError, "Invalid array length")) + } + val := vm.stack[vm.sp-1] + arr.values = append(arr.values, val) + if val != nil { + arr.objCount++ } + vm.sp-- vm.pc++ } -type newArraySparse struct { - l, objCount int +type _pushArraySpread struct{} + +var pushArraySpread _pushArraySpread + +func (_pushArraySpread) exec(vm *vm) { + arr := vm.stack[vm.sp-2].(*Object).self.(*arrayObject) + vm.r.iterate(vm.r.getIterator(vm.stack[vm.sp-1], nil), func(val Value) { + if arr.length < math.MaxUint32 { + arr.length++ + } else { + panic(vm.r.newError(vm.r.global.RangeError, "Invalid array length")) + } + arr.values = append(arr.values, val) + arr.objCount++ + }) + vm.sp-- + vm.pc++ } -func (n *newArraySparse) exec(vm *vm) { - values := make([]Value, n.l) - copy(values, vm.stack[vm.sp-int(n.l):vm.sp]) - arr := vm.r.newArrayObject() - setArrayValues(arr, values) - arr.objCount = n.objCount - vm.sp -= int(n.l) - 1 - vm.stack[vm.sp-1] = arr.val +type _pushSpread struct{} + +var pushSpread _pushSpread + +func (_pushSpread) exec(vm *vm) { + vm.sp-- + obj := vm.stack[vm.sp] + vm.r.iterate(vm.r.getIterator(obj, nil), func(val Value) { + vm.push(val) + }) + vm.pc++ +} + +type _newArrayFromIter struct{} + +var newArrayFromIter _newArrayFromIter + +func (_newArrayFromIter) exec(vm *vm) { + var values []Value + l := len(vm.iterStack) - 1 + iter := 
vm.iterStack[l].iter + vm.iterStack[l] = iterStackItem{} + vm.iterStack = vm.iterStack[:l] + vm.r.iterate(iter, func(val Value) { + values = append(values, val) + }) + vm.push(vm.r.newArrayValues(values)) vm.pc++ } @@ -2032,7 +2266,7 @@ func newStashRef(typ varType, name unistring.String, v *[]Value, idx int) ref { idx: idx, }, } - case varTypeConst: + case varTypeConst, varTypeStrictConst: return &stashRefConst{ stashRefLex: stashRefLex{ stashRef: stashRef{ @@ -2041,6 +2275,7 @@ func newStashRef(typ varType, name unistring.String, v *[]Value, idx int) ref { idx: idx, }, }, + strictConst: typ == varTypeStrictConst, } } panic("unsupported var type") @@ -2100,7 +2335,7 @@ func (r *resolveMixedStack) exec(vm *vm) { if r.idx > 0 { idx = vm.sb + vm.args + r.idx } else { - idx = vm.sb + r.idx + idx = vm.sb - r.idx } ref = newStashRef(r.typ, r.name, (*[]Value)(&vm.stack), idx) @@ -2289,6 +2524,22 @@ func (numargs callEvalStrict) exec(vm *vm) { vm.callEval(int(numargs), true) } +type _callEvalVariadic struct{} + +var callEvalVariadic _callEvalVariadic + +func (_callEvalVariadic) exec(vm *vm) { + vm.callEval(vm.countVariadicArgs()-2, false) +} + +type _callEvalVariadicStrict struct{} + +var callEvalVariadicStrict _callEvalVariadicStrict + +func (_callEvalVariadicStrict) exec(vm *vm) { + vm.callEval(vm.countVariadicArgs()-2, true) +} + type _boxThis struct{} var boxThis _boxThis @@ -2303,6 +2554,46 @@ func (_boxThis) exec(vm *vm) { vm.pc++ } +var variadicMarker Value = newSymbol(asciiString("[variadic marker]")) + +type _startVariadic struct{} + +var startVariadic _startVariadic + +func (_startVariadic) exec(vm *vm) { + vm.push(variadicMarker) + vm.pc++ +} + +type _callVariadic struct{} + +var callVariadic _callVariadic + +func (vm *vm) countVariadicArgs() int { + count := 0 + for i := vm.sp - 1; i >= 0; i-- { + if vm.stack[i] == variadicMarker { + return count + } + count++ + } + panic("Variadic marker was not found. 
Compiler bug.") +} + +func (_callVariadic) exec(vm *vm) { + call(vm.countVariadicArgs() - 2).exec(vm) +} + +type _endVariadic struct{} + +var endVariadic _endVariadic + +func (_endVariadic) exec(vm *vm) { + vm.sp-- + vm.stack[vm.sp-1] = vm.stack[vm.sp] + vm.pc++ +} + type call uint32 func (numargs call) exec(vm *vm) { @@ -2325,6 +2616,16 @@ repeat: vm.pc = 0 vm.stack[vm.sp-n-1], vm.stack[vm.sp-n-2] = vm.stack[vm.sp-n-2], vm.stack[vm.sp-n-1] return + case *arrowFuncObject: + vm.pc++ + vm.pushCtx() + vm.args = n + vm.prg = f.prg + vm.stash = f.stash + vm.pc = 0 + vm.stack[vm.sp-n-1], vm.stack[vm.sp-n-2] = f.this, vm.stack[vm.sp-n-1] + vm.newTarget = f.newTarget + return case *nativeFuncObject: vm._nativeCall(f, n) case *boundFuncObject: @@ -2353,7 +2654,7 @@ func (vm *vm) _nativeCall(f *nativeFuncObject, n int) { if f.f != nil { vm.pushCtx() vm.prg = nil - vm.funcName = f.nameProp.get(nil).string() + vm.funcName = nilSafe(f.getStr("name", nil)).string() ret := f.f(FunctionCall{ Arguments: vm.stack[vm.sp-n : vm.sp], This: vm.stack[vm.sp-n-2], @@ -2470,7 +2771,7 @@ func (e *enterFunc) exec(vm *vm) { vm.sb = sp - vm.args - 1 vm.newStash() stash := vm.stash - stash.function = true + stash.variable = true stash.values = make([]Value, e.stashSize) if len(e.names) > 0 { if e.extensible { @@ -2522,6 +2823,98 @@ func (e *enterFunc) exec(vm *vm) { vm.pc++ } +// Similar to enterFunc, but for when arguments may be accessed before they are initialised, +// e.g. by an eval() code or from a closure, or from an earlier initialiser code. +// In this case the arguments remain on stack, first argsToCopy of them are copied to the stash. 
+type enterFunc1 struct { + names map[unistring.String]uint32 + stashSize uint32 + numArgs uint32 + argsToCopy uint32 + extensible bool +} + +func (e *enterFunc1) exec(vm *vm) { + sp := vm.sp + vm.sb = sp - vm.args - 1 + vm.newStash() + stash := vm.stash + stash.variable = true + stash.values = make([]Value, e.stashSize) + if len(e.names) > 0 { + if e.extensible { + m := make(map[unistring.String]uint32, len(e.names)) + for name, idx := range e.names { + m[name] = idx + } + stash.names = m + } else { + stash.names = e.names + } + } + offset := vm.args - int(e.argsToCopy) + if offset > 0 { + copy(stash.values, vm.stack[sp-vm.args:sp-offset]) + if offset := vm.args - int(e.numArgs); offset > 0 { + vm.stash.extraArgs = make([]Value, offset) + copy(stash.extraArgs, vm.stack[sp-offset:]) + } + } else { + copy(stash.values, vm.stack[sp-vm.args:sp]) + if int(e.argsToCopy) > vm.args { + vv := stash.values[vm.args:e.argsToCopy] + for i := range vv { + vv[i] = _undefined + } + } + } + + vm.pc++ +} + +// Finalises the initialisers section and starts the function body which has its own +// scope. When used in conjunction with enterFunc1 adjustStack is set to true which +// causes the arguments to be removed from the stack. 
+type enterFuncBody struct { + enterBlock + extensible bool + adjustStack bool +} + +func (e *enterFuncBody) exec(vm *vm) { + if e.stashSize > 0 || e.extensible { + vm.newStash() + stash := vm.stash + stash.variable = true + stash.values = make([]Value, e.stashSize) + if len(e.names) > 0 { + if e.extensible { + m := make(map[unistring.String]uint32, len(e.names)) + for name, idx := range e.names { + m[name] = idx + } + stash.names = m + } else { + stash.names = e.names + } + } + } + sp := vm.sp + if e.adjustStack { + sp -= vm.args + } + nsp := sp + int(e.stackSize) + if e.stackSize > 0 { + vm.stack.expand(nsp - 1) + vv := vm.stack[sp:nsp] + for i := range vv { + vv[i] = nil + } + } + vm.sp = nsp + vm.pc++ +} + type _ret struct{} var ret _ret @@ -2578,17 +2971,30 @@ func (e *enterFuncStashless) exec(vm *vm) { type newFunc struct { prg *Program name unistring.String + source string + length uint32 strict bool - - srcStart, srcEnd uint32 } func (n *newFunc) exec(vm *vm) { obj := vm.r.newFunc(n.name, int(n.length), n.strict) obj.prg = n.prg obj.stash = vm.stash - obj.src = n.prg.src.Source()[n.srcStart:n.srcEnd] + obj.src = n.source + vm.push(obj.val) + vm.pc++ +} + +type newArrowFunc struct { + newFunc +} + +func (n *newArrowFunc) exec(vm *vm) { + obj := vm.r.newArrowFunc(n.name, int(n.length), n.strict) + obj.prg = n.prg + obj.stash = vm.stash + obj.src = n.source vm.push(obj.val) vm.pc++ } @@ -2752,7 +3158,7 @@ func (d *bindVars) exec(vm *vm) { if idx, exists := s.names[name]; exists && idx&maskVar == 0 { panic(vm.alreadyDeclared(name)) } - if s.function { + if s.variable { target = s break } @@ -2834,6 +3240,28 @@ func (j jneq1) exec(vm *vm) { } } +type jdef int32 + +func (j jdef) exec(vm *vm) { + if vm.stack[vm.sp-1] != _undefined { + vm.pc += int(j) + } else { + vm.sp-- + vm.pc++ + } +} + +type jdefP int32 + +func (j jdefP) exec(vm *vm) { + if vm.stack[vm.sp-1] != _undefined { + vm.pc += int(j) + } else { + vm.pc++ + } + vm.sp-- +} + type _not struct{} var not 
_not @@ -3116,6 +3544,14 @@ func (_throw) exec(vm *vm) { panic(vm.stack[vm.sp-1]) } +type _newVariadic struct{} + +var newVariadic _newVariadic + +func (_newVariadic) exec(vm *vm) { + _new(vm.countVariadicArgs() - 1).exec(vm) +} + type _new uint32 func (n _new) exec(vm *vm) { @@ -3154,7 +3590,7 @@ func (_typeof) exec(vm *vm) { case *Object: repeat: switch s := v.self.(type) { - case *funcObject, *nativeFuncObject, *boundFuncObject: + case *funcObject, *nativeFuncObject, *boundFuncObject, *arrowFuncObject: r = stringFunction case *lazyObject: v.self = s.create(v) @@ -3177,9 +3613,9 @@ func (_typeof) exec(vm *vm) { vm.pc++ } -type createArgs uint32 +type createArgsMapped uint32 -func (formalArgs createArgs) exec(vm *vm) { +func (formalArgs createArgsMapped) exec(vm *vm) { v := &Object{runtime: vm.r} args := &argumentsObject{} args.extensible = true @@ -3216,9 +3652,9 @@ func (formalArgs createArgs) exec(vm *vm) { vm.pc++ } -type createArgsStrict uint32 +type createArgsUnmapped uint32 -func (formalArgs createArgsStrict) exec(vm *vm) { +func (formalArgs createArgsUnmapped) exec(vm *vm) { args := vm.r.newBaseObject(vm.r.global.ObjectPrototype, "Arguments") i := 0 c := int(formalArgs) @@ -3323,11 +3759,23 @@ var enumPopClose _enumPopClose func (_enumPopClose) exec(vm *vm) { l := len(vm.iterStack) - 1 - if iter := vm.iterStack[l].iter; iter != nil { - returnIter(iter) - } + item := vm.iterStack[l] vm.iterStack[l] = iterStackItem{} vm.iterStack = vm.iterStack[:l] + if iter := item.iter; iter != nil { + returnIter(iter) + } + vm.pc++ +} + +type _iterateP struct{} + +var iterateP _iterateP + +func (_iterateP) exec(vm *vm) { + iter := vm.r.getIterator(vm.stack[vm.sp-1], nil) + vm.iterStack = append(vm.iterStack, iterStackItem{iter: iter}) + vm.sp-- vm.pc++ } @@ -3338,7 +3786,6 @@ var iterate _iterate func (_iterate) exec(vm *vm) { iter := vm.r.getIterator(vm.stack[vm.sp-1], nil) vm.iterStack = append(vm.iterStack, iterStackItem{iter: iter}) - vm.sp-- vm.pc++ } @@ -3382,6 
+3829,7 @@ func (copyStash) exec(vm *vm) { } vm.stashAllocs++ newStash.values = append([]Value(nil), oldStash.values...) + newStash.names = oldStash.names vm.stash = newStash vm.pc++ } @@ -3393,3 +3841,156 @@ var throwAssignToConst _throwAssignToConst func (_throwAssignToConst) exec(vm *vm) { panic(errAssignToConst) } + +func (r *Runtime) copyDataProperties(target, source Value) { + targetObj := r.toObject(target) + if source == _null || source == _undefined { + return + } + sourceObj := source.ToObject(r) + for _, key := range sourceObj.self.ownPropertyKeys(false, nil) { + v := nilSafe(sourceObj.get(key, nil)) + createDataPropertyOrThrow(targetObj, key, v) + } +} + +type _copySpread struct{} + +var copySpread _copySpread + +func (_copySpread) exec(vm *vm) { + vm.r.copyDataProperties(vm.stack[vm.sp-2], vm.stack[vm.sp-1]) + vm.sp-- + vm.pc++ +} + +type _copyRest struct{} + +var copyRest _copyRest + +func (_copyRest) exec(vm *vm) { + vm.push(vm.r.NewObject()) + vm.r.copyDataProperties(vm.stack[vm.sp-1], vm.stack[vm.sp-2]) + vm.pc++ +} + +type _createDestructSrc struct{} + +var createDestructSrc _createDestructSrc + +func (_createDestructSrc) exec(vm *vm) { + v := vm.stack[vm.sp-1] + vm.r.checkObjectCoercible(v) + vm.push(vm.r.newDestructKeyedSource(v)) + vm.pc++ +} + +type _checkObjectCoercible struct{} + +var checkObjectCoercible _checkObjectCoercible + +func (_checkObjectCoercible) exec(vm *vm) { + vm.r.checkObjectCoercible(vm.stack[vm.sp-1]) + vm.pc++ +} + +type createArgsRestStack int + +func (n createArgsRestStack) exec(vm *vm) { + var values []Value + delta := vm.args - int(n) + if delta > 0 { + values = make([]Value, delta) + copy(values, vm.stack[vm.sb+int(n)+1:]) + } + vm.push(vm.r.newArrayValues(values)) + vm.pc++ +} + +type _createArgsRestStash struct{} + +var createArgsRestStash _createArgsRestStash + +func (_createArgsRestStash) exec(vm *vm) { + vm.push(vm.r.newArrayValues(vm.stash.extraArgs)) + vm.stash.extraArgs = nil + vm.pc++ +} + +type concatStrings 
int + +func (n concatStrings) exec(vm *vm) { + strs := vm.stack[vm.sp-int(n) : vm.sp] + length := 0 + allAscii := true + for _, s := range strs { + if allAscii { + if _, ok := s.(unicodeString); ok { + allAscii = false + } + } + length += s.(valueString).length() + } + + vm.sp -= int(n) - 1 + if allAscii { + var buf strings.Builder + buf.Grow(length) + for _, s := range strs { + buf.WriteString(string(s.(asciiString))) + } + vm.stack[vm.sp-1] = asciiString(buf.String()) + } else { + var buf unicodeStringBuilder + buf.Grow(length) + for _, s := range strs { + buf.WriteString(s.(valueString)) + } + vm.stack[vm.sp-1] = buf.String() + } + vm.pc++ +} + +type getTaggedTmplObject struct { + raw, cooked []Value +} + +// As tagged template objects are not cached (because it's hard to ensure the cache is cleaned without using +// finalizers) this wrapper is needed to override the equality method so that two objects for the same template +// literal appeared be equal from the code's point of view. 
+type taggedTemplateArray struct { + *arrayObject + idPtr *[]Value +} + +func (a *taggedTemplateArray) equal(other objectImpl) bool { + if o, ok := other.(*taggedTemplateArray); ok { + return a.idPtr == o.idPtr + } + return false +} + +func (c *getTaggedTmplObject) exec(vm *vm) { + cooked := vm.r.newArrayObject() + setArrayValues(cooked, c.cooked) + cooked.lengthProp.writable = false + + raw := vm.r.newArrayObject() + setArrayValues(raw, c.raw) + raw.lengthProp.writable = false + raw.preventExtensions(true) + raw.val.self = &taggedTemplateArray{ + arrayObject: raw, + idPtr: &c.raw, + } + + cooked._putProp("raw", raw.val, false, false, false) + cooked.preventExtensions(true) + cooked.val.self = &taggedTemplateArray{ + arrayObject: cooked, + idPtr: &c.cooked, + } + + vm.push(cooked.val) + vm.pc++ +} diff --git a/vendor/github.com/felixge/httpsnoop/go.mod b/vendor/github.com/felixge/httpsnoop/go.mod deleted file mode 100644 index 73b394690..000000000 --- a/vendor/github.com/felixge/httpsnoop/go.mod +++ /dev/null @@ -1,3 +0,0 @@ -module github.com/felixge/httpsnoop - -go 1.13 diff --git a/vendor/github.com/glycerine/go-unsnap-stream/.gitignore b/vendor/github.com/glycerine/go-unsnap-stream/.gitignore deleted file mode 100644 index 00268614f..000000000 --- a/vendor/github.com/glycerine/go-unsnap-stream/.gitignore +++ /dev/null @@ -1,22 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe diff --git a/vendor/github.com/glycerine/go-unsnap-stream/LICENSE b/vendor/github.com/glycerine/go-unsnap-stream/LICENSE deleted file mode 100644 index a441b993b..000000000 --- a/vendor/github.com/glycerine/go-unsnap-stream/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT license. - -Copyright (c) 2014 the go-unsnap-stream authors. 
- -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - diff --git a/vendor/github.com/glycerine/go-unsnap-stream/README.md b/vendor/github.com/glycerine/go-unsnap-stream/README.md deleted file mode 100644 index 932291f74..000000000 --- a/vendor/github.com/glycerine/go-unsnap-stream/README.md +++ /dev/null @@ -1,22 +0,0 @@ -go-unsnap-stream -================ - -This is a small golang library for decoding and encoding the snappy *streaming* format, specified here: https://github.com/google/snappy/blob/master/framing_format.txt - -Note that the *streaming or framing format* for snappy is different from snappy itself. Think of it as a train of boxcars: the streaming format breaks your data in chunks, applies snappy to each chunk alone, then puts a thin wrapper around the chunk, and sends it along in turn. You can begin decoding before receiving everything. And memory requirements for decoding are sane. 
- -Strangely, though the streaming format was first proposed in Go[1][2], it was never upated, and I could not locate any other library for Go that would handle the streaming/framed snappy format. Hence this implementation of the spec. There is a command line tool[3] that has a C implementation, but this is the only Go implementation that I am aware of. The reference for the framing/streaming spec seems to be the python implementation[4]. - -Update to the previous paragraph: Horray! Good news: Thanks to @nigeltao, we have since learned that the [github.com/golang/snappy](https://github.com/golang/snappy) package now provides the snappy streaming format too. Even though the type level descriptions are a little misleading because they don't mention that they are for the stream format, the [snappy package header documentation](https://godoc.org/github.com/golang/snappy) points out that the [snappy.Reader](https://godoc.org/github.com/golang/snappy#Reader) and [snappy.Writer](https://godoc.org/github.com/golang/snappy#Writer) types do indeed provide stream (vs block) handling. Although I have not benchmarked, you should probably prefer that package as it will likely be maintained more than I have time to devote, and also perhaps better integrated with the underlying snappy as they share the same repo. - -For binary compatibility with the [python implementation](https://pypi.python.org/pypi/python-snappy) in [4], one could use the C-snappy compressor/decompressor code directly; using github.com/dgryski/go-csnappy. In fact we did this for a while to verify byte-for-byte compatiblity, as the native Go implementation produces slightly different binary compression (still conformant with the standard of course), which made test-diffs harder, and some have complained about it being slower than the C. - -However, while the c-snappy was useful for checking compatibility, it introduced dependencies on external C libraries (both the c-snappy library and the C standard library). 
Our go binary executable that used the go-unsnap-stream library was no longer standalone, and deployment was painful if not impossible if the target had a different C standard library. So we've gone back to using the snappy-go implementation (entirely in Go) for ease of deployment. See the comments at the top of unsnap.go if you wish to use c-snappy instead. - -[1] https://groups.google.com/forum/#!msg/snappy-compression/qvLNe2cSH9s/R19oBC-p7g4J - -[2] https://codereview.appspot.com/5167058 - -[3] https://github.com/kubo/snzip - -[4] https://pypi.python.org/pypi/python-snappy diff --git a/vendor/github.com/glycerine/go-unsnap-stream/binary.dat b/vendor/github.com/glycerine/go-unsnap-stream/binary.dat deleted file mode 100644 index f31eee2e24025b5f68a8b4c0fdad4af60b6db461..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 5592 zcmb2|=3sbw=wc89^V^u}@>|EYz2E;V*yp7eQ=d}LV;es8bD9S&9$sUUKj2#&tlu|* zZE=K0jP08pnfrhLd~3T{{FvXRWy^P_&)nKH(PVS_{`gs*kKFfdHSurG|F`G7m|eg8 z`wRD6Y`i(;{`gO4r(ej{JL( z8^7dBeBb!|zrVfrX}isSOFbg^wWhi`-#GZmsKo#I#uYb9H}1V!^MfJYy6uO&HdoC> zQ&;IV2AMA!v%T#n=@#CQo^4ZU&*7?(ZZu_|F7Lx%+ooF>F$K7DX^VY~>B51O7c`+XL+mggjCv8z?Jo?qYQeollJx5%!9k-`B?%0qN z+_R+VhLXUtOf#MPo0#u!lv>B{Ssiz{cscLZQ`>$n5!|`4X2DDWQ;~q;O3%&5C9Sxs z&9ych`y~5N^ObC(Yee4WPq9y}FGi-uT^7i_&v~%vu)>tQjLvki`1n(lJa#H`%%^v-nKmYD=Hw9nc<`aJ`dwHI7O0waHIUY9E;`Q>4K07)7 z7+20*ckP?a!5eM@Q7VUvU(WD37GRYp(*DjQ@tN>ePZqHsN?emJOsh$M^((C6o?F`m=;xQ9+Jmd6y zyDBa@9n}7d(FP=OjX_4_fn8RCJl~EcmfxssJ^iiwo@4%>w(vXe zdw*}aWj8Imd-1n|JFjKrx3oWcQBh$cA!wkKx8D4f%$Dl*ySBm-385GNZoDnueR1`- zgN%$!rydm;y!qCa`*9KXlw10IySTJ6ZoTH+J$IwyjIUqwm00#LnX$J?$d+2${<`>g zCHIxvht1h8^85Af`fqbg!A7L|?cr~y-*Gr($tN;BxnL<*#?R?|!PctIQ0n`! 
zSW3Jx%yz%~uz#;#`IqtrYl)s2_Qw~mYYaV>8z;LTv_xAjD7dJs$_ZIMePhq6AB{WC zIBr>JBYJn1Zql#EY_E1dJhUOEUysS>;QXDUXQj(B?C&g_{Ul?e0o(Fd zfyssatEO71Gb``AwY)&*O~9d_9Nrd*8)_c>;(s7v%jNMQq_dzE1z|_hoau>5IzTv{jtXxEHMpTQ5CpVfg$jr@1rN)%w?|-{A0URs75C^vrGXL*`+FP9&A zw8p$`$Lvd|?M2<%4kTYPnqX&m#D($aUxPc6%sn~ch7hIoda(U@iKhxUHWu) z8Si()Kif|oGg;&_^+Vj#pwz?cYE$~IY}h2_F67nzb>S4#{eE4qT28D~nz(Y~hpBqu z63X#5;?aAy1)ceQfKS}3mUWZ;gHt;a-b6We-Hqja@Jw()`PySoKPy^&Vs{X53)nqF zS^B~&p6v?ar+;kH%dwcrt?4&0He=t+s~+A1$usn2J$ZhBq0K=)(x ztc9F+j7%i%{O)~uMqc3QN-e%=2j<%L1x07bPq%0N8xwlurqN|9i={jVL|uObBy4~E zQO2!hN{`z8v(pzWT;r=db-iSX@2U1|op$Y`8>Zd(^3-+hi%kgy36nl)%&A_jR#C8K3YhVN8`>zKw-TrLF&$;I7c*}y@cBFES+;-X#G}e`fiDzQ zbk-^Dv}t8_^E!JpSdo}+B+ z<8sT^)ym=0DjWN+zYgVd+DH0`uAgDfx+O#98lV33kO0j!BHdvz-X5ns*Y%V{CGU@% z(iM0t`Rl_qYc@WS;QW2u`jqLi{o%4Nj+tDJnHp%or(v1>Wcr6cT}+STPL@BE3cCG( zHSyjL*V7mEtCmGf)LA*J;?IkVhmX&44o^*+kdb&)kaNcc3x}HT&Sf7Z=c$Q)_#0)# zf5kYXtkFwnMWvq86zx4TwizFrP$TCUoj&u|xxACUn;)kr6+NH2%f51L_35)R-CCR? zd-#9LKRvai{S()_Y`=Tn8`-8Nc}0q3ELOIS+F*P;ps_R7^nnJ0d{BYN#8`VqjZUE} z3$o5m)7V(zw~WJ6_JGzJFUh#T6Q>e4Ii$}!I#)*PS;ofT#Ph=ceO%t@Z}2;B`}Jl@ zqOIW8Y%S^MMsp7BvfQ$8)l>#%6U8*uCxJXm#MVcAcHhr1_gR=#!o2%;Jl_|#&A!KE z<#O$im22V6npKh+H_Ij{=qBoL=dS1oSLzXcuqUD1xydKRWftG5yl0o}x+k&f8}~m> zU43zf-rBqs|J;(L*u*F1C=`D^FSYBf>BZdjKWcrJ7;iq%eQVjIBHQlISqmRHH6scF>1@TSbz4Bo~2__I&zU-`qt!J7DM+sz!)a=(mMJTXDd z;VbX2%+`G|VcMm~x^nVb?tgu_*G)+K_+nA@%%(lYsZ&`4zQ2EL6#f0}Vd1C!<`$)A z-br(P-F{X~anm$`F=joo9tg!dwgrnXNfKLcg`(O4_1p=pE!+adnuEWnUcY^ zdE6SUMM~RP59{-&3*9@PrmOUbd1rR&?&(If0m2NVVvpaPmh`r(JJLMppfB^AU4mJjvU4wG zY!0*Dwyo%G!~MuM4wZtSKGEovOD=aTige<+8L!o+@K^3=gy5rf%VzOzZa>-GZRzPP z{?saHPISlLQ!TP)i3}@WR+cVKsNpXQy7w}m+v1Dh^YHGQw~3#bJF?$PM(@eq;=Ff$Yc)?9 zeR{RcRWsW%V#Rjngcn8&wz*kZTigy^93>Dbq2~P~s($7Z=8lODK|eQq$-6kkqfJ@& zu-2jN&jWVbo~`pTyDaqmoy+$tr)4F?+TOgXW4Ul)-i*zO%F)wi1g{S_D)CFzSvIZK zEplaU<-Cx5mjy}-{5Lk~YC9Ygikp4@;bQqEWWv`eO%@d@>yJ&1?t?!A0Q@#qa@tz`)vBR@X^;t zl_&JrXYJS`)4u!voN2Da@LW(W&2? 
z?(VMJR{!8sLEb)rotX?v?|-#FcQf0fLiX_KIco!wj5iDE=RN(~F=@Ha9|5bXSBp24 zN_EPsdRI-KHf>AO%rf;=Qmks%Ld_R5$$P#P|4|weC;#HuEROTFi`|^<16{tnj$9qQ zT}%I2rJ}n-`h|GhNw{m@z;qcm8Q;?zy{ep&`Nd z94hxPZ};YWE&j?Qp>g^`Q_)LKC!Vv9=AU2oTQ4C* z^JJ9(JHN%vpqK+Jx2A>UG#!eZc5A+P$-#dXYr3O$rdB5J|FkM!Yk%s_+=SYLY}dZz zzJ8+p%H*!uEW5oIOKwN+daGM2x_9aAz6;lH3htUTWqtOMDK7tx-j+B2VrRFX<>W8_ z{rg{d7|QW~V_Vz5UF+XO-yQ2x^rTO2{rNRL^3-yXz424$#ZCRRDzC0?<4;xZv-aLW z@y?Y$+)w=K&f2`@T~PJE_paLN0z7^*H*e-$>*cC?ck;Q_kN@#cKK#vp!Sv0Cz8uW3 zd9#gQ{juY(*GCU7_mw`bU29)w{CpOFGpX76Uh$o~ z!Jk{Zl3qT3{O=dz*%Al7Js&E}yycVA%6}+7ll$&&|D~@lBm2WNrj2I}CrQdg{aYC_ z`}6%zoc$bAHP*b{`!8}oXH2jL?{1^C)Pu9#OoF#)XHI?m>zz{Mg?;(a?_UL`O}Ene zq_Riee$$lGb1prT42o`lm7?9r{N&HN*u?d(PK4HdXZw5dhiKi!Zv|`e%(ly~oIB$K zi(%It-)|hxW;nc>>vcdgtXli)jM%5!&p0o+WV2z}I%~0@4Z-#wV(z6M+M8yVE97*3 zV#mb*=dB^BQ`p&VZ@IdX{a&#BULXGRORSfkl1=_}`R20YEqe_E9w%u|dR5}|)4+Gz z_IX$3x2{q$n>$_!na$fA#44ruFk|h0m&gwYqcgT6OmC zmu7bk2XA%G-kek&Cc-)OQee`;C*mzz6MpgCS#kKoiyVioQL@7GuY5QYB3eD?&>nWd zO<%s;V48F-)!|N%^$CffnOR1kS>7G&x@z_1rxR=5$BJ|I9Xoel-1+~<%ew{NJ%e?_ z7HCX0dvzy8>ZHZehI?`?#;2!Ao!`>;|7h{@B|AIS-@A&IRxMibr>lD6iisOCH$)~} z`VuRStM@$@#c>&~G!PZi0ww#D7G2{*8Ip3>mkd|>r!_m!%z z_&v@X?5bvYJa6Im`4d+~Rhi4>pV|~-e_vw0dYWrR56kY!8J_K385I-qyPPl1d@0Vq zv^Z{0!^)7LtIn2t{%jQ!STKF3o%)IfPX&bwswu0^+ipx>#QV-WEArE=B}r2RMIEP= za^(eg$MiJ)xgxm#XQ;-j$1bPWynpk{wID~$?rL1tckA1o?#JhINnbxBIJ3wgETZi=AaKI`aI=g8=uWnyx5Y3HAGoB2+d zAFP}gex+;YE_0+rpU&^Bu1~MmuX??_d+qAVpjoZ2RbtH3 ze=tPci#t(ms>S^LPTkR6D;byk4nFXDR?dr?d4i9p+Wg!#XZeP=hDZI&Kgiyao%8g< zl?p}^`yC%d?_@1wTm7qe{>)F2UMU;6IG1|nmq&3O>s+}0XIJs3O(A91J(sH3iru;T zr*v+_i#6@9=SRruQVh;cRs=YKpZRd+p{*{w#_brTE+`jJpl^D%TZw9VgD@A;YH_j<} zC00vC*|DA~X70e){?OZ7gzL*CFvITz=kv{U0kPa|-Y1`r-fEUm@?` zfB9{4dAX_o|2uzv_-%3iwpq9RB-myc6FqEt|h2 zkK^o=f}775Rds18%q;R{Z5KE;T|_HI#?+5_$IIf%n*sGdVl>t7CNrGiXOtDcssGn~n@!Cpg1|#n%S+fQRGYaE+D<+?`fS;vshzP!QZ+w4Q)%a7&zhctd@l*sNV@$AdzPE$WQ z>t2R#z=^A?dR4eyo+vzAm>#n1=ETfdEY(t}A_w07Ebs(#k0wJ}u_4`iq2+kN%Cl)1!g-kN!Vii!P9_f~c&HgtN6 zm3;p%#J#bkJ2|B8&6YKb=K2(vzgTwaen9n 
fch&L5x8mPB|K6#~v;Y6`{C>vHo`59`0SpWPX9(xm diff --git a/vendor/github.com/glycerine/go-unsnap-stream/binary.dat.snappy b/vendor/github.com/glycerine/go-unsnap-stream/binary.dat.snappy deleted file mode 100644 index ed370242933385f9e66c2020fa71e3828bf56abe..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 5610 zcmey*#=ubQml#kG$#_SUA!f;@gYw-R3{MYT3}Rq@8&h3=>)5vU`=15-y!2w~Q|fta z!>4{u^Pt7UYi#lde2ati`zEk0j_`=FeX}ET|L>n~Z5N9l^SiWc`Ofs2Tbm}DY);=F zKg;ux`@XFv{>}OS_M8{9>z99j;r_G9?$Jm0|FEgq^!Ld{^RsW?9=GN%-&R&uu>WSw zr8_^}a{hK&q|R6AS{u-je=l<5mwbuu8=wF8xA#76x7ly0M+CptR5#}v2R|8=_&?vc z;%4c_y;o~~FvMH8{gBt@s<~+DD!s-a^F?E}xBVpD!W+`FZ7S_KTs6{-rtH(@efVqJ zbPFS<0Cz5Jv5zrbnF7ZTI0MFlKAPQEy|2X zzxr1yEZn{4h)cHP_B6*G8-asZ;|>=u=iPd0+s`F} zJ2%!Wm?>Z?5>Q;}x%s%H6<4*n)`nxBWFKn2l1+4t$lLrW_Nn#7$ke#Y0=f4&4>lcE zn39*#nJyMzzy6=jmDtOCV&@I_{|Ws3ed%+(r7v1e>OZ>Kqo4EV-#zZ8;OpCb;*Vu7 z&vQ;mHvBNh!=_riUcS+1C&wS-%9-n~eX}`u!%ZMc<#6%K89v7Xtnx(K-y1w_GaU}_^Y(3VIx~0LgdCCo@Wh_@_u{}2u zk3Obq-w}K1dop_6gJK>h4Q{GF<@VfLvos@ci3j7JkE`!_^sxPYeEn@=_=E2k3qB<0uivd6x98yD zbuW1O?(tOX$M+sxeehshxrNZ%e{%1>9Y1`p`|(Ex^HpyJ)51>Xd{}X}UU`?jEbs2c z6D4?D-+y?qfW4r>KIi-Kz3)y4?5%tLpkn>?`b>@nfxibMWWGGmG|aIv$cQ|!%PNrP z+tI}G8UCCe&>Df?=83Nre$|8{#J13wT%3h_D3%&Doi8<4V3cMo4=CT zQr&*nR#+k-^y1%*x8=JpuKsqAk&)@tqXL6B-`a9NF5;eYOP_BSmsZBD*Sx#uZgiaS z^=rNo%N`~(_BILGQfu2^7yqv0zHrHM5ZSf zEal4hIh`-qTGbg!eLr^m@wy62i8qGX?sp&d@AWJHQr=)K(KEyT_~Lbqp~rILWE>96 zxp@EO_wL7m{Wq_??%#d*;m3`9n^!mS;Zy1-4o>$tDS&`vMV zXv+l!7nM~xA{<1ramN|QEemZ#@6OUq`t_LY)$WIfHpKMnG5H*vzf<(AbXkV| zon^D1WK1+*TRv}z#ifW>8-6}8xv+oLR4a96<$br77wEhRIP{al+ahs8&4XY34p>B){U3roLEcT4#RuEuWlcJuABRrfghuwNv;4(`SyD zj`XdomPr+f&->ypsQAu8ruoET=Y+u5>Hqz{Y>qd5QJI^zit`!wqIF^GrDrV+pMT{v zcjmg9Urt$vcD07hyV4n)?J4JEn&tiRTT4oyxas^G9G{ciYY`>A6li+rYjhJ0Akn@g_iNu}Xy)Vzm3p`z^#W(H1T-&~&=nVPk_N;$n zLXX@ux@={!l;?n`>yLnh?XN$|xV22_QM-S3`htaPe08U;mn`u;)t;@>u6=aFv>RWZ zx~_e(DWM=?(kG2M)vL99sx8+FO5eI!`>KZFf{icZ6P_iEsdDRP6(8qT;uJroqiptK z=B))jf9E{Q_RpMnR9P0E&;W@Q!2df 
zP1f+9@;a(^!UQp9vGmPzl#P8{ZrQq8Ib2$0WB>Knp?psJNFUMlGt61HWT;%@)1Mv^ zpt(k*J1oZA2ci2@`qAEw;!-3-uvNt`l5c-vWST~D`!>wd2#XZ@mbE{sc9235|0XU z?zmv#Q1jin?4#s7HPH`$qpbL^7-y6rJ&Y@kFTNbXG%D`-*n5Oz9kY|b5`iRf& z`x)jw3$sd?cmIy(`@*)__n53)t{t*+ExcK?N;2bS*#rgML>=zj6&>M9J)#fxB$PWh z`J}ka;yac1?2=vgBvyUn{>Q1SFYeG=o44YhTe1|J_{1EA;;-kWcD*&dn7jT*tUUW`({wzzu+q0Z&tsUiBj1Do0_%R2#{GH|$ z^z512EZ6sP8*aSvzmfDV#7@ z=$pZoYqEElEVu7rVf=Wm{CVUy%U1sR*X;RD`91P)Y)dRPjd~c~lo^}ByLcae_G$eq zf0#H}6Mt>HnPXb+m+^`xCa5`l<^7e}x-TY7yYyIBPF~CXuMhXS327f+EUKQ_w8uDg zDr>;^_m7RDzrQ^!{IuWPqV&u=X|Au^&x$FodfqJXVBsp?&j)&w{mW{PZ>{+(vBmz* zx#j7>YBB2*r!j3WWl}OzGPpL6Tcfo|X&dWdeI9k8d*{=1l^!wg%ubzrsPuuc#-}#X zGM~VMnoqK~T&wJvd__5bqJFsJMq!<40zPXwj$Jr9Wqz|uFw0YR?xl>)VbPCPf`wfYqP z${mdme6()aEZ)uSC%d~XJ-x-BS_RFC?)ZDEMb<2lVdcxp(!~ii{AEG+UIuhqd=Z=+ z*yEcVv3i>8=HHnDtLJMhn7C!9^CZn*yEdjWNF5b!?Q?$VdLYqtzieP!y8)o3VbSM`G5LjccDB-WpVykl}x0 zy7(%m2Qz1Ktx4Ip`evM$usQFFT*sr%x=)|25q61a58MQUpcV?pwZm`l+! z6fEZkA2(zbJ3Eu1?)}8HQwt)h3f?X}oNwf@djYQmlT0G_iXb1;`rwU8j5(GU+|w`3 zH27AwHl^Z-j}&X!r1uweVpjk2+0E3tH0HdoQ)XI8z^;RZtXf|C7lzJA6e!SrFm*!U zrPK5OPMJCJk#KW!WY+1E4w)W*(l<{u%643T>_C!y)GFtmOCs~{CSG(Gjm@m}J#lc# zS3x%ZQ;e=F9b07DcmJO=%{Bd9kmaIKf%BY4g5NC>duNw? 
zz;E4+q!s2Y1}jvZwzn-h^?TFZ-F4gQADk-4+b6IylVR!oulDC|W?NLq9zH#1Z9tOo zW+DB&r++&pE%*5&U{&>M@rF{VPI*=Ds_E0FZE2cWroKvwRqa}+`C=w{&$r?~N<-r0 zUmTmoalUr3o3nkO%a_-YtAn>|=|5{+zWqc)H)B`nE8kV$mgugm?Y+aN#lskpd2sSp zuCH<&f6MgFKMl=2ch@a6B>0{~hU*#@r zT{KDLZmEB;xSP@Z^UHqgC4^|6tP)`7x40P;bAaX6w2+*pLy^;N%@;2@_|IZZcht_* z%H;i@R^@B$Pu-cDPMDb8A=9%g2xZ{bD>@;=s4(Lxq{Qd~#a(59Mca-`(xM z^z~(Af0)L!@vPw_NtvjBD??^~zW<4{pJS@VnzwuZMegT}3D)4FF*SItH8ABR$8A__Q>0BnsR#1rDu{s(e1BNv^$xf{8<;9xc=3N(7Nwz ze^34pt-JWGU`?LccKMZaXMA8W?7HLojpNx2hgWmG4rqo|Yk!>)`*iym=OveHHY{6b zEf%yP*#1Mzz4SwS)9iAEoX$_|xESEPH6(QkJKOCoS68y%3%1|u!+(B>_0m(a$)7IY zTz0%=uVKLBB+W^$N}PTg_-@-i@2dRPRZ3=aXA51v7}zOpU}F7hX6)50mn9YdVoc<( z9$nwGex9xHS@o}0cMe{w&i?(`FTOh~4u5!& zHux{7_jj3j@?xaYav{>43Pp-xI^famSTl)SVEndE4XQ%pm zSJBd{MJxVvRZmY$b?H@Vr7>WYvrBaZL-2MS^a{&{Wp=>>rxG6FU(~3D^qa} ze^HFgkIPMdm|Vz~T^*lFUmv4Kta_e%1*}vArpt-ez{)IrQeKBH7lq zxVtvt2G-6~8ho1%tbXmjQuP(T$C-m&)hv(aE&M)z;;N`BbGiIen_}$mOUzeKbFJuM z**!VKv%M>$VnTkG^TnAj#rc;O$L(oY84`5W*>caHtzrTTrth>Vc-+5<6e!8_JX^Nnzt{sg)$e(FP(OazmvgNX4$KYG>d_20+Y|S)>%x~^6>4Q6uU6bm5%k+< z9i8eN8Qrr?Os+2N{F81o-wE@BmGi={bnSdx{IMkU++C9ZGk?mua|ePU0oS8tM#=?jCuMGhKPG{C#p@gn4jONJGyHnv-ezCM3*Xx%OSm;s0N?mnNv~d{N53a+2-7g^`Qf z*S)_IqnYW=z;$b-h)?mxIVG>e3JQ~*xWuk+2=FU5x;8~*MqkTMKR>^XMb7IwB>sfU z&-<_cW5r}n;T>H+{D1o^$bm~-M+v7<^KuK*Z=k3 zUgX^DY3HV}P5hO2;`^m#^OxjtoSjl|^ZBBxE-i(bMZT=<0>`F{Xr;)Q`Z4c#SzLKD zp#DdUruyAvh7L+L2%g_xtadlO%3fIdMg@+5%Lzdl~m^q83S}Ii}VUgv7 zTMNs0Up 0 bytes, it returns the number of bytes -read. It may return the (non-nil) error from the same call or -return the error (and n == 0) from a subsequent call. An instance -of this general case is that a Reader returning a non-zero number -of bytes at the end of the input stream may return -either err == EOF or err == nil. The next Read should -return 0, EOF regardless. - -Callers should always process the n > 0 bytes returned before -considering the error err. 
Doing so correctly handles I/O errors -that happen after reading some bytes and also both of the -allowed EOF behaviors. - -Implementations of Read are discouraged from returning a zero -byte count with a nil error, and callers should treat that -situation as a no-op. -*/ -// - -func (b *FixedSizeRingBuf) Read(p []byte) (n int, err error) { - return b.ReadAndMaybeAdvance(p, true) -} - -// if you want to Read the data and leave it in the buffer, so as -// to peek ahead for example. -func (b *FixedSizeRingBuf) ReadWithoutAdvance(p []byte) (n int, err error) { - return b.ReadAndMaybeAdvance(p, false) -} - -func (b *FixedSizeRingBuf) ReadAndMaybeAdvance(p []byte, doAdvance bool) (n int, err error) { - if len(p) == 0 { - return 0, nil - } - if b.Readable == 0 { - return 0, io.EOF - } - extent := b.Beg + b.Readable - if extent <= b.N { - n += copy(p, b.A[b.Use][b.Beg:extent]) - } else { - n += copy(p, b.A[b.Use][b.Beg:b.N]) - if n < len(p) { - n += copy(p[n:], b.A[b.Use][0:(extent%b.N)]) - } - } - if doAdvance { - b.Advance(n) - } - return -} - -// -// Write writes len(p) bytes from p to the underlying data stream. -// It returns the number of bytes written from p (0 <= n <= len(p)) -// and any error encountered that caused the write to stop early. -// Write must return a non-nil error if it returns n < len(p). -// -func (b *FixedSizeRingBuf) Write(p []byte) (n int, err error) { - for { - if len(p) == 0 { - // nothing (left) to copy in; notice we shorten our - // local copy p (below) as we read from it. - return - } - - writeCapacity := b.N - b.Readable - if writeCapacity <= 0 { - // we are all full up already. - return n, io.ErrShortWrite - } - if len(p) > writeCapacity { - err = io.ErrShortWrite - // leave err set and - // keep going, write what we can. 
- } - - writeStart := (b.Beg + b.Readable) % b.N - - upperLim := intMin(writeStart+writeCapacity, b.N) - - k := copy(b.A[b.Use][writeStart:upperLim], p) - - n += k - b.Readable += k - p = p[k:] - - // we can fill from b.A[b.Use][0:something] from - // p's remainder, so loop - } -} - -// WriteTo and ReadFrom avoid intermediate allocation and copies. - -// WriteTo writes data to w until there's no more data to write -// or when an error occurs. The return value n is the number of -// bytes written. Any error encountered during the write is also returned. -func (b *FixedSizeRingBuf) WriteTo(w io.Writer) (n int64, err error) { - - if b.Readable == 0 { - return 0, io.EOF - } - - extent := b.Beg + b.Readable - firstWriteLen := intMin(extent, b.N) - b.Beg - secondWriteLen := b.Readable - firstWriteLen - if firstWriteLen > 0 { - m, e := w.Write(b.A[b.Use][b.Beg:(b.Beg + firstWriteLen)]) - n += int64(m) - b.Advance(m) - - if e != nil { - return n, e - } - // all bytes should have been written, by definition of - // Write method in io.Writer - if m != firstWriteLen { - return n, io.ErrShortWrite - } - } - if secondWriteLen > 0 { - m, e := w.Write(b.A[b.Use][0:secondWriteLen]) - n += int64(m) - b.Advance(m) - - if e != nil { - return n, e - } - // all bytes should have been written, by definition of - // Write method in io.Writer - if m != secondWriteLen { - return n, io.ErrShortWrite - } - } - - return n, nil -} - -// ReadFrom() reads data from r until EOF or error. The return value n -// is the number of bytes read. Any error except io.EOF encountered -// during the read is also returned. 
-func (b *FixedSizeRingBuf) ReadFrom(r io.Reader) (n int64, err error) { - for { - writeCapacity := b.N - b.Readable - if writeCapacity <= 0 { - // we are all full - return n, nil - } - writeStart := (b.Beg + b.Readable) % b.N - upperLim := intMin(writeStart+writeCapacity, b.N) - - m, e := r.Read(b.A[b.Use][writeStart:upperLim]) - n += int64(m) - b.Readable += m - if e == io.EOF { - return n, nil - } - if e != nil { - return n, e - } - } -} - -func (b *FixedSizeRingBuf) Reset() { - b.Beg = 0 - b.Readable = 0 - b.Use = 0 -} - -// Advance(): non-standard, but better than Next(), -// because we don't have to unwrap our buffer and pay the cpu time -// for the copy that unwrapping may need. -// Useful in conjuction/after ReadWithoutAdvance() above. -func (b *FixedSizeRingBuf) Advance(n int) { - if n <= 0 { - return - } - if n > b.Readable { - n = b.Readable - } - b.Readable -= n - b.Beg = (b.Beg + n) % b.N -} - -// Adopt(): non-standard. -// -// For efficiency's sake, (possibly) take ownership of -// already allocated slice offered in me. -// -// If me is large we will adopt it, and we will potentially then -// write to the me buffer. -// If we already have a bigger buffer, copy me into the existing -// buffer instead. -func (b *FixedSizeRingBuf) Adopt(me []byte) { - n := len(me) - if n > b.N { - b.A[0] = me - b.OneMade = false - b.N = n - b.Use = 0 - b.Beg = 0 - b.Readable = n - } else { - // we already have a larger buffer, reuse it. - copy(b.A[0], me) - b.Use = 0 - b.Beg = 0 - b.Readable = n - } -} - -func intMax(a, b int) int { - if a > b { - return a - } else { - return b - } -} - -func intMin(a, b int) int { - if a < b { - return a - } else { - return b - } -} - -// Get the (beg, end] indices of the tailing empty buffer of bytes slice that from that is free for writing. -// Note: not guaranteed to be zeroed. At all. 
-func (b *FixedSizeRingBuf) GetEndmostWritable() (beg int, end int) { - extent := b.Beg + b.Readable - if extent < b.N { - return extent, b.N - } - - return extent % b.N, b.Beg -} - -// Note: not guaranteed to be zeroed. -func (b *FixedSizeRingBuf) GetEndmostWritableSlice() []byte { - beg, e := b.GetEndmostWritable() - return b.A[b.Use][beg:e] -} diff --git a/vendor/github.com/glycerine/go-unsnap-stream/snap.go b/vendor/github.com/glycerine/go-unsnap-stream/snap.go deleted file mode 100644 index 12a8d40b5..000000000 --- a/vendor/github.com/glycerine/go-unsnap-stream/snap.go +++ /dev/null @@ -1,100 +0,0 @@ -package unsnap - -import ( - "encoding/binary" - - // no c lib dependency - snappy "github.com/golang/snappy" - // or, use the C wrapper for speed - //snappy "github.com/dgryski/go-csnappy" -) - -// add Write() method for SnappyFile (see unsnap.go) - -// reference for snappy framing/streaming format: -// http://code.google.com/p/snappy/source/browse/trunk/framing_format.txt -// ?spec=svn68&r=71 - -// -// Write writes len(p) bytes from p to the underlying data stream. -// It returns the number of bytes written from p (0 <= n <= len(p)) and -// any error encountered that caused the write to stop early. Write -// must return a non-nil error if it returns n < len(p). -// -func (sf *SnappyFile) Write(p []byte) (n int, err error) { - - if sf.SnappyEncodeDecodeOff { - return sf.Writer.Write(p) - } - - if !sf.Writing { - panic("Writing on a read-only SnappyFile") - } - - // encoding in snappy can apparently go beyond the original size, beware. - // so our buffers must be sized 2*max snappy chunk => 2 * CHUNK_MAX(65536) - - sf.DecBuf.Reset() - sf.EncBuf.Reset() - - if !sf.HeaderChunkWritten { - sf.HeaderChunkWritten = true - _, err = sf.Writer.Write(SnappyStreamHeaderMagic) - if err != nil { - return - } - } - var chunk []byte - var chunk_type byte - var crc uint32 - - for len(p) > 0 { - - // chunk points to input p by default, unencoded input. 
- chunk = p[:IntMin(len(p), CHUNK_MAX)] - crc = masked_crc32c(chunk) - - writeme := chunk[:] - - // first write to EncBuf, as a temp, in case we want - // to discard and send uncompressed instead. - compressed_chunk := snappy.Encode(sf.EncBuf.GetEndmostWritableSlice(), chunk) - - if len(compressed_chunk) <= int((1-_COMPRESSION_THRESHOLD)*float64(len(chunk))) { - writeme = compressed_chunk - chunk_type = _COMPRESSED_CHUNK - } else { - // keep writeme pointing at original chunk (uncompressed) - chunk_type = _UNCOMPRESSED_CHUNK - } - - const crc32Sz = 4 - var tag32 uint32 = uint32(chunk_type) + (uint32(len(writeme)+crc32Sz) << 8) - - err = binary.Write(sf.Writer, binary.LittleEndian, tag32) - if err != nil { - return - } - - err = binary.Write(sf.Writer, binary.LittleEndian, crc) - if err != nil { - return - } - - _, err = sf.Writer.Write(writeme) - if err != nil { - return - } - - n += len(chunk) - p = p[len(chunk):] - } - return n, nil -} - -func IntMin(a int, b int) int { - if a < b { - return a - } - return b -} diff --git a/vendor/github.com/glycerine/go-unsnap-stream/unenc.txt b/vendor/github.com/glycerine/go-unsnap-stream/unenc.txt deleted file mode 100644 index 5f5027939..000000000 --- a/vendor/github.com/glycerine/go-unsnap-stream/unenc.txt +++ /dev/null @@ -1 +0,0 @@ -hello_snappy diff --git a/vendor/github.com/glycerine/go-unsnap-stream/unenc.txt.snappy b/vendor/github.com/glycerine/go-unsnap-stream/unenc.txt.snappy deleted file mode 100644 index ba45ecd4269dd0d763893470fde8639f40203d06..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 31 mcmey*#=ubQml#kG$tcLcu=-fm#f;RPoc#FWyu^ZnN-hAVp$iuP diff --git a/vendor/github.com/glycerine/go-unsnap-stream/unsnap.go b/vendor/github.com/glycerine/go-unsnap-stream/unsnap.go deleted file mode 100644 index 0d33949e9..000000000 --- a/vendor/github.com/glycerine/go-unsnap-stream/unsnap.go +++ /dev/null @@ -1,519 +0,0 @@ -package unsnap - -import ( - "bytes" - "encoding/binary" - "fmt" 
- "io" - "io/ioutil" - "os" - "strings" - - "hash/crc32" - - snappy "github.com/golang/snappy" - // The C library can be used, but this makes the binary dependent - // lots of extraneous c-libraries; it is no longer stand-alone. Yuck. - // - // Therefore we comment out the "dgryski/go-csnappy" path and use the - // "github.com/golang/snappy/snappy" above instead. If you are - // performance limited and can deal with distributing more libraries, - // then this is easy to swap. - // - // If you swap, note that some of the tests won't pass - // because snappy-go produces slightly different (but still - // conformant) encodings on some data. Here are bindings - // to the C-snappy: - // snappy "github.com/dgryski/go-csnappy" -) - -// SnappyFile: create a drop-in-replacement/wrapper for an *os.File that handles doing the unsnappification online as more is read from it - -type SnappyFile struct { - Fname string - - Reader io.Reader - Writer io.Writer - - // allow clients to substitute us for an os.File and just switch - // off compression if they don't want it. - SnappyEncodeDecodeOff bool // if true, we bypass straight to Filep - - EncBuf FixedSizeRingBuf // holds any extra that isn't yet returned, encoded - DecBuf FixedSizeRingBuf // holds any extra that isn't yet returned, decoded - - // for writing to stream-framed snappy - HeaderChunkWritten bool - - // Sanity check: we can only read, or only write, to one SnappyFile. - // EncBuf and DecBuf are used differently in each mode. Verify - // that we are consistent with this flag. 
- Writing bool -} - -var total int - -// for debugging, show state of buffers -func (f *SnappyFile) Dump() { - fmt.Printf("EncBuf has length %d and contents:\n%s\n", len(f.EncBuf.Bytes()), string(f.EncBuf.Bytes())) - fmt.Printf("DecBuf has length %d and contents:\n%s\n", len(f.DecBuf.Bytes()), string(f.DecBuf.Bytes())) -} - -func (f *SnappyFile) Read(p []byte) (n int, err error) { - - if f.SnappyEncodeDecodeOff { - return f.Reader.Read(p) - } - - if f.Writing { - panic("Reading on a write-only SnappyFile") - } - - // before we unencrypt more, try to drain the DecBuf first - n, _ = f.DecBuf.Read(p) - if n > 0 { - total += n - return n, nil - } - - //nEncRead, nDecAdded, err := UnsnapOneFrame(f.Filep, &f.EncBuf, &f.DecBuf, f.Fname) - _, _, err = UnsnapOneFrame(f.Reader, &f.EncBuf, &f.DecBuf, f.Fname) - if err != nil && err != io.EOF { - panic(err) - } - - n, _ = f.DecBuf.Read(p) - - if n > 0 { - total += n - return n, nil - } - if f.DecBuf.Readable == 0 { - if f.DecBuf.Readable == 0 && f.EncBuf.Readable == 0 { - // only now (when EncBuf is empty) can we give io.EOF. - // Any earlier, and we leave stuff un-decoded! 
- return 0, io.EOF - } - } - return 0, nil -} - -func Open(name string) (file *SnappyFile, err error) { - fp, err := os.Open(name) - if err != nil { - return nil, err - } - // encoding in snappy can apparently go beyond the original size, so - // we make our buffers big enough, 2*max snappy chunk => 2 * CHUNK_MAX(65536) - - snap := NewReader(fp) - snap.Fname = name - return snap, nil -} - -func NewReader(r io.Reader) *SnappyFile { - return &SnappyFile{ - Reader: r, - EncBuf: *NewFixedSizeRingBuf(CHUNK_MAX * 2), // buffer of snappy encoded bytes - DecBuf: *NewFixedSizeRingBuf(CHUNK_MAX * 2), // buffer of snapppy decoded bytes - Writing: false, - } -} - -func NewWriter(w io.Writer) *SnappyFile { - return &SnappyFile{ - Writer: w, - EncBuf: *NewFixedSizeRingBuf(65536), // on writing: temp for testing compression - DecBuf: *NewFixedSizeRingBuf(65536 * 2), // on writing: final buffer of snappy framed and encoded bytes - Writing: true, - } -} - -func Create(name string) (file *SnappyFile, err error) { - fp, err := os.Create(name) - if err != nil { - return nil, err - } - snap := NewWriter(fp) - snap.Fname = name - return snap, nil -} - -func (f *SnappyFile) Close() error { - if f.Writing { - wc, ok := f.Writer.(io.WriteCloser) - if ok { - return wc.Close() - } - return nil - } - rc, ok := f.Reader.(io.ReadCloser) - if ok { - return rc.Close() - } - return nil -} - -func (f *SnappyFile) Sync() error { - file, ok := f.Writer.(*os.File) - if ok { - return file.Sync() - } - return nil -} - -// for an increment of a frame at a time: -// read from r into encBuf (encBuf is still encoded, thus the name), and write unsnappified frames into outDecodedBuf -// the returned n: number of bytes read from the encrypted encBuf -func UnsnapOneFrame(r io.Reader, encBuf *FixedSizeRingBuf, outDecodedBuf *FixedSizeRingBuf, fname string) (nEnc int64, nDec int64, err error) { - // b, err := ioutil.ReadAll(r) - // if err != nil { - // panic(err) - // } - - nEnc = 0 - nDec = 0 - - // read up to 
65536 bytes from r into encBuf, at least a snappy frame - nread, err := io.CopyN(encBuf, r, 65536) // returns nwrotebytes, err - nEnc += nread - if err != nil { - if err == io.EOF { - if nread == 0 { - if encBuf.Readable == 0 { - return nEnc, nDec, io.EOF - } - // else we have bytes in encBuf, so decode them! - err = nil - } else { - // continue below, processing the nread bytes - err = nil - } - } else { - // may be an odd already closed... don't panic on that - if strings.Contains(err.Error(), "file already closed") { - err = nil - } else { - panic(err) - } - } - } - - // flag for printing chunk size alignment messages - verbose := false - - const snappyStreamHeaderSz = 10 - const headerSz = 4 - const crc32Sz = 4 - // the magic 18 bytes accounts for the snappy streaming header and the first chunks size and checksum - // http://code.google.com/p/snappy/source/browse/trunk/framing_format.txt - - chunk := (*encBuf).Bytes() - - // however we exit, advance as - // defer func() { (*encBuf).Next(N) }() - - // 65536 is the max size of a snappy framed chunk. 
See - // http://code.google.com/p/snappy/source/browse/trunk/framing_format.txt:91 - // buf := make([]byte, 65536) - - // fmt.Printf("read from file, b is len:%d with value: %#v\n", len(b), b) - // fmt.Printf("read from file, bcut is len:%d with value: %#v\n", len(bcut), bcut) - - //fmt.Printf("raw bytes of chunksz are: %v\n", b[11:14]) - - fourbytes := make([]byte, 4) - chunkCount := 0 - - for nDec < 65536 { - if len(chunk) == 0 { - break - } - chunkCount++ - fourbytes[3] = 0 - copy(fourbytes, chunk[1:4]) - chunksz := binary.LittleEndian.Uint32(fourbytes) - chunk_type := chunk[0] - - switch true { - case chunk_type == 0xff: - { // stream identifier - - streamHeader := chunk[:snappyStreamHeaderSz] - if 0 != bytes.Compare(streamHeader, []byte{0xff, 0x06, 0x00, 0x00, 0x73, 0x4e, 0x61, 0x50, 0x70, 0x59}) { - panic("file had chunk starting with 0xff but then no magic snappy streaming protocol bytes, aborting.") - } else { - //fmt.Printf("got streaming snappy magic header just fine.\n") - } - chunk = chunk[snappyStreamHeaderSz:] - (*encBuf).Advance(snappyStreamHeaderSz) - nEnc += snappyStreamHeaderSz - continue - } - case chunk_type == 0x00: - { // compressed data - if verbose { - fmt.Fprintf(os.Stderr, "chunksz is %d while total bytes avail are: %d\n", int(chunksz), len(chunk)-4) - } - - crc := binary.LittleEndian.Uint32(chunk[headerSz:(headerSz + crc32Sz)]) - section := chunk[(headerSz + crc32Sz):(headerSz + chunksz)] - - dec, ok := snappy.Decode(nil, section) - if ok != nil { - // we've probably truncated a snappy frame at this point - // ok=snappy: corrupt input - // len(dec) == 0 - // - panic(fmt.Sprintf("could not decode snappy stream: '%s' and len dec=%d and ok=%v\n", fname, len(dec), ok)) - - // get back to caller with what we've got so far - return nEnc, nDec, nil - } - // fmt.Printf("ok, b is %#v , %#v\n", ok, dec) - - // spit out decoded text - // n, err := w.Write(dec) - //fmt.Printf("len(dec) = %d, outDecodedBuf.Readable=%d\n", len(dec), 
outDecodedBuf.Readable) - bnb := bytes.NewBuffer(dec) - n, err := io.Copy(outDecodedBuf, bnb) - if err != nil { - //fmt.Printf("got n=%d, err= %s ; when trying to io.Copy(outDecodedBuf: N=%d, Readable=%d)\n", n, err, outDecodedBuf.N, outDecodedBuf.Readable) - panic(err) - } - if n != int64(len(dec)) { - panic("could not write all bytes to outDecodedBuf") - } - nDec += n - - // verify the crc32 rotated checksum - m32 := masked_crc32c(dec) - if m32 != crc { - panic(fmt.Sprintf("crc32 masked failiure. expected: %v but got: %v", crc, m32)) - } else { - //fmt.Printf("\nchecksums match: %v == %v\n", crc, m32) - } - - // move to next header - inc := (headerSz + int(chunksz)) - chunk = chunk[inc:] - (*encBuf).Advance(inc) - nEnc += int64(inc) - continue - } - case chunk_type == 0x01: - { // uncompressed data - - //n, err := w.Write(chunk[(headerSz+crc32Sz):(headerSz + int(chunksz))]) - n, err := io.Copy(outDecodedBuf, bytes.NewBuffer(chunk[(headerSz+crc32Sz):(headerSz+int(chunksz))])) - if verbose { - //fmt.Printf("debug: n=%d err=%v chunksz=%d outDecodedBuf='%v'\n", n, err, chunksz, outDecodedBuf) - } - if err != nil { - panic(err) - } - if n != int64(chunksz-crc32Sz) { - panic("could not write all bytes to stdout") - } - nDec += n - - inc := (headerSz + int(chunksz)) - chunk = chunk[inc:] - (*encBuf).Advance(inc) - nEnc += int64(inc) - continue - } - case chunk_type == 0xfe: - fallthrough // padding, just skip it - case chunk_type >= 0x80 && chunk_type <= 0xfd: - { // Reserved skippable chunks - //fmt.Printf("\nin reserved skippable chunks, at nEnc=%v\n", nEnc) - inc := (headerSz + int(chunksz)) - chunk = chunk[inc:] - nEnc += int64(inc) - (*encBuf).Advance(inc) - continue - } - - default: - panic(fmt.Sprintf("unrecognized/unsupported chunk type %#v", chunk_type)) - } - - } // end for{} - - return nEnc, nDec, err - //return int64(N), nil -} - -// for whole file at once: -// -// receive on stdin a stream of bytes in the snappy-streaming framed -// format, defined here: 
http://code.google.com/p/snappy/source/browse/trunk/framing_format.txt -// Grab each frame, run it through the snappy decoder, and spit out -// each frame all joined back-to-back on stdout. -// -func Unsnappy(r io.Reader, w io.Writer) (err error) { - b, err := ioutil.ReadAll(r) - if err != nil { - panic(err) - } - - // flag for printing chunk size alignment messages - verbose := false - - const snappyStreamHeaderSz = 10 - const headerSz = 4 - const crc32Sz = 4 - // the magic 18 bytes accounts for the snappy streaming header and the first chunks size and checksum - // http://code.google.com/p/snappy/source/browse/trunk/framing_format.txt - - chunk := b[:] - - // 65536 is the max size of a snappy framed chunk. See - // http://code.google.com/p/snappy/source/browse/trunk/framing_format.txt:91 - //buf := make([]byte, 65536) - - // fmt.Printf("read from file, b is len:%d with value: %#v\n", len(b), b) - // fmt.Printf("read from file, bcut is len:%d with value: %#v\n", len(bcut), bcut) - - //fmt.Printf("raw bytes of chunksz are: %v\n", b[11:14]) - - fourbytes := make([]byte, 4) - chunkCount := 0 - - for { - if len(chunk) == 0 { - break - } - chunkCount++ - fourbytes[3] = 0 - copy(fourbytes, chunk[1:4]) - chunksz := binary.LittleEndian.Uint32(fourbytes) - chunk_type := chunk[0] - - switch true { - case chunk_type == 0xff: - { // stream identifier - - streamHeader := chunk[:snappyStreamHeaderSz] - if 0 != bytes.Compare(streamHeader, []byte{0xff, 0x06, 0x00, 0x00, 0x73, 0x4e, 0x61, 0x50, 0x70, 0x59}) { - panic("file had chunk starting with 0xff but then no magic snappy streaming protocol bytes, aborting.") - } else { - //fmt.Printf("got streaming snappy magic header just fine.\n") - } - chunk = chunk[snappyStreamHeaderSz:] - continue - } - case chunk_type == 0x00: - { // compressed data - if verbose { - fmt.Fprintf(os.Stderr, "chunksz is %d while total bytes avail are: %d\n", int(chunksz), len(chunk)-4) - } - - //crc := binary.LittleEndian.Uint32(chunk[headerSz:(headerSz + 
crc32Sz)]) - section := chunk[(headerSz + crc32Sz):(headerSz + chunksz)] - - dec, ok := snappy.Decode(nil, section) - if ok != nil { - panic("could not decode snappy stream") - } - // fmt.Printf("ok, b is %#v , %#v\n", ok, dec) - - // spit out decoded text - n, err := w.Write(dec) - if err != nil { - panic(err) - } - if n != len(dec) { - panic("could not write all bytes to stdout") - } - - // TODO: verify the crc32 rotated checksum? - - // move to next header - chunk = chunk[(headerSz + int(chunksz)):] - continue - } - case chunk_type == 0x01: - { // uncompressed data - - //crc := binary.LittleEndian.Uint32(chunk[headerSz:(headerSz + crc32Sz)]) - section := chunk[(headerSz + crc32Sz):(headerSz + chunksz)] - - n, err := w.Write(section) - if err != nil { - panic(err) - } - if n != int(chunksz-crc32Sz) { - panic("could not write all bytes to stdout") - } - - chunk = chunk[(headerSz + int(chunksz)):] - continue - } - case chunk_type == 0xfe: - fallthrough // padding, just skip it - case chunk_type >= 0x80 && chunk_type <= 0xfd: - { // Reserved skippable chunks - chunk = chunk[(headerSz + int(chunksz)):] - continue - } - - default: - panic(fmt.Sprintf("unrecognized/unsupported chunk type %#v", chunk_type)) - } - - } // end for{} - - return nil -} - -// 0xff 0x06 0x00 0x00 sNaPpY -var SnappyStreamHeaderMagic = []byte{0xff, 0x06, 0x00, 0x00, 0x73, 0x4e, 0x61, 0x50, 0x70, 0x59} - -const CHUNK_MAX = 65536 -const _STREAM_TO_STREAM_BLOCK_SIZE = CHUNK_MAX -const _STREAM_IDENTIFIER = `sNaPpY` -const _COMPRESSED_CHUNK = 0x00 -const _UNCOMPRESSED_CHUNK = 0x01 -const _IDENTIFIER_CHUNK = 0xff -const _RESERVED_UNSKIPPABLE0 = 0x02 // chunk ranges are [inclusive, exclusive) -const _RESERVED_UNSKIPPABLE1 = 0x80 -const _RESERVED_SKIPPABLE0 = 0x80 -const _RESERVED_SKIPPABLE1 = 0xff - -// the minimum percent of bytes compression must save to be enabled in automatic -// mode -const _COMPRESSION_THRESHOLD = .125 - -var crctab *crc32.Table - -func init() { - crctab = 
crc32.MakeTable(crc32.Castagnoli) // this is correct table, matches the crc32c.c code used by python -} - -func masked_crc32c(data []byte) uint32 { - - // see the framing format specification, http://code.google.com/p/snappy/source/browse/trunk/framing_format.txt - var crc uint32 = crc32.Checksum(data, crctab) - return (uint32((crc>>15)|(crc<<17)) + 0xa282ead8) -} - -func ReadSnappyStreamCompressedFile(filename string) ([]byte, error) { - - snappyFile, err := Open(filename) - if err != nil { - return []byte{}, err - } - - var bb bytes.Buffer - _, err = bb.ReadFrom(snappyFile) - if err == io.EOF { - err = nil - } - if err != nil { - panic(err) - } - - return bb.Bytes(), err -} diff --git a/vendor/github.com/dgrijalva/jwt-go/.gitignore b/vendor/github.com/golang-jwt/jwt/v4/.gitignore similarity index 68% rename from vendor/github.com/dgrijalva/jwt-go/.gitignore rename to vendor/github.com/golang-jwt/jwt/v4/.gitignore index 80bed650e..09573e016 100644 --- a/vendor/github.com/dgrijalva/jwt-go/.gitignore +++ b/vendor/github.com/golang-jwt/jwt/v4/.gitignore @@ -1,4 +1,4 @@ .DS_Store bin - +.idea/ diff --git a/vendor/github.com/dgrijalva/jwt-go/LICENSE b/vendor/github.com/golang-jwt/jwt/v4/LICENSE similarity index 96% rename from vendor/github.com/dgrijalva/jwt-go/LICENSE rename to vendor/github.com/golang-jwt/jwt/v4/LICENSE index df83a9c2f..35dbc2520 100644 --- a/vendor/github.com/dgrijalva/jwt-go/LICENSE +++ b/vendor/github.com/golang-jwt/jwt/v4/LICENSE @@ -1,4 +1,5 @@ Copyright (c) 2012 Dave Grijalva +Copyright (c) 2021 golang-jwt maintainers Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 
diff --git a/vendor/github.com/golang-jwt/jwt/v4/MIGRATION_GUIDE.md b/vendor/github.com/golang-jwt/jwt/v4/MIGRATION_GUIDE.md new file mode 100644 index 000000000..32966f598 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/MIGRATION_GUIDE.md @@ -0,0 +1,22 @@ +## Migration Guide (v4.0.0) + +Starting from [v4.0.0](https://github.com/golang-jwt/jwt/releases/tag/v4.0.0), the import path will be: + + "github.com/golang-jwt/jwt/v4" + +The `/v4` version will be backwards compatible with existing `v3.x.y` tags in this repo, as well as +`github.com/dgrijalva/jwt-go`. For most users this should be a drop-in replacement, if you're having +troubles migrating, please open an issue. + +You can replace all occurrences of `github.com/dgrijalva/jwt-go` or `github.com/golang-jwt/jwt` with `github.com/golang-jwt/jwt/v4`, either manually or by using tools such as `sed` or `gofmt`. + +And then you'd typically run: + +``` +go get github.com/golang-jwt/jwt/v4 +go mod tidy +``` + +## Older releases (before v3.2.0) + +The original migration guide for older releases can be found at https://github.com/dgrijalva/jwt-go/blob/master/MIGRATION_GUIDE.md. 
diff --git a/vendor/github.com/dgrijalva/jwt-go/README.md b/vendor/github.com/golang-jwt/jwt/v4/README.md similarity index 60% rename from vendor/github.com/dgrijalva/jwt-go/README.md rename to vendor/github.com/golang-jwt/jwt/v4/README.md index d7749077f..3072d24a9 100644 --- a/vendor/github.com/dgrijalva/jwt-go/README.md +++ b/vendor/github.com/golang-jwt/jwt/v4/README.md @@ -1,25 +1,36 @@ # jwt-go -[![Build Status](https://travis-ci.org/dgrijalva/jwt-go.svg?branch=master)](https://travis-ci.org/dgrijalva/jwt-go) -[![GoDoc](https://godoc.org/github.com/dgrijalva/jwt-go?status.svg)](https://godoc.org/github.com/dgrijalva/jwt-go) +[![build](https://github.com/golang-jwt/jwt/actions/workflows/build.yml/badge.svg)](https://github.com/golang-jwt/jwt/actions/workflows/build.yml) +[![Go Reference](https://pkg.go.dev/badge/github.com/golang-jwt/jwt/v4.svg)](https://pkg.go.dev/github.com/golang-jwt/jwt/v4) -A [go](http://www.golang.org) (or 'golang' for search engine friendliness) implementation of [JSON Web Tokens](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html) +A [go](http://www.golang.org) (or 'golang' for search engine friendliness) implementation of [JSON Web Tokens](https://datatracker.ietf.org/doc/html/rfc7519). -**NEW VERSION COMING:** There have been a lot of improvements suggested since the version 3.0.0 released in 2016. I'm working now on cutting two different releases: 3.2.0 will contain any non-breaking changes or enhancements. 4.0.0 will follow shortly which will include breaking changes. See the 4.0.0 milestone to get an idea of what's coming. If you have other ideas, or would like to participate in 4.0.0, now's the time. If you depend on this library and don't want to be interrupted, I recommend you use your dependency mangement tool to pin to version 3. 
+Starting with [v4.0.0](https://github.com/golang-jwt/jwt/releases/tag/v4.0.0) this project adds Go module support, but maintains backwards compatibility with older `v3.x.y` tags and upstream `github.com/dgrijalva/jwt-go`. +See the [`MIGRATION_GUIDE.md`](./MIGRATION_GUIDE.md) for more information. -**SECURITY NOTICE:** Some older versions of Go have a security issue in the cryotp/elliptic. Recommendation is to upgrade to at least 1.8.3. See issue #216 for more detail. +> After the original author of the library suggested migrating the maintenance of `jwt-go`, a dedicated team of open source maintainers decided to clone the existing library into this repository. See [dgrijalva/jwt-go#462](https://github.com/dgrijalva/jwt-go/issues/462) for a detailed discussion on this topic. + + +**SECURITY NOTICE:** Some older versions of Go have a security issue in the crypto/elliptic. Recommendation is to upgrade to at least 1.15 See issue [dgrijalva/jwt-go#216](https://github.com/dgrijalva/jwt-go/issues/216) for more detail. **SECURITY NOTICE:** It's important that you [validate the `alg` presented is what you expect](https://auth0.com/blog/critical-vulnerabilities-in-json-web-token-libraries/). This library attempts to make it easy to do the right thing by requiring key types match the expected alg, but you should take the extra step to verify it in your usage. See the examples provided. +### Supported Go versions + +Our support of Go versions is aligned with Go's [version release policy](https://golang.org/doc/devel/release#policy). +So we will support a major version of Go until there are two newer major releases. +We no longer support building jwt-go with unsupported Go versions, as these contain security vulnerabilities +which will not be fixed. + ## What the heck is a JWT? JWT.io has [a great introduction](https://jwt.io/introduction) to JSON Web Tokens. -In short, it's a signed JSON object that does something useful (for example, authentication). 
It's commonly used for `Bearer` tokens in Oauth 2. A token is made of three parts, separated by `.`'s. The first two parts are JSON objects, that have been [base64url](http://tools.ietf.org/html/rfc4648) encoded. The last part is the signature, encoded the same way. +In short, it's a signed JSON object that does something useful (for example, authentication). It's commonly used for `Bearer` tokens in Oauth 2. A token is made of three parts, separated by `.`'s. The first two parts are JSON objects, that have been [base64url](https://datatracker.ietf.org/doc/html/rfc4648) encoded. The last part is the signature, encoded the same way. The first part is called the header. It contains the necessary information for verifying the last part, the signature. For example, which encryption method was used for signing and what key was used. -The part in the middle is the interesting bit. It's called the Claims and contains the actual stuff you care about. Refer to [the RFC](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html) for information about reserved keys and the proper way to add your own. +The part in the middle is the interesting bit. It's called the Claims and contains the actual stuff you care about. Refer to [RFC 7519](https://datatracker.ietf.org/doc/html/rfc7519) for information about reserved keys and the proper way to add your own. ## What's in the box? 
@@ -27,11 +38,11 @@ This library supports the parsing and verification as well as the generation and ## Examples -See [the project documentation](https://godoc.org/github.com/dgrijalva/jwt-go) for examples of usage: +See [the project documentation](https://pkg.go.dev/github.com/golang-jwt/jwt) for examples of usage: -* [Simple example of parsing and validating a token](https://godoc.org/github.com/dgrijalva/jwt-go#example-Parse--Hmac) -* [Simple example of building and signing a token](https://godoc.org/github.com/dgrijalva/jwt-go#example-New--Hmac) -* [Directory of Examples](https://godoc.org/github.com/dgrijalva/jwt-go#pkg-examples) +* [Simple example of parsing and validating a token](https://pkg.go.dev/github.com/golang-jwt/jwt#example-Parse-Hmac) +* [Simple example of building and signing a token](https://pkg.go.dev/github.com/golang-jwt/jwt#example-New-Hmac) +* [Directory of Examples](https://pkg.go.dev/github.com/golang-jwt/jwt#pkg-examples) ## Extensions @@ -41,20 +52,18 @@ Here's an example of an extension that integrates with multiple Google Cloud Pla ## Compliance -This library was last reviewed to comply with [RTF 7519](http://www.rfc-editor.org/info/rfc7519) dated May 2015 with a few notable differences: +This library was last reviewed to comply with [RFC 7519](https://datatracker.ietf.org/doc/html/rfc7519) dated May 2015 with a few notable differences: -* In order to protect against accidental use of [Unsecured JWTs](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html#UnsecuredJWT), tokens using `alg=none` will only be accepted if the constant `jwt.UnsafeAllowNoneSignatureType` is provided as the key. +* In order to protect against accidental use of [Unsecured JWTs](https://datatracker.ietf.org/doc/html/rfc7519#section-6), tokens using `alg=none` will only be accepted if the constant `jwt.UnsafeAllowNoneSignatureType` is provided as the key. ## Project Status & Versioning This library is considered production ready. 
Feedback and feature requests are appreciated. The API should be considered stable. There should be very few backwards-incompatible changes outside of major version updates (and only with good reason). -This project uses [Semantic Versioning 2.0.0](http://semver.org). Accepted pull requests will land on `master`. Periodically, versions will be tagged from `master`. You can find all the releases on [the project releases page](https://github.com/dgrijalva/jwt-go/releases). - -While we try to make it obvious when we make breaking changes, there isn't a great mechanism for pushing announcements out to users. You may want to use this alternative package include: `gopkg.in/dgrijalva/jwt-go.v3`. It will do the right thing WRT semantic versioning. +This project uses [Semantic Versioning 2.0.0](http://semver.org). Accepted pull requests will land on `main`. Periodically, versions will be tagged from `main`. You can find all the releases on [the project releases page](https://github.com/golang-jwt/jwt/releases). **BREAKING CHANGES:*** -* Version 3.0.0 includes _a lot_ of changes from the 2.x line, including a few that break the API. We've tried to break as few things as possible, so there should just be a few type signature changes. A full list of breaking changes is available in `VERSION_HISTORY.md`. See `MIGRATION_GUIDE.md` for more information on updating your code. +A full list of breaking changes is available in `VERSION_HISTORY.md`. See `MIGRATION_GUIDE.md` for more information on updating your code. ## Usage Tips @@ -79,9 +88,10 @@ Asymmetric signing methods, such as RSA, use different keys for signing and veri Each signing method expects a different object type for its signing keys. See the package documentation for details. 
Here are the most common ones: -* The [HMAC signing method](https://godoc.org/github.com/dgrijalva/jwt-go#SigningMethodHMAC) (`HS256`,`HS384`,`HS512`) expect `[]byte` values for signing and validation -* The [RSA signing method](https://godoc.org/github.com/dgrijalva/jwt-go#SigningMethodRSA) (`RS256`,`RS384`,`RS512`) expect `*rsa.PrivateKey` for signing and `*rsa.PublicKey` for validation -* The [ECDSA signing method](https://godoc.org/github.com/dgrijalva/jwt-go#SigningMethodECDSA) (`ES256`,`ES384`,`ES512`) expect `*ecdsa.PrivateKey` for signing and `*ecdsa.PublicKey` for validation +* The [HMAC signing method](https://pkg.go.dev/github.com/golang-jwt/jwt#SigningMethodHMAC) (`HS256`,`HS384`,`HS512`) expect `[]byte` values for signing and validation +* The [RSA signing method](https://pkg.go.dev/github.com/golang-jwt/jwt#SigningMethodRSA) (`RS256`,`RS384`,`RS512`) expect `*rsa.PrivateKey` for signing and `*rsa.PublicKey` for validation +* The [ECDSA signing method](https://pkg.go.dev/github.com/golang-jwt/jwt#SigningMethodECDSA) (`ES256`,`ES384`,`ES512`) expect `*ecdsa.PrivateKey` for signing and `*ecdsa.PublicKey` for validation +* The [EdDSA signing method](https://pkg.go.dev/github.com/golang-jwt/jwt#SigningMethodEd25519) (`Ed25519`) expect `ed25519.PrivateKey` for signing and `ed25519.PublicKey` for validation ### JWT and OAuth @@ -99,6 +109,6 @@ This library uses descriptive error messages whenever possible. If you are not g ## More -Documentation can be found [on godoc.org](http://godoc.org/github.com/dgrijalva/jwt-go). +Documentation can be found [on pkg.go.dev](https://pkg.go.dev/github.com/golang-jwt/jwt). The command line utility included in this project (cmd/jwt) provides a straightforward example of token creation and parsing as well as a useful tool for debugging your own integration. You'll also find several implementation examples in the documentation. 
diff --git a/vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md b/vendor/github.com/golang-jwt/jwt/v4/VERSION_HISTORY.md similarity index 84% rename from vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md rename to vendor/github.com/golang-jwt/jwt/v4/VERSION_HISTORY.md index 637029831..afbfc4e40 100644 --- a/vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md +++ b/vendor/github.com/golang-jwt/jwt/v4/VERSION_HISTORY.md @@ -1,5 +1,22 @@ ## `jwt-go` Version History +#### 4.0.0 + +* Introduces support for Go modules. The `v4` version will be backwards compatible with `v3.x.y`. + +#### 3.2.2 + +* Starting from this release, we are adopting the policy to support the most 2 recent versions of Go currently available. By the time of this release, this is Go 1.15 and 1.16 ([#28](https://github.com/golang-jwt/jwt/pull/28)). +* Fixed a potential issue that could occur when the verification of `exp`, `iat` or `nbf` was not required and contained invalid contents, i.e. non-numeric/date. Thanks for @thaJeztah for making us aware of that and @giorgos-f3 for originally reporting it to the formtech fork ([#40](https://github.com/golang-jwt/jwt/pull/40)). +* Added support for EdDSA / ED25519 ([#36](https://github.com/golang-jwt/jwt/pull/36)). +* Optimized allocations ([#33](https://github.com/golang-jwt/jwt/pull/33)). + +#### 3.2.1 + +* **Import Path Change**: See MIGRATION_GUIDE.md for tips on updating your code + * Changed the import path from `github.com/dgrijalva/jwt-go` to `github.com/golang-jwt/jwt` +* Fixed type confusing issue between `string` and `[]string` in `VerifyAudience` ([#12](https://github.com/golang-jwt/jwt/pull/12)). 
This fixes CVE-2020-26160 + #### 3.2.0 * Added method `ParseUnverified` to allow users to split up the tasks of parsing and validation @@ -115,4 +132,4 @@ It is likely the only integration change required here will be to change `func(t * First versioned release * API stabilized * Supports creating, signing, parsing, and validating JWT tokens -* Supports RS256 and HS256 signing methods \ No newline at end of file +* Supports RS256 and HS256 signing methods diff --git a/vendor/github.com/golang-jwt/jwt/v4/claims.go b/vendor/github.com/golang-jwt/jwt/v4/claims.go new file mode 100644 index 000000000..b07ac02de --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/claims.go @@ -0,0 +1,267 @@ +package jwt + +import ( + "crypto/subtle" + "fmt" + "time" +) + +// Claims must just have a Valid method that determines +// if the token is invalid for any supported reason +type Claims interface { + Valid() error +} + +// RegisteredClaims are a structured version of the JWT Claims Set, +// restricted to Registered Claim Names, as referenced at +// https://datatracker.ietf.org/doc/html/rfc7519#section-4.1 +// +// This type can be used on its own, but then additional private and +// public claims embedded in the JWT will not be parsed. The typical usecase +// therefore is to embedded this in a user-defined claim type. +// +// See examples for how to use this with your own claim types. +type RegisteredClaims struct { + // the `iss` (Issuer) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.1 + Issuer string `json:"iss,omitempty"` + + // the `sub` (Subject) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.2 + Subject string `json:"sub,omitempty"` + + // the `aud` (Audience) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.3 + Audience ClaimStrings `json:"aud,omitempty"` + + // the `exp` (Expiration Time) claim. 
See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.4 + ExpiresAt *NumericDate `json:"exp,omitempty"` + + // the `nbf` (Not Before) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.5 + NotBefore *NumericDate `json:"nbf,omitempty"` + + // the `iat` (Issued At) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.6 + IssuedAt *NumericDate `json:"iat,omitempty"` + + // the `jti` (JWT ID) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.7 + ID string `json:"jti,omitempty"` +} + +// Valid validates time based claims "exp, iat, nbf". +// There is no accounting for clock skew. +// As well, if any of the above claims are not in the token, it will still +// be considered a valid claim. +func (c RegisteredClaims) Valid() error { + vErr := new(ValidationError) + now := TimeFunc() + + // The claims below are optional, by default, so if they are set to the + // default value in Go, let's not fail the verification for them. + if !c.VerifyExpiresAt(now, false) { + delta := now.Sub(c.ExpiresAt.Time) + vErr.Inner = fmt.Errorf("token is expired by %v", delta) + vErr.Errors |= ValidationErrorExpired + } + + if !c.VerifyIssuedAt(now, false) { + vErr.Inner = fmt.Errorf("token used before issued") + vErr.Errors |= ValidationErrorIssuedAt + } + + if !c.VerifyNotBefore(now, false) { + vErr.Inner = fmt.Errorf("token is not valid yet") + vErr.Errors |= ValidationErrorNotValidYet + } + + if vErr.valid() { + return nil + } + + return vErr +} + +// VerifyAudience compares the aud claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (c *RegisteredClaims) VerifyAudience(cmp string, req bool) bool { + return verifyAud(c.Audience, cmp, req) +} + +// VerifyExpiresAt compares the exp claim against cmp (cmp <= exp). +// If req is false, it will return true, if exp is unset. 
+func (c *RegisteredClaims) VerifyExpiresAt(cmp time.Time, req bool) bool { + if c.ExpiresAt == nil { + return verifyExp(nil, cmp, req) + } + + return verifyExp(&c.ExpiresAt.Time, cmp, req) +} + +// VerifyIssuedAt compares the iat claim against cmp (cmp >= iat). +// If req is false, it will return true, if iat is unset. +func (c *RegisteredClaims) VerifyIssuedAt(cmp time.Time, req bool) bool { + if c.IssuedAt == nil { + return verifyIat(nil, cmp, req) + } + + return verifyIat(&c.IssuedAt.Time, cmp, req) +} + +// VerifyNotBefore compares the nbf claim against cmp (cmp >= nbf). +// If req is false, it will return true, if nbf is unset. +func (c *RegisteredClaims) VerifyNotBefore(cmp time.Time, req bool) bool { + if c.NotBefore == nil { + return verifyNbf(nil, cmp, req) + } + + return verifyNbf(&c.NotBefore.Time, cmp, req) +} + +// StandardClaims are a structured version of the JWT Claims Set, as referenced at +// https://datatracker.ietf.org/doc/html/rfc7519#section-4. They do not follow the +// specification exactly, since they were based on an earlier draft of the +// specification and not updated. The main difference is that they only +// support integer-based date fields and singular audiences. This might lead to +// incompatibilities with other JWT implementations. The use of this is discouraged, instead +// the newer RegisteredClaims struct should be used. +// +// Deprecated: Use RegisteredClaims instead for a forward-compatible way to access registered claims in a struct. +type StandardClaims struct { + Audience string `json:"aud,omitempty"` + ExpiresAt int64 `json:"exp,omitempty"` + Id string `json:"jti,omitempty"` + IssuedAt int64 `json:"iat,omitempty"` + Issuer string `json:"iss,omitempty"` + NotBefore int64 `json:"nbf,omitempty"` + Subject string `json:"sub,omitempty"` +} + +// Valid validates time based claims "exp, iat, nbf". There is no accounting for clock skew. 
+// As well, if any of the above claims are not in the token, it will still +// be considered a valid claim. +func (c StandardClaims) Valid() error { + vErr := new(ValidationError) + now := TimeFunc().Unix() + + // The claims below are optional, by default, so if they are set to the + // default value in Go, let's not fail the verification for them. + if !c.VerifyExpiresAt(now, false) { + delta := time.Unix(now, 0).Sub(time.Unix(c.ExpiresAt, 0)) + vErr.Inner = fmt.Errorf("token is expired by %v", delta) + vErr.Errors |= ValidationErrorExpired + } + + if !c.VerifyIssuedAt(now, false) { + vErr.Inner = fmt.Errorf("token used before issued") + vErr.Errors |= ValidationErrorIssuedAt + } + + if !c.VerifyNotBefore(now, false) { + vErr.Inner = fmt.Errorf("token is not valid yet") + vErr.Errors |= ValidationErrorNotValidYet + } + + if vErr.valid() { + return nil + } + + return vErr +} + +// VerifyAudience compares the aud claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (c *StandardClaims) VerifyAudience(cmp string, req bool) bool { + return verifyAud([]string{c.Audience}, cmp, req) +} + +// VerifyExpiresAt compares the exp claim against cmp (cmp <= exp). +// If req is false, it will return true, if exp is unset. +func (c *StandardClaims) VerifyExpiresAt(cmp int64, req bool) bool { + if c.ExpiresAt == 0 { + return verifyExp(nil, time.Unix(cmp, 0), req) + } + + t := time.Unix(c.ExpiresAt, 0) + return verifyExp(&t, time.Unix(cmp, 0), req) +} + +// VerifyIssuedAt compares the iat claim against cmp (cmp >= iat). +// If req is false, it will return true, if iat is unset. +func (c *StandardClaims) VerifyIssuedAt(cmp int64, req bool) bool { + if c.IssuedAt == 0 { + return verifyIat(nil, time.Unix(cmp, 0), req) + } + + t := time.Unix(c.IssuedAt, 0) + return verifyIat(&t, time.Unix(cmp, 0), req) +} + +// VerifyNotBefore compares the nbf claim against cmp (cmp >= nbf). 
+// If req is false, it will return true, if nbf is unset. +func (c *StandardClaims) VerifyNotBefore(cmp int64, req bool) bool { + if c.NotBefore == 0 { + return verifyNbf(nil, time.Unix(cmp, 0), req) + } + + t := time.Unix(c.NotBefore, 0) + return verifyNbf(&t, time.Unix(cmp, 0), req) +} + +// VerifyIssuer compares the iss claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (c *StandardClaims) VerifyIssuer(cmp string, req bool) bool { + return verifyIss(c.Issuer, cmp, req) +} + +// ----- helpers + +func verifyAud(aud []string, cmp string, required bool) bool { + if len(aud) == 0 { + return !required + } + // use a var here to keep constant time compare when looping over a number of claims + result := false + + var stringClaims string + for _, a := range aud { + if subtle.ConstantTimeCompare([]byte(a), []byte(cmp)) != 0 { + result = true + } + stringClaims = stringClaims + a + } + + // case where "" is sent in one or many aud claims + if len(stringClaims) == 0 { + return !required + } + + return result +} + +func verifyExp(exp *time.Time, now time.Time, required bool) bool { + if exp == nil { + return !required + } + return now.Before(*exp) +} + +func verifyIat(iat *time.Time, now time.Time, required bool) bool { + if iat == nil { + return !required + } + return now.After(*iat) || now.Equal(*iat) +} + +func verifyNbf(nbf *time.Time, now time.Time, required bool) bool { + if nbf == nil { + return !required + } + return now.After(*nbf) || now.Equal(*nbf) +} + +func verifyIss(iss string, cmp string, required bool) bool { + if iss == "" { + return !required + } + if subtle.ConstantTimeCompare([]byte(iss), []byte(cmp)) != 0 { + return true + } else { + return false + } +} diff --git a/vendor/github.com/dgrijalva/jwt-go/doc.go b/vendor/github.com/golang-jwt/jwt/v4/doc.go similarity index 100% rename from vendor/github.com/dgrijalva/jwt-go/doc.go rename to vendor/github.com/golang-jwt/jwt/v4/doc.go diff --git 
a/vendor/github.com/dgrijalva/jwt-go/ecdsa.go b/vendor/github.com/golang-jwt/jwt/v4/ecdsa.go similarity index 79% rename from vendor/github.com/dgrijalva/jwt-go/ecdsa.go rename to vendor/github.com/golang-jwt/jwt/v4/ecdsa.go index f97738124..eac023fc6 100644 --- a/vendor/github.com/dgrijalva/jwt-go/ecdsa.go +++ b/vendor/github.com/golang-jwt/jwt/v4/ecdsa.go @@ -13,7 +13,7 @@ var ( ErrECDSAVerification = errors.New("crypto/ecdsa: verification error") ) -// Implements the ECDSA family of signing methods signing methods +// SigningMethodECDSA implements the ECDSA family of signing methods. // Expects *ecdsa.PrivateKey for signing and *ecdsa.PublicKey for verification type SigningMethodECDSA struct { Name string @@ -53,7 +53,7 @@ func (m *SigningMethodECDSA) Alg() string { return m.Name } -// Implements the Verify method from SigningMethod +// Verify implements token verification for the SigningMethod. // For this verify method, key must be an ecdsa.PublicKey struct func (m *SigningMethodECDSA) Verify(signingString, signature string, key interface{}) error { var err error @@ -88,14 +88,14 @@ func (m *SigningMethodECDSA) Verify(signingString, signature string, key interfa hasher.Write([]byte(signingString)) // Verify the signature - if verifystatus := ecdsa.Verify(ecdsaKey, hasher.Sum(nil), r, s); verifystatus == true { + if verifystatus := ecdsa.Verify(ecdsaKey, hasher.Sum(nil), r, s); verifystatus { return nil - } else { - return ErrECDSAVerification } + + return ErrECDSAVerification } -// Implements the Sign method from SigningMethod +// Sign implements token signing for the SigningMethod. 
// For this signing method, key must be an ecdsa.PrivateKey struct func (m *SigningMethodECDSA) Sign(signingString string, key interface{}) (string, error) { // Get the key @@ -128,18 +128,12 @@ func (m *SigningMethodECDSA) Sign(signingString string, key interface{}) (string keyBytes += 1 } - // We serialize the outpus (r and s) into big-endian byte arrays and pad - // them with zeros on the left to make sure the sizes work out. Both arrays - // must be keyBytes long, and the output must be 2*keyBytes long. - rBytes := r.Bytes() - rBytesPadded := make([]byte, keyBytes) - copy(rBytesPadded[keyBytes-len(rBytes):], rBytes) - - sBytes := s.Bytes() - sBytesPadded := make([]byte, keyBytes) - copy(sBytesPadded[keyBytes-len(sBytes):], sBytes) - - out := append(rBytesPadded, sBytesPadded...) + // We serialize the outputs (r and s) into big-endian byte arrays + // padded with zeros on the left to make sure the sizes work out. + // Output must be 2*keyBytes long. + out := make([]byte, 2*keyBytes) + r.FillBytes(out[0:keyBytes]) // r is assigned to the first half of output. + s.FillBytes(out[keyBytes:]) // s is assigned to the second half of output. 
return EncodeSegment(out), nil } else { diff --git a/vendor/github.com/dgrijalva/jwt-go/ecdsa_utils.go b/vendor/github.com/golang-jwt/jwt/v4/ecdsa_utils.go similarity index 81% rename from vendor/github.com/dgrijalva/jwt-go/ecdsa_utils.go rename to vendor/github.com/golang-jwt/jwt/v4/ecdsa_utils.go index db9f4be7d..5700636d3 100644 --- a/vendor/github.com/dgrijalva/jwt-go/ecdsa_utils.go +++ b/vendor/github.com/golang-jwt/jwt/v4/ecdsa_utils.go @@ -8,11 +8,11 @@ import ( ) var ( - ErrNotECPublicKey = errors.New("Key is not a valid ECDSA public key") - ErrNotECPrivateKey = errors.New("Key is not a valid ECDSA private key") + ErrNotECPublicKey = errors.New("key is not a valid ECDSA public key") + ErrNotECPrivateKey = errors.New("key is not a valid ECDSA private key") ) -// Parse PEM encoded Elliptic Curve Private Key Structure +// ParseECPrivateKeyFromPEM parses a PEM encoded Elliptic Curve Private Key Structure func ParseECPrivateKeyFromPEM(key []byte) (*ecdsa.PrivateKey, error) { var err error @@ -39,7 +39,7 @@ func ParseECPrivateKeyFromPEM(key []byte) (*ecdsa.PrivateKey, error) { return pkey, nil } -// Parse PEM encoded PKCS1 or PKCS8 public key +// ParseECPublicKeyFromPEM parses a PEM encoded PKCS1 or PKCS8 public key func ParseECPublicKeyFromPEM(key []byte) (*ecdsa.PublicKey, error) { var err error diff --git a/vendor/github.com/golang-jwt/jwt/v4/ed25519.go b/vendor/github.com/golang-jwt/jwt/v4/ed25519.go new file mode 100644 index 000000000..07d3aacd6 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/ed25519.go @@ -0,0 +1,85 @@ +package jwt + +import ( + "errors" + + "crypto" + "crypto/ed25519" + "crypto/rand" +) + +var ( + ErrEd25519Verification = errors.New("ed25519: verification error") +) + +// SigningMethodEd25519 implements the EdDSA family. 
+// Expects ed25519.PrivateKey for signing and ed25519.PublicKey for verification +type SigningMethodEd25519 struct{} + +// Specific instance for EdDSA +var ( + SigningMethodEdDSA *SigningMethodEd25519 +) + +func init() { + SigningMethodEdDSA = &SigningMethodEd25519{} + RegisterSigningMethod(SigningMethodEdDSA.Alg(), func() SigningMethod { + return SigningMethodEdDSA + }) +} + +func (m *SigningMethodEd25519) Alg() string { + return "EdDSA" +} + +// Verify implements token verification for the SigningMethod. +// For this verify method, key must be an ed25519.PublicKey +func (m *SigningMethodEd25519) Verify(signingString, signature string, key interface{}) error { + var err error + var ed25519Key ed25519.PublicKey + var ok bool + + if ed25519Key, ok = key.(ed25519.PublicKey); !ok { + return ErrInvalidKeyType + } + + if len(ed25519Key) != ed25519.PublicKeySize { + return ErrInvalidKey + } + + // Decode the signature + var sig []byte + if sig, err = DecodeSegment(signature); err != nil { + return err + } + + // Verify the signature + if !ed25519.Verify(ed25519Key, []byte(signingString), sig) { + return ErrEd25519Verification + } + + return nil +} + +// Sign implements token signing for the SigningMethod. +// For this signing method, key must be an ed25519.PrivateKey +func (m *SigningMethodEd25519) Sign(signingString string, key interface{}) (string, error) { + var ed25519Key crypto.Signer + var ok bool + + if ed25519Key, ok = key.(crypto.Signer); !ok { + return "", ErrInvalidKeyType + } + + if _, ok := ed25519Key.Public().(ed25519.PublicKey); !ok { + return "", ErrInvalidKey + } + + // Sign the string and return the encoded result + // ed25519 performs a two-pass hash as part of its algorithm. 
Therefore, we need to pass a non-prehashed message into the Sign function, as indicated by crypto.Hash(0) + sig, err := ed25519Key.Sign(rand.Reader, []byte(signingString), crypto.Hash(0)) + if err != nil { + return "", err + } + return EncodeSegment(sig), nil +} diff --git a/vendor/github.com/golang-jwt/jwt/v4/ed25519_utils.go b/vendor/github.com/golang-jwt/jwt/v4/ed25519_utils.go new file mode 100644 index 000000000..cdb5e68e8 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/ed25519_utils.go @@ -0,0 +1,64 @@ +package jwt + +import ( + "crypto" + "crypto/ed25519" + "crypto/x509" + "encoding/pem" + "errors" +) + +var ( + ErrNotEdPrivateKey = errors.New("key is not a valid Ed25519 private key") + ErrNotEdPublicKey = errors.New("key is not a valid Ed25519 public key") +) + +// ParseEdPrivateKeyFromPEM parses a PEM-encoded Edwards curve private key +func ParseEdPrivateKeyFromPEM(key []byte) (crypto.PrivateKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil { + return nil, err + } + + var pkey ed25519.PrivateKey + var ok bool + if pkey, ok = parsedKey.(ed25519.PrivateKey); !ok { + return nil, ErrNotEdPrivateKey + } + + return pkey, nil +} + +// ParseEdPublicKeyFromPEM parses a PEM-encoded Edwards curve public key +func ParseEdPublicKeyFromPEM(key []byte) (crypto.PublicKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil { + return nil, err + } + + var pkey ed25519.PublicKey + var ok bool + if pkey, ok = parsedKey.(ed25519.PublicKey); !ok { + return nil, ErrNotEdPublicKey + } + + return pkey, nil +} diff 
--git a/vendor/github.com/dgrijalva/jwt-go/errors.go b/vendor/github.com/golang-jwt/jwt/v4/errors.go similarity index 88% rename from vendor/github.com/dgrijalva/jwt-go/errors.go rename to vendor/github.com/golang-jwt/jwt/v4/errors.go index 1c93024aa..f309878b3 100644 --- a/vendor/github.com/dgrijalva/jwt-go/errors.go +++ b/vendor/github.com/golang-jwt/jwt/v4/errors.go @@ -27,7 +27,7 @@ const ( ValidationErrorClaimsInvalid // Generic claims validation error ) -// Helper for constructing a ValidationError with a string error message +// NewValidationError is a helper for constructing a ValidationError with a string error message func NewValidationError(errorText string, errorFlags uint32) *ValidationError { return &ValidationError{ text: errorText, @@ -35,14 +35,14 @@ func NewValidationError(errorText string, errorFlags uint32) *ValidationError { } } -// The error from Parse if token is not valid +// ValidationError represents an error from Parse if token is not valid type ValidationError struct { Inner error // stores the error returned by external dependencies, i.e.: KeyFunc Errors uint32 // bitfield. see ValidationError... constants text string // errors that do not have a valid error just have text } -// Validation error is an error type +// Error is the implementation of the err interface. func (e ValidationError) Error() string { if e.Inner != nil { return e.Inner.Error() diff --git a/vendor/github.com/dgrijalva/jwt-go/hmac.go b/vendor/github.com/golang-jwt/jwt/v4/hmac.go similarity index 90% rename from vendor/github.com/dgrijalva/jwt-go/hmac.go rename to vendor/github.com/golang-jwt/jwt/v4/hmac.go index addbe5d40..011f68a27 100644 --- a/vendor/github.com/dgrijalva/jwt-go/hmac.go +++ b/vendor/github.com/golang-jwt/jwt/v4/hmac.go @@ -6,7 +6,7 @@ import ( "errors" ) -// Implements the HMAC-SHA family of signing methods signing methods +// SigningMethodHMAC implements the HMAC-SHA family of signing methods. 
// Expects key type of []byte for both signing and validation type SigningMethodHMAC struct { Name string @@ -45,7 +45,7 @@ func (m *SigningMethodHMAC) Alg() string { return m.Name } -// Verify the signature of HSXXX tokens. Returns nil if the signature is valid. +// Verify implements token verification for the SigningMethod. Returns nil if the signature is valid. func (m *SigningMethodHMAC) Verify(signingString, signature string, key interface{}) error { // Verify the key is the right type keyBytes, ok := key.([]byte) @@ -77,7 +77,7 @@ func (m *SigningMethodHMAC) Verify(signingString, signature string, key interfac return nil } -// Implements the Sign method from SigningMethod for this signing method. +// Sign implements token signing for the SigningMethod. // Key must be []byte func (m *SigningMethodHMAC) Sign(signingString string, key interface{}) (string, error) { if keyBytes, ok := key.([]byte); ok { diff --git a/vendor/github.com/golang-jwt/jwt/v4/map_claims.go b/vendor/github.com/golang-jwt/jwt/v4/map_claims.go new file mode 100644 index 000000000..e7da633b9 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/map_claims.go @@ -0,0 +1,148 @@ +package jwt + +import ( + "encoding/json" + "errors" + "time" + // "fmt" +) + +// MapClaims is a claims type that uses the map[string]interface{} for JSON decoding. +// This is the default claims type if you don't supply one +type MapClaims map[string]interface{} + +// VerifyAudience Compares the aud claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (m MapClaims) VerifyAudience(cmp string, req bool) bool { + var aud []string + switch v := m["aud"].(type) { + case string: + aud = append(aud, v) + case []string: + aud = v + case []interface{}: + for _, a := range v { + vs, ok := a.(string) + if !ok { + return false + } + aud = append(aud, vs) + } + } + return verifyAud(aud, cmp, req) +} + +// VerifyExpiresAt compares the exp claim against cmp (cmp <= exp). 
+// If req is false, it will return true, if exp is unset. +func (m MapClaims) VerifyExpiresAt(cmp int64, req bool) bool { + cmpTime := time.Unix(cmp, 0) + + v, ok := m["exp"] + if !ok { + return !req + } + + switch exp := v.(type) { + case float64: + if exp == 0 { + return verifyExp(nil, cmpTime, req) + } + + return verifyExp(&newNumericDateFromSeconds(exp).Time, cmpTime, req) + case json.Number: + v, _ := exp.Float64() + + return verifyExp(&newNumericDateFromSeconds(v).Time, cmpTime, req) + } + + return false +} + +// VerifyIssuedAt compares the exp claim against cmp (cmp >= iat). +// If req is false, it will return true, if iat is unset. +func (m MapClaims) VerifyIssuedAt(cmp int64, req bool) bool { + cmpTime := time.Unix(cmp, 0) + + v, ok := m["iat"] + if !ok { + return !req + } + + switch iat := v.(type) { + case float64: + if iat == 0 { + return verifyIat(nil, cmpTime, req) + } + + return verifyIat(&newNumericDateFromSeconds(iat).Time, cmpTime, req) + case json.Number: + v, _ := iat.Float64() + + return verifyIat(&newNumericDateFromSeconds(v).Time, cmpTime, req) + } + + return false +} + +// VerifyNotBefore compares the nbf claim against cmp (cmp >= nbf). +// If req is false, it will return true, if nbf is unset. +func (m MapClaims) VerifyNotBefore(cmp int64, req bool) bool { + cmpTime := time.Unix(cmp, 0) + + v, ok := m["nbf"] + if !ok { + return !req + } + + switch nbf := v.(type) { + case float64: + if nbf == 0 { + return verifyNbf(nil, cmpTime, req) + } + + return verifyNbf(&newNumericDateFromSeconds(nbf).Time, cmpTime, req) + case json.Number: + v, _ := nbf.Float64() + + return verifyNbf(&newNumericDateFromSeconds(v).Time, cmpTime, req) + } + + return false +} + +// VerifyIssuer compares the iss claim against cmp. 
+// If required is false, this method will return true if the value matches or is unset +func (m MapClaims) VerifyIssuer(cmp string, req bool) bool { + iss, _ := m["iss"].(string) + return verifyIss(iss, cmp, req) +} + +// Valid validates time based claims "exp, iat, nbf". +// There is no accounting for clock skew. +// As well, if any of the above claims are not in the token, it will still +// be considered a valid claim. +func (m MapClaims) Valid() error { + vErr := new(ValidationError) + now := TimeFunc().Unix() + + if !m.VerifyExpiresAt(now, false) { + vErr.Inner = errors.New("Token is expired") + vErr.Errors |= ValidationErrorExpired + } + + if !m.VerifyIssuedAt(now, false) { + vErr.Inner = errors.New("Token used before issued") + vErr.Errors |= ValidationErrorIssuedAt + } + + if !m.VerifyNotBefore(now, false) { + vErr.Inner = errors.New("Token is not valid yet") + vErr.Errors |= ValidationErrorNotValidYet + } + + if vErr.valid() { + return nil + } + + return vErr +} diff --git a/vendor/github.com/dgrijalva/jwt-go/none.go b/vendor/github.com/golang-jwt/jwt/v4/none.go similarity index 94% rename from vendor/github.com/dgrijalva/jwt-go/none.go rename to vendor/github.com/golang-jwt/jwt/v4/none.go index f04d189d0..f19835d20 100644 --- a/vendor/github.com/dgrijalva/jwt-go/none.go +++ b/vendor/github.com/golang-jwt/jwt/v4/none.go @@ -1,6 +1,6 @@ package jwt -// Implements the none signing method. This is required by the spec +// SigningMethodNone implements the none signing method. This is required by the spec // but you probably should never use it. 
var SigningMethodNone *signingMethodNone diff --git a/vendor/github.com/dgrijalva/jwt-go/parser.go b/vendor/github.com/golang-jwt/jwt/v4/parser.go similarity index 95% rename from vendor/github.com/dgrijalva/jwt-go/parser.go rename to vendor/github.com/golang-jwt/jwt/v4/parser.go index d6901d9ad..0c811f311 100644 --- a/vendor/github.com/dgrijalva/jwt-go/parser.go +++ b/vendor/github.com/golang-jwt/jwt/v4/parser.go @@ -13,7 +13,7 @@ type Parser struct { SkipClaimsValidation bool // Skip claims validation during token parsing } -// Parse, validate, and return a token. +// Parse parses, validates, and returns a token. // keyFunc will receive the parsed token and should return the key for validating. // If everything is kosher, err will be nil func (p *Parser) Parse(tokenString string, keyFunc Keyfunc) (*Token, error) { @@ -87,12 +87,12 @@ func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyf return token, vErr } -// WARNING: Don't use this method unless you know what you're doing +// ParseUnverified parses the token but doesn't validate the signature. // -// This method parses the token but doesn't validate the signature. It's only -// ever useful in cases where you know the signature is valid (because it has -// been checked previously in the stack) and you want to extract values from -// it. +// WARNING: Don't use this method unless you know what you're doing. +// +// It's only ever useful in cases where you know the signature is valid (because it has +// been checked previously in the stack) and you want to extract values from it. 
func (p *Parser) ParseUnverified(tokenString string, claims Claims) (token *Token, parts []string, err error) { parts = strings.Split(tokenString, ".") if len(parts) != 3 { diff --git a/vendor/github.com/dgrijalva/jwt-go/rsa.go b/vendor/github.com/golang-jwt/jwt/v4/rsa.go similarity index 92% rename from vendor/github.com/dgrijalva/jwt-go/rsa.go rename to vendor/github.com/golang-jwt/jwt/v4/rsa.go index e4caf1ca4..b910b19c0 100644 --- a/vendor/github.com/dgrijalva/jwt-go/rsa.go +++ b/vendor/github.com/golang-jwt/jwt/v4/rsa.go @@ -6,7 +6,7 @@ import ( "crypto/rsa" ) -// Implements the RSA family of signing methods signing methods +// SigningMethodRSA implements the RSA family of signing methods. // Expects *rsa.PrivateKey for signing and *rsa.PublicKey for validation type SigningMethodRSA struct { Name string @@ -44,7 +44,7 @@ func (m *SigningMethodRSA) Alg() string { return m.Name } -// Implements the Verify method from SigningMethod +// Verify implements token verification for the SigningMethod // For this signing method, must be an *rsa.PublicKey structure. func (m *SigningMethodRSA) Verify(signingString, signature string, key interface{}) error { var err error @@ -73,7 +73,7 @@ func (m *SigningMethodRSA) Verify(signingString, signature string, key interface return rsa.VerifyPKCS1v15(rsaKey, m.Hash, hasher.Sum(nil), sig) } -// Implements the Sign method from SigningMethod +// Sign implements token signing for the SigningMethod // For this signing method, must be an *rsa.PrivateKey structure. 
func (m *SigningMethodRSA) Sign(signingString string, key interface{}) (string, error) { var rsaKey *rsa.PrivateKey diff --git a/vendor/github.com/dgrijalva/jwt-go/rsa_pss.go b/vendor/github.com/golang-jwt/jwt/v4/rsa_pss.go similarity index 94% rename from vendor/github.com/dgrijalva/jwt-go/rsa_pss.go rename to vendor/github.com/golang-jwt/jwt/v4/rsa_pss.go index c01470864..5a8502feb 100644 --- a/vendor/github.com/dgrijalva/jwt-go/rsa_pss.go +++ b/vendor/github.com/golang-jwt/jwt/v4/rsa_pss.go @@ -8,7 +8,7 @@ import ( "crypto/rsa" ) -// Implements the RSAPSS family of signing methods signing methods +// SigningMethodRSAPSS implements the RSAPSS family of signing methods signing methods type SigningMethodRSAPSS struct { *SigningMethodRSA Options *rsa.PSSOptions @@ -79,7 +79,7 @@ func init() { }) } -// Implements the Verify method from SigningMethod +// Verify implements token verification for the SigningMethod. // For this verify method, key must be an rsa.PublicKey struct func (m *SigningMethodRSAPSS) Verify(signingString, signature string, key interface{}) error { var err error @@ -113,7 +113,7 @@ func (m *SigningMethodRSAPSS) Verify(signingString, signature string, key interf return rsa.VerifyPSS(rsaKey, m.Hash, hasher.Sum(nil), sig, opts) } -// Implements the Sign method from SigningMethod +// Sign implements token signing for the SigningMethod. 
// For this signing method, key must be an rsa.PrivateKey struct func (m *SigningMethodRSAPSS) Sign(signingString string, key interface{}) (string, error) { var rsaKey *rsa.PrivateKey diff --git a/vendor/github.com/dgrijalva/jwt-go/rsa_utils.go b/vendor/github.com/golang-jwt/jwt/v4/rsa_utils.go similarity index 72% rename from vendor/github.com/dgrijalva/jwt-go/rsa_utils.go rename to vendor/github.com/golang-jwt/jwt/v4/rsa_utils.go index 14c78c292..1966c450b 100644 --- a/vendor/github.com/dgrijalva/jwt-go/rsa_utils.go +++ b/vendor/github.com/golang-jwt/jwt/v4/rsa_utils.go @@ -8,12 +8,12 @@ import ( ) var ( - ErrKeyMustBePEMEncoded = errors.New("Invalid Key: Key must be a PEM encoded PKCS1 or PKCS8 key") - ErrNotRSAPrivateKey = errors.New("Key is not a valid RSA private key") - ErrNotRSAPublicKey = errors.New("Key is not a valid RSA public key") + ErrKeyMustBePEMEncoded = errors.New("invalid key: Key must be a PEM encoded PKCS1 or PKCS8 key") + ErrNotRSAPrivateKey = errors.New("key is not a valid RSA private key") + ErrNotRSAPublicKey = errors.New("key is not a valid RSA public key") ) -// Parse PEM encoded PKCS1 or PKCS8 private key +// ParseRSAPrivateKeyFromPEM parses a PEM encoded PKCS1 or PKCS8 private key func ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error) { var err error @@ -39,7 +39,11 @@ func ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error) { return pkey, nil } -// Parse PEM encoded PKCS1 or PKCS8 private key protected with password +// ParseRSAPrivateKeyFromPEMWithPassword parses a PEM encoded PKCS1 or PKCS8 private key protected with password +// +// Deprecated: This function is deprecated and should not be used anymore. It uses the deprecated x509.DecryptPEMBlock +// function, which was deprecated since RFC 1423 is regarded insecure by design. Unfortunately, there is no alternative +// in the Go standard library for now. See https://github.com/golang/go/issues/8860. 
func ParseRSAPrivateKeyFromPEMWithPassword(key []byte, password string) (*rsa.PrivateKey, error) { var err error @@ -71,7 +75,7 @@ func ParseRSAPrivateKeyFromPEMWithPassword(key []byte, password string) (*rsa.Pr return pkey, nil } -// Parse PEM encoded PKCS1 or PKCS8 public key +// ParseRSAPublicKeyFromPEM parses a PEM encoded PKCS1 or PKCS8 public key func ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error) { var err error diff --git a/vendor/github.com/dgrijalva/jwt-go/signing_method.go b/vendor/github.com/golang-jwt/jwt/v4/signing_method.go similarity index 79% rename from vendor/github.com/dgrijalva/jwt-go/signing_method.go rename to vendor/github.com/golang-jwt/jwt/v4/signing_method.go index ed1f212b2..3269170f3 100644 --- a/vendor/github.com/dgrijalva/jwt-go/signing_method.go +++ b/vendor/github.com/golang-jwt/jwt/v4/signing_method.go @@ -7,14 +7,14 @@ import ( var signingMethods = map[string]func() SigningMethod{} var signingMethodLock = new(sync.RWMutex) -// Implement SigningMethod to add new methods for signing or verifying tokens. +// SigningMethod can be used add new methods for signing or verifying tokens. type SigningMethod interface { Verify(signingString, signature string, key interface{}) error // Returns nil if signature is valid Sign(signingString string, key interface{}) (string, error) // Returns encoded signature or error Alg() string // returns the alg identifier for this method (example: 'HS256') } -// Register the "alg" name and a factory function for signing method. +// RegisterSigningMethod registers the "alg" name and a factory function for signing method. 
// This is typically done during init() in the method's implementation func RegisterSigningMethod(alg string, f func() SigningMethod) { signingMethodLock.Lock() @@ -23,7 +23,7 @@ func RegisterSigningMethod(alg string, f func() SigningMethod) { signingMethods[alg] = f } -// Get a signing method from an "alg" string +// GetSigningMethod retrieves a signing method from an "alg" string func GetSigningMethod(alg string) (method SigningMethod) { signingMethodLock.RLock() defer signingMethodLock.RUnlock() diff --git a/vendor/github.com/golang-jwt/jwt/v4/staticcheck.conf b/vendor/github.com/golang-jwt/jwt/v4/staticcheck.conf new file mode 100644 index 000000000..53745d51d --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/staticcheck.conf @@ -0,0 +1 @@ +checks = ["all", "-ST1000", "-ST1003", "-ST1016", "-ST1023"] diff --git a/vendor/github.com/dgrijalva/jwt-go/token.go b/vendor/github.com/golang-jwt/jwt/v4/token.go similarity index 74% rename from vendor/github.com/dgrijalva/jwt-go/token.go rename to vendor/github.com/golang-jwt/jwt/v4/token.go index d637e0867..b896acb0b 100644 --- a/vendor/github.com/dgrijalva/jwt-go/token.go +++ b/vendor/github.com/golang-jwt/jwt/v4/token.go @@ -12,13 +12,13 @@ import ( // server uses a different time zone than your tokens. var TimeFunc = time.Now -// Parse methods use this callback function to supply +// Keyfunc will be used by the Parse methods as a callback function to supply // the key for verification. The function receives the parsed, // but unverified Token. This allows you to use properties in the // Header of the token (such as `kid`) to identify which key to use. type Keyfunc func(*Token) (interface{}, error) -// A JWT Token. Different fields will be used depending on whether you're +// Token represents a JWT Token. Different fields will be used depending on whether you're // creating or parsing/verifying a token. type Token struct { Raw string // The raw token. 
Populated when you Parse a token @@ -29,7 +29,7 @@ type Token struct { Valid bool // Is the token valid? Populated when you Parse/Verify a token } -// Create a new Token. Takes a signing method +// New creates a new Token. Takes a signing method func New(method SigningMethod) *Token { return NewWithClaims(method, MapClaims{}) } @@ -45,7 +45,7 @@ func NewWithClaims(method SigningMethod, claims Claims) *Token { } } -// Get the complete, signed token +// SignedString retrieves the complete, signed token func (t *Token) SignedString(key interface{}) (string, error) { var sig, sstr string var err error @@ -58,14 +58,14 @@ func (t *Token) SignedString(key interface{}) (string, error) { return strings.Join([]string{sstr, sig}, "."), nil } -// Generate the signing string. This is the +// SigningString generates the signing string. This is the // most expensive part of the whole deal. Unless you // need this for something special, just go straight for // the SignedString. func (t *Token) SigningString() (string, error) { var err error parts := make([]string, 2) - for i, _ := range parts { + for i := range parts { var jsonValue []byte if i == 0 { if jsonValue, err = json.Marshal(t.Header); err != nil { @@ -82,7 +82,7 @@ func (t *Token) SigningString() (string, error) { return strings.Join(parts, "."), nil } -// Parse, validate, and return a token. +// Parse parses, validates, and returns a token. // keyFunc will receive the parsed token and should return the key for validating. 
// If everything is kosher, err will be nil func Parse(tokenString string, keyFunc Keyfunc) (*Token, error) { @@ -93,16 +93,18 @@ func ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token return new(Parser).ParseWithClaims(tokenString, claims, keyFunc) } -// Encode JWT specific base64url encoding with padding stripped +// EncodeSegment encodes a JWT specific base64url encoding with padding stripped +// +// Deprecated: In a future release, we will demote this function to a non-exported function, since it +// should only be used internally func EncodeSegment(seg []byte) string { - return strings.TrimRight(base64.URLEncoding.EncodeToString(seg), "=") + return base64.RawURLEncoding.EncodeToString(seg) } -// Decode JWT specific base64url encoding with padding stripped +// DecodeSegment decodes a JWT specific base64url encoding with padding stripped +// +// Deprecated: In a future release, we will demote this function to a non-exported function, since it +// should only be used internally func DecodeSegment(seg string) ([]byte, error) { - if l := len(seg) % 4; l > 0 { - seg += strings.Repeat("=", 4-l) - } - - return base64.URLEncoding.DecodeString(seg) + return base64.RawURLEncoding.DecodeString(seg) } diff --git a/vendor/github.com/golang-jwt/jwt/v4/types.go b/vendor/github.com/golang-jwt/jwt/v4/types.go new file mode 100644 index 000000000..15c39a302 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/types.go @@ -0,0 +1,125 @@ +package jwt + +import ( + "encoding/json" + "fmt" + "reflect" + "strconv" + "time" +) + +// TimePrecision sets the precision of times and dates within this library. +// This has an influence on the precision of times when comparing expiry or +// other related time fields. Furthermore, it is also the precision of times +// when serializing. +// +// For backwards compatibility the default precision is set to seconds, so that +// no fractional timestamps are generated. 
+var TimePrecision = time.Second + +// MarshalSingleStringAsArray modifies the behaviour of the ClaimStrings type, especially +// its MarshalJSON function. +// +// If it is set to true (the default), it will always serialize the type as an +// array of strings, even if it just contains one element, defaulting to the behaviour +// of the underlying []string. If it is set to false, it will serialize to a single +// string, if it contains one element. Otherwise, it will serialize to an array of strings. +var MarshalSingleStringAsArray = true + +// NumericDate represents a JSON numeric date value, as referenced at +// https://datatracker.ietf.org/doc/html/rfc7519#section-2. +type NumericDate struct { + time.Time +} + +// NewNumericDate constructs a new *NumericDate from a standard library time.Time struct. +// It will truncate the timestamp according to the precision specified in TimePrecision. +func NewNumericDate(t time.Time) *NumericDate { + return &NumericDate{t.Truncate(TimePrecision)} +} + +// newNumericDateFromSeconds creates a new *NumericDate out of a float64 representing a +// UNIX epoch with the float fraction representing non-integer seconds. +func newNumericDateFromSeconds(f float64) *NumericDate { + return NewNumericDate(time.Unix(0, int64(f*float64(time.Second)))) +} + +// MarshalJSON is an implementation of the json.RawMessage interface and serializes the UNIX epoch +// represented in NumericDate to a byte array, using the precision specified in TimePrecision. +func (date NumericDate) MarshalJSON() (b []byte, err error) { + f := float64(date.Truncate(TimePrecision).UnixNano()) / float64(time.Second) + + return []byte(strconv.FormatFloat(f, 'f', -1, 64)), nil +} + +// UnmarshalJSON is an implementation of the json.RawMessage interface and deserializses a +// NumericDate from a JSON representation, i.e. a json.Number. This number represents an UNIX epoch +// with either integer or non-integer seconds. 
+func (date *NumericDate) UnmarshalJSON(b []byte) (err error) { + var ( + number json.Number + f float64 + ) + + if err = json.Unmarshal(b, &number); err != nil { + return fmt.Errorf("could not parse NumericData: %w", err) + } + + if f, err = number.Float64(); err != nil { + return fmt.Errorf("could not convert json number value to float: %w", err) + } + + n := newNumericDateFromSeconds(f) + *date = *n + + return nil +} + +// ClaimStrings is basically just a slice of strings, but it can be either serialized from a string array or just a string. +// This type is necessary, since the "aud" claim can either be a single string or an array. +type ClaimStrings []string + +func (s *ClaimStrings) UnmarshalJSON(data []byte) (err error) { + var value interface{} + + if err = json.Unmarshal(data, &value); err != nil { + return err + } + + var aud []string + + switch v := value.(type) { + case string: + aud = append(aud, v) + case []string: + aud = ClaimStrings(v) + case []interface{}: + for _, vv := range v { + vs, ok := vv.(string) + if !ok { + return &json.UnsupportedTypeError{Type: reflect.TypeOf(vv)} + } + aud = append(aud, vs) + } + case nil: + return nil + default: + return &json.UnsupportedTypeError{Type: reflect.TypeOf(v)} + } + + *s = aud + + return +} + +func (s ClaimStrings) MarshalJSON() (b []byte, err error) { + // This handles a special case in the JWT RFC. If the string array, e.g. used by the "aud" field, + // only contains one element, it MAY be serialized as a single string. This may or may not be + // desired based on the ecosystem of other JWT library used, so we make it configurable by the + // variable MarshalSingleStringAsArray. 
+ if len(s) == 1 && !MarshalSingleStringAsArray { + return json.Marshal(s[0]) + } + + return json.Marshal([]string(s)) +} diff --git a/vendor/github.com/golang/glog/README b/vendor/github.com/golang/glog/README deleted file mode 100644 index 387b4eb68..000000000 --- a/vendor/github.com/golang/glog/README +++ /dev/null @@ -1,44 +0,0 @@ -glog -==== - -Leveled execution logs for Go. - -This is an efficient pure Go implementation of leveled logs in the -manner of the open source C++ package - https://github.com/google/glog - -By binding methods to booleans it is possible to use the log package -without paying the expense of evaluating the arguments to the log. -Through the -vmodule flag, the package also provides fine-grained -control over logging at the file level. - -The comment from glog.go introduces the ideas: - - Package glog implements logging analogous to the Google-internal - C++ INFO/ERROR/V setup. It provides functions Info, Warning, - Error, Fatal, plus formatting variants such as Infof. It - also provides V-style logging controlled by the -v and - -vmodule=file=2 flags. - - Basic examples: - - glog.Info("Prepare to repel boarders") - - glog.Fatalf("Initialization failed: %s", err) - - See the documentation for the V function for an explanation - of these examples: - - if glog.V(2) { - glog.Info("Starting transaction...") - } - - glog.V(2).Infoln("Processed", nItems, "elements") - - -The repository contains an open source version of the log package -used inside Google. The master copy of the source lives inside -Google, not here. The code in this repo is for export only and is not itself -under development. Feature requests will be ignored. - -Send bug reports to golang-nuts@googlegroups.com. 
diff --git a/vendor/github.com/golang/glog/README.md b/vendor/github.com/golang/glog/README.md new file mode 100644 index 000000000..a4f73883b --- /dev/null +++ b/vendor/github.com/golang/glog/README.md @@ -0,0 +1,36 @@ +# glog + +[![PkgGoDev](https://pkg.go.dev/badge/github.com/golang/glog)](https://pkg.go.dev/github.com/golang/glog) + +Leveled execution logs for Go. + +This is an efficient pure Go implementation of leveled logs in the +manner of the open source C++ package [_glog_](https://github.com/google/glog). + +By binding methods to booleans it is possible to use the log package without paying the expense of evaluating the arguments to the log. Through the `-vmodule` flag, the package also provides fine-grained +control over logging at the file level. + +The comment from `glog.go` introduces the ideas: + +Package _glog_ implements logging analogous to the Google-internal C++ INFO/ERROR/V setup. It provides the functions Info, Warning, Error, Fatal, plus formatting variants such as Infof. It also provides V-style loggingcontrolled by the `-v` and `-vmodule=file=2` flags. + +Basic examples: + +```go +glog.Info("Prepare to repel boarders") + +glog.Fatalf("Initialization failed: %s", err) +``` + +See the documentation for the V function for an explanation of these examples: + +```go +if glog.V(2) { + glog.Info("Starting transaction...") +} +glog.V(2).Infoln("Processed", nItems, "elements") +``` + +The repository contains an open source version of the log package used inside Google. The master copy of the source lives inside Google, not here. The code in this repo is for export only and is not itself under development. Feature requests will be ignored. + +Send bug reports to golang-nuts@googlegroups.com. 
diff --git a/vendor/github.com/golang/glog/glog.go b/vendor/github.com/golang/glog/glog.go index 54bd7afdc..718c34f88 100644 --- a/vendor/github.com/golang/glog/glog.go +++ b/vendor/github.com/golang/glog/glog.go @@ -879,7 +879,7 @@ const flushInterval = 30 * time.Second // flushDaemon periodically flushes the log file buffers. func (l *loggingT) flushDaemon() { - for _ = range time.NewTicker(flushInterval).C { + for range time.NewTicker(flushInterval).C { l.lockAndFlushAll() } } @@ -994,7 +994,7 @@ type Verbose bool // // Whether an individual call to V generates a log record depends on the setting of // the -v and --vmodule flags; both are off by default. If the level in the call to -// V is at least the value of -v, or of -vmodule for the source file containing the +// V is at most the value of -v, or of -vmodule for the source file containing the // call, the V call will log. func V(level Level) Verbose { // This function tries hard to be cheap unless there's work to do. diff --git a/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go b/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go deleted file mode 100644 index 8d82abe21..000000000 --- a/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go +++ /dev/null @@ -1,78 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: github.com/golang/protobuf/ptypes/struct/struct.proto - -package structpb - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - structpb "google.golang.org/protobuf/types/known/structpb" - reflect "reflect" -) - -// Symbols defined in public import of google/protobuf/struct.proto. 
- -type NullValue = structpb.NullValue - -const NullValue_NULL_VALUE = structpb.NullValue_NULL_VALUE - -var NullValue_name = structpb.NullValue_name -var NullValue_value = structpb.NullValue_value - -type Struct = structpb.Struct -type Value = structpb.Value -type Value_NullValue = structpb.Value_NullValue -type Value_NumberValue = structpb.Value_NumberValue -type Value_StringValue = structpb.Value_StringValue -type Value_BoolValue = structpb.Value_BoolValue -type Value_StructValue = structpb.Value_StructValue -type Value_ListValue = structpb.Value_ListValue -type ListValue = structpb.ListValue - -var File_github_com_golang_protobuf_ptypes_struct_struct_proto protoreflect.FileDescriptor - -var file_github_com_golang_protobuf_ptypes_struct_struct_proto_rawDesc = []byte{ - 0x0a, 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, - 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, - 0x70, 0x65, 0x73, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, - 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x33, 0x5a, 0x31, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, - 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, - 0x74, 0x3b, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x70, 0x62, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x33, -} - -var file_github_com_golang_protobuf_ptypes_struct_struct_proto_goTypes = []interface{}{} -var file_github_com_golang_protobuf_ptypes_struct_struct_proto_depIdxs = []int32{ - 0, // [0:0] is the sub-list for method output_type - 0, // [0:0] is the sub-list for method input_type - 0, // [0:0] is the 
sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_github_com_golang_protobuf_ptypes_struct_struct_proto_init() } -func file_github_com_golang_protobuf_ptypes_struct_struct_proto_init() { - if File_github_com_golang_protobuf_ptypes_struct_struct_proto != nil { - return - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_github_com_golang_protobuf_ptypes_struct_struct_proto_rawDesc, - NumEnums: 0, - NumMessages: 0, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_github_com_golang_protobuf_ptypes_struct_struct_proto_goTypes, - DependencyIndexes: file_github_com_golang_protobuf_ptypes_struct_struct_proto_depIdxs, - }.Build() - File_github_com_golang_protobuf_ptypes_struct_struct_proto = out.File - file_github_com_golang_protobuf_ptypes_struct_struct_proto_rawDesc = nil - file_github_com_golang_protobuf_ptypes_struct_struct_proto_goTypes = nil - file_github_com_golang_protobuf_ptypes_struct_struct_proto_depIdxs = nil -} diff --git a/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go b/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go deleted file mode 100644 index cc40f27ad..000000000 --- a/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go +++ /dev/null @@ -1,71 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: github.com/golang/protobuf/ptypes/wrappers/wrappers.proto - -package wrappers - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" - reflect "reflect" -) - -// Symbols defined in public import of google/protobuf/wrappers.proto. 
- -type DoubleValue = wrapperspb.DoubleValue -type FloatValue = wrapperspb.FloatValue -type Int64Value = wrapperspb.Int64Value -type UInt64Value = wrapperspb.UInt64Value -type Int32Value = wrapperspb.Int32Value -type UInt32Value = wrapperspb.UInt32Value -type BoolValue = wrapperspb.BoolValue -type StringValue = wrapperspb.StringValue -type BytesValue = wrapperspb.BytesValue - -var File_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto protoreflect.FileDescriptor - -var file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_rawDesc = []byte{ - 0x0a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, - 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, - 0x70, 0x65, 0x73, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2f, 0x77, 0x72, 0x61, - 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, - 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x35, 0x5a, 0x33, 0x67, - 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, - 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73, - 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x3b, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, - 0x72, 0x73, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_goTypes = []interface{}{} -var file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_depIdxs = []int32{ - 0, // [0:0] is the sub-list for method output_type - 0, // [0:0] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { 
file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_init() } -func file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_init() { - if File_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto != nil { - return - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_rawDesc, - NumEnums: 0, - NumMessages: 0, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_goTypes, - DependencyIndexes: file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_depIdxs, - }.Build() - File_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto = out.File - file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_rawDesc = nil - file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_goTypes = nil - file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_depIdxs = nil -} diff --git a/vendor/github.com/golang/snappy/go.mod b/vendor/github.com/golang/snappy/go.mod deleted file mode 100644 index f6406bb2c..000000000 --- a/vendor/github.com/golang/snappy/go.mod +++ /dev/null @@ -1 +0,0 @@ -module github.com/golang/snappy diff --git a/vendor/github.com/gorilla/handlers/go.mod b/vendor/github.com/gorilla/handlers/go.mod deleted file mode 100644 index 58e6a85ec..000000000 --- a/vendor/github.com/gorilla/handlers/go.mod +++ /dev/null @@ -1,5 +0,0 @@ -module github.com/gorilla/handlers - -go 1.14 - -require github.com/felixge/httpsnoop v1.0.1 diff --git a/vendor/github.com/gorilla/handlers/go.sum b/vendor/github.com/gorilla/handlers/go.sum deleted file mode 100644 index 8c2645804..000000000 --- a/vendor/github.com/gorilla/handlers/go.sum +++ /dev/null @@ -1,2 +0,0 @@ -github.com/felixge/httpsnoop v1.0.1 h1:lvB5Jl89CsZtGIWuTcDM1E/vkVs49/Ml7JJe07l8SPQ= -github.com/felixge/httpsnoop v1.0.1/go.mod 
h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= diff --git a/vendor/github.com/gorilla/mux/go.mod b/vendor/github.com/gorilla/mux/go.mod deleted file mode 100644 index df170a399..000000000 --- a/vendor/github.com/gorilla/mux/go.mod +++ /dev/null @@ -1,3 +0,0 @@ -module github.com/gorilla/mux - -go 1.12 diff --git a/vendor/github.com/gorilla/websocket/go.mod b/vendor/github.com/gorilla/websocket/go.mod deleted file mode 100644 index 1a7afd502..000000000 --- a/vendor/github.com/gorilla/websocket/go.mod +++ /dev/null @@ -1,3 +0,0 @@ -module github.com/gorilla/websocket - -go 1.12 diff --git a/vendor/github.com/gorilla/websocket/go.sum b/vendor/github.com/gorilla/websocket/go.sum deleted file mode 100644 index e69de29bb..000000000 diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/casing/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/casing/BUILD.bazel index 31481939d..672b35647 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/casing/BUILD.bazel +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/casing/BUILD.bazel @@ -1,8 +1,14 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( - name = "go_default_library", + name = "casing", srcs = ["camel.go"], importpath = "github.com/grpc-ecosystem/grpc-gateway/v2/internal/casing", visibility = ["//:__subpackages__"], ) + +alias( + name = "go_default_library", + actual = ":casing", + visibility = ["//:__subpackages__"], +) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/codegenerator/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/codegenerator/BUILD.bazel index 473f329a6..679fa4767 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/codegenerator/BUILD.bazel +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/codegenerator/BUILD.bazel @@ -3,7 +3,7 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") package(default_visibility = 
["//visibility:public"]) go_library( - name = "go_default_library", + name = "codegenerator", srcs = [ "doc.go", "parse_req.go", @@ -11,20 +11,26 @@ go_library( ], importpath = "github.com/grpc-ecosystem/grpc-gateway/v2/internal/codegenerator", deps = [ - "@org_golang_google_protobuf//compiler/protogen:go_default_library", - "@org_golang_google_protobuf//proto:go_default_library", - "@org_golang_google_protobuf//types/pluginpb:go_default_library", + "@org_golang_google_protobuf//compiler/protogen", + "@org_golang_google_protobuf//proto", + "@org_golang_google_protobuf//types/pluginpb", ], ) go_test( - name = "go_default_test", + name = "codegenerator_test", srcs = ["parse_req_test.go"], - embed = [":go_default_library"], + embed = [":codegenerator"], deps = [ - "@com_github_google_go_cmp//cmp:go_default_library", - "@org_golang_google_protobuf//proto:go_default_library", - "@org_golang_google_protobuf//testing/protocmp:go_default_library", - "@org_golang_google_protobuf//types/pluginpb:go_default_library", + "@com_github_google_go_cmp//cmp", + "@org_golang_google_protobuf//proto", + "@org_golang_google_protobuf//testing/protocmp", + "@org_golang_google_protobuf//types/pluginpb", ], ) + +alias( + name = "go_default_library", + actual = ":codegenerator", + visibility = ["//:__subpackages__"], +) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/descriptor/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/descriptor/BUILD.bazel index 672d67451..b9473e24f 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/descriptor/BUILD.bazel +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/descriptor/BUILD.bazel @@ -3,7 +3,7 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") package(default_visibility = ["//visibility:public"]) go_library( - name = "go_default_library", + name = "descriptor", srcs = [ "grpc_api_configuration.go", "openapi_configuration.go", @@ -13,25 +13,25 @@ go_library( 
], importpath = "github.com/grpc-ecosystem/grpc-gateway/v2/internal/descriptor", deps = [ - "//internal/casing:go_default_library", - "//internal/codegenerator:go_default_library", - "//internal/descriptor/apiconfig:go_default_library", - "//internal/descriptor/openapiconfig:go_default_library", - "//internal/httprule:go_default_library", - "//protoc-gen-openapiv2/options:go_default_library", - "@com_github_ghodss_yaml//:go_default_library", - "@com_github_golang_glog//:go_default_library", + "//internal/casing", + "//internal/codegenerator", + "//internal/descriptor/apiconfig", + "//internal/descriptor/openapiconfig", + "//internal/httprule", + "//protoc-gen-openapiv2/options", + "@com_github_ghodss_yaml//:yaml", + "@com_github_golang_glog//:glog", "@go_googleapis//google/api:annotations_go_proto", - "@org_golang_google_protobuf//compiler/protogen:go_default_library", - "@org_golang_google_protobuf//encoding/protojson:go_default_library", - "@org_golang_google_protobuf//proto:go_default_library", - "@org_golang_google_protobuf//types/descriptorpb:go_default_library", - "@org_golang_google_protobuf//types/pluginpb:go_default_library", + "@org_golang_google_protobuf//compiler/protogen", + "@org_golang_google_protobuf//encoding/protojson", + "@org_golang_google_protobuf//proto", + "@org_golang_google_protobuf//types/descriptorpb", + "@org_golang_google_protobuf//types/pluginpb", ], ) go_test( - name = "go_default_test", + name = "descriptor_test", size = "small", srcs = [ "grpc_api_configuration_test.go", @@ -40,15 +40,21 @@ go_test( "services_test.go", "types_test.go", ], - embed = [":go_default_library"], + embed = [":descriptor"], deps = [ - "//internal/descriptor/openapiconfig:go_default_library", - "//internal/httprule:go_default_library", - "//protoc-gen-openapiv2/options:go_default_library", - "@org_golang_google_protobuf//compiler/protogen:go_default_library", - "@org_golang_google_protobuf//encoding/prototext:go_default_library", - 
"@org_golang_google_protobuf//proto:go_default_library", - "@org_golang_google_protobuf//types/descriptorpb:go_default_library", - "@org_golang_google_protobuf//types/pluginpb:go_default_library", + "//internal/descriptor/openapiconfig", + "//internal/httprule", + "//protoc-gen-openapiv2/options", + "@org_golang_google_protobuf//compiler/protogen", + "@org_golang_google_protobuf//encoding/prototext", + "@org_golang_google_protobuf//proto", + "@org_golang_google_protobuf//types/descriptorpb", + "@org_golang_google_protobuf//types/pluginpb", ], ) + +alias( + name = "go_default_library", + actual = ":descriptor", + visibility = ["//:__subpackages__"], +) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/descriptor/apiconfig/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/descriptor/apiconfig/BUILD.bazel index 80dc57a2e..9fe7533ca 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/descriptor/apiconfig/BUILD.bazel +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/descriptor/apiconfig/BUILD.bazel @@ -23,7 +23,13 @@ go_proto_library( ) go_library( - name = "go_default_library", + name = "apiconfig", embed = [":apiconfig_go_proto"], importpath = "github.com/grpc-ecosystem/grpc-gateway/v2/internal/descriptor/apiconfig", ) + +alias( + name = "go_default_library", + actual = ":apiconfig", + visibility = ["//:__subpackages__"], +) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/descriptor/apiconfig/apiconfig.pb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/descriptor/apiconfig/apiconfig.pb.go index dbae4de16..c13ec108e 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/descriptor/apiconfig/apiconfig.pb.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/descriptor/apiconfig/apiconfig.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.25.0-devel -// protoc v3.13.0 +// protoc-gen-go v1.27.1 +// protoc v3.17.3 // source: internal/descriptor/apiconfig/apiconfig.proto package apiconfig diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/descriptor/apiconfig/apiconfig.swagger.json b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/descriptor/apiconfig/apiconfig.swagger.json index 685eb474e..4ce32e78e 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/descriptor/apiconfig/apiconfig.swagger.json +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/descriptor/apiconfig/apiconfig.swagger.json @@ -15,14 +15,11 @@ "protobufAny": { "type": "object", "properties": { - "typeUrl": { + "@type": { "type": "string" - }, - "value": { - "type": "string", - "format": "byte" } - } + }, + "additionalProperties": {} }, "rpcStatus": { "type": "object", diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/descriptor/openapiconfig/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/descriptor/openapiconfig/BUILD.bazel index 29156c846..628983396 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/descriptor/openapiconfig/BUILD.bazel +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/descriptor/openapiconfig/BUILD.bazel @@ -15,12 +15,18 @@ go_proto_library( importpath = "github.com/grpc-ecosystem/grpc-gateway/v2/internal/descriptor/openapiconfig", proto = ":openapiconfig_proto", visibility = ["//:__subpackages__"], - deps = ["//protoc-gen-openapiv2/options:go_default_library"], + deps = ["//protoc-gen-openapiv2/options"], ) go_library( - name = "go_default_library", + name = "openapiconfig", embed = [":openapiconfig_go_proto"], importpath = "github.com/grpc-ecosystem/grpc-gateway/v2/internal/descriptor/openapiconfig", visibility = ["//:__subpackages__"], ) + +alias( + name = "go_default_library", + actual = ":openapiconfig", + visibility = ["//:__subpackages__"], 
+) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/descriptor/openapiconfig/openapiconfig.pb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/descriptor/openapiconfig/openapiconfig.pb.go index 02605ed4e..9c0fa7fbc 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/descriptor/openapiconfig/openapiconfig.pb.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/descriptor/openapiconfig/openapiconfig.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0-devel -// protoc v3.13.0 +// protoc-gen-go v1.27.1 +// protoc v3.17.3 // source: internal/descriptor/openapiconfig/openapiconfig.proto package openapiconfig diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/descriptor/openapiconfig/openapiconfig.swagger.json b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/descriptor/openapiconfig/openapiconfig.swagger.json index c5388481f..a48ccf649 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/descriptor/openapiconfig/openapiconfig.swagger.json +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/descriptor/openapiconfig/openapiconfig.swagger.json @@ -15,14 +15,11 @@ "protobufAny": { "type": "object", "properties": { - "typeUrl": { + "@type": { "type": "string" - }, - "value": { - "type": "string", - "format": "byte" } - } + }, + "additionalProperties": {} }, "rpcStatus": { "type": "object", diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/descriptor/registry.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/descriptor/registry.go index 64245835a..7576e0602 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/descriptor/registry.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/descriptor/registry.go @@ -61,11 +61,15 @@ type Registry struct { // with gRPC-Gateway response, if it uses json tags for marshaling. 
useJSONNamesForFields bool - // useFQNForOpenAPIName if true OpenAPI names will use the full qualified name (FQN) from proto definition, - // and generate a dot-separated OpenAPI name concatenating all elements from the proto FQN. - // If false, the default behavior is to concat the last 2 elements of the FQN if they are unique, otherwise concat - // all the elements of the FQN without any separator - useFQNForOpenAPIName bool + // openAPINamingStrategy is the naming strategy to use for assigning OpenAPI field and parameter names. This can be one of the following: + // - `legacy`: use the legacy naming strategy from protoc-gen-swagger, that generates unique but not necessarily + // maximally concise names. Components are concatenated directly, e.g., `MyOuterMessageMyNestedMessage`. + // - `simple`: use a simple heuristic for generating unique and concise names. Components are concatenated using + // dots as a separator, e.g., `MyOuterMesage.MyNestedMessage` (if `MyNestedMessage` alone is unique, + // `MyNestedMessage` will be used as the OpenAPI name). + // - `fqn`: always use the fully-qualified name of the proto message (leading dot removed) as the OpenAPI + // name. + openAPINamingStrategy string // useGoTemplate determines whether you want to use GO templates // in your protofile comments @@ -87,6 +91,9 @@ type Registry struct { // has no HttpRule annotation. warnOnUnboundMethods bool + // proto3OptionalNullable specifies whether Proto3 Optional fields should be marked as x-nullable. + proto3OptionalNullable bool + // fileOptions is a mapping of file name to additional OpenAPI file options fileOptions map[string]*options.Swagger @@ -109,6 +116,12 @@ type Registry struct { // omitPackageDoc, if false, causes a package comment to be included in the generated code. 
omitPackageDoc bool + + // recursiveDepth sets the maximum depth of a field parameter + recursiveDepth int + + // annotationMap is used to check for duplicate HTTP annotations + annotationMap map[annotationIdentifier]struct{} } type repeatedFieldSeparator struct { @@ -116,15 +129,21 @@ type repeatedFieldSeparator struct { sep rune } +type annotationIdentifier struct { + method string + pathTemplate string +} + // NewRegistry returns a new Registry. func NewRegistry() *Registry { return &Registry{ - msgs: make(map[string]*Message), - enums: make(map[string]*Enum), - files: make(map[string]*File), - pkgMap: make(map[string]string), - pkgAliases: make(map[string]string), - externalHTTPRules: make(map[string][]*annotations.HttpRule), + msgs: make(map[string]*Message), + enums: make(map[string]*Enum), + files: make(map[string]*File), + pkgMap: make(map[string]string), + pkgAliases: make(map[string]string), + externalHTTPRules: make(map[string][]*annotations.HttpRule), + openAPINamingStrategy: "legacy", repeatedPathParamSeparator: repeatedFieldSeparator{ name: "csv", sep: ',', @@ -134,6 +153,8 @@ func NewRegistry() *Registry { messageOptions: make(map[string]*options.Schema), serviceOptions: make(map[string]*options.Tag), fieldOptions: make(map[string]*options.JSONSchema), + annotationMap: make(map[annotationIdentifier]struct{}), + recursiveDepth: 1000, } } @@ -356,6 +377,16 @@ func (r *Registry) SetStandalone(standalone bool) { r.standalone = standalone } +// SetRecursiveDepth records the max recursion count +func (r *Registry) SetRecursiveDepth(count int) { + r.recursiveDepth = count +} + +// GetRecursiveDepth returns the max recursion count +func (r *Registry) GetRecursiveDepth() int { + return r.recursiveDepth +} + // ReserveGoPackageAlias reserves the unique alias of go package. // If succeeded, the alias will be never used for other packages in generated go files. 
// If failed, the alias is already taken by another package, so you need to use another @@ -480,13 +511,15 @@ func (r *Registry) GetUseJSONNamesForFields() bool { } // SetUseFQNForOpenAPIName sets useFQNForOpenAPIName +// Deprecated: use SetOpenAPINamingStrategy instead. func (r *Registry) SetUseFQNForOpenAPIName(use bool) { - r.useFQNForOpenAPIName = use + r.openAPINamingStrategy = "fqn" } // GetUseFQNForOpenAPIName returns useFQNForOpenAPIName +// Deprecated: Use GetOpenAPINamingStrategy(). func (r *Registry) GetUseFQNForOpenAPIName() bool { - return r.useFQNForOpenAPIName + return r.openAPINamingStrategy == "fqn" } // GetMergeFileName return the target merge OpenAPI file name @@ -494,6 +527,16 @@ func (r *Registry) GetMergeFileName() string { return r.mergeFileName } +// SetOpenAPINamingStrategy sets the naming strategy to be used. +func (r *Registry) SetOpenAPINamingStrategy(strategy string) { + r.openAPINamingStrategy = strategy +} + +// GetOpenAPINamingStrategy retrieves the naming strategy that is in use. 
+func (r *Registry) GetOpenAPINamingStrategy() string { + return r.openAPINamingStrategy +} + // SetUseGoTemplate sets useGoTemplate func (r *Registry) SetUseGoTemplate(use bool) { r.useGoTemplate = use @@ -554,6 +597,16 @@ func (r *Registry) GetOmitPackageDoc() bool { return r.omitPackageDoc } +// SetProto3OptionalNullable set proto3OtionalNullable +func (r *Registry) SetProto3OptionalNullable(proto3OtionalNullable bool) { + r.proto3OptionalNullable = proto3OtionalNullable +} + +// GetProto3OptionalNullable returns proto3OtionalNullable +func (r *Registry) GetProto3OptionalNullable() bool { + return r.proto3OptionalNullable +} + // RegisterOpenAPIOptions registers OpenAPI options func (r *Registry) RegisterOpenAPIOptions(opts *openapiconfig.OpenAPIOptions) error { if opts == nil { @@ -649,3 +702,20 @@ func (r *Registry) GetOpenAPIFieldOption(qualifiedField string) (*options.JSONSc opt, ok := r.fieldOptions[qualifiedField] return opt, ok } + +func (r *Registry) FieldName(f *Field) string { + if r.useJSONNamesForFields { + return f.GetJsonName() + } + return f.GetName() +} + +func (r *Registry) CheckDuplicateAnnotation(httpMethod string, httpTemplate string) error { + a := annotationIdentifier{method: httpMethod, pathTemplate: httpTemplate} + _, ok := r.annotationMap[a] + if ok { + return fmt.Errorf("duplicate annotation: method=%s, template=%s", httpMethod, httpTemplate) + } + r.annotationMap[a] = struct{}{} + return nil +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/generator/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/generator/BUILD.bazel index 93027afe2..23727ccf5 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/generator/BUILD.bazel +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/generator/BUILD.bazel @@ -3,8 +3,14 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") package(default_visibility = ["//visibility:public"]) go_library( - name = "go_default_library", + 
name = "generator", srcs = ["generator.go"], importpath = "github.com/grpc-ecosystem/grpc-gateway/v2/internal/generator", - deps = ["//internal/descriptor:go_default_library"], + deps = ["//internal/descriptor"], +) + +alias( + name = "go_default_library", + actual = ":generator", + visibility = ["//:__subpackages__"], ) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/BUILD.bazel index 5a718b384..f694f3c0d 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/BUILD.bazel +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/BUILD.bazel @@ -3,27 +3,33 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") package(default_visibility = ["//visibility:public"]) go_library( - name = "go_default_library", + name = "httprule", srcs = [ "compile.go", "parse.go", "types.go", ], importpath = "github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule", - deps = ["//utilities:go_default_library"], + deps = ["//utilities"], ) go_test( - name = "go_default_test", + name = "httprule_test", size = "small", srcs = [ "compile_test.go", "parse_test.go", "types_test.go", ], - embed = [":go_default_library"], + embed = [":httprule"], deps = [ - "//utilities:go_default_library", - "@com_github_golang_glog//:go_default_library", + "//utilities", + "@com_github_golang_glog//:glog", ], ) + +alias( + name = "go_default_library", + actual = ":httprule", + visibility = ["//:__subpackages__"], +) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/compile.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/compile.go index 162319d3f..3cd937295 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/compile.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/compile.go @@ -96,6 +96,10 @@ func (t template) Compile() Template { if 
op.str == "" { ops = append(ops, op.num) } else { + // eof segment literal represents the "/" path pattern + if op.str == eof { + op.str = "" + } if _, ok := consts[op.str]; !ok { consts[op.str] = len(pool) pool = append(pool, op.str) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/parse.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/parse.go index 7c7140f19..5edd784e6 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/parse.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/parse.go @@ -118,6 +118,10 @@ type parser struct { // topLevelSegments is the target of this parser. func (p *parser) topLevelSegments() ([]segment, error) { + if _, err := p.accept(typeEOF); err == nil { + p.tokens = p.tokens[:0] + return []segment{literal(eof)}, nil + } segs, err := p.segments() if err != nil { return nil, err diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-grpc-gateway/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-grpc-gateway/BUILD.bazel index 34f26313a..aa7ba0752 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-grpc-gateway/BUILD.bazel +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-grpc-gateway/BUILD.bazel @@ -4,21 +4,21 @@ load("@io_bazel_rules_go//proto:compiler.bzl", "go_proto_compiler") package(default_visibility = ["//visibility:private"]) go_library( - name = "go_default_library", + name = "protoc-gen-grpc-gateway_lib", srcs = ["main.go"], importpath = "github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-grpc-gateway", deps = [ - "//internal/codegenerator:go_default_library", - "//internal/descriptor:go_default_library", - "//protoc-gen-grpc-gateway/internal/gengateway:go_default_library", - "@com_github_golang_glog//:go_default_library", - "@org_golang_google_protobuf//compiler/protogen:go_default_library", + "//internal/codegenerator", + 
"//internal/descriptor", + "//protoc-gen-grpc-gateway/internal/gengateway", + "@com_github_golang_glog//:glog", + "@org_golang_google_protobuf//compiler/protogen", ], ) go_binary( name = "protoc-gen-grpc-gateway", - embed = [":go_default_library"], + embed = [":protoc-gen-grpc-gateway_lib"], visibility = ["//visibility:public"], ) @@ -44,8 +44,8 @@ go_proto_compiler( ) go_test( - name = "go_default_test", + name = "protoc-gen-grpc-gateway_test", srcs = ["main_test.go"], - embed = [":go_default_library"], - deps = ["//internal/descriptor:go_default_library"], + embed = [":protoc-gen-grpc-gateway_lib"], + deps = ["//internal/descriptor"], ) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-grpc-gateway/internal/gengateway/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-grpc-gateway/internal/gengateway/BUILD.bazel index 4c9445ab4..31ecb812a 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-grpc-gateway/internal/gengateway/BUILD.bazel +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-grpc-gateway/internal/gengateway/BUILD.bazel @@ -3,7 +3,7 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") package(default_visibility = ["//protoc-gen-grpc-gateway:__subpackages__"]) go_library( - name = "go_default_library", + name = "gengateway", srcs = [ "doc.go", "generator.go", @@ -11,28 +11,34 @@ go_library( ], importpath = "github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-grpc-gateway/internal/gengateway", deps = [ - "//internal/casing:go_default_library", - "//internal/descriptor:go_default_library", - "//internal/generator:go_default_library", - "//utilities:go_default_library", - "@com_github_golang_glog//:go_default_library", - "@org_golang_google_protobuf//proto:go_default_library", - "@org_golang_google_protobuf//types/pluginpb:go_default_library", + "//internal/casing", + "//internal/descriptor", + "//internal/generator", + "//utilities", + 
"@com_github_golang_glog//:glog", + "@org_golang_google_protobuf//proto", + "@org_golang_google_protobuf//types/pluginpb", ], ) go_test( - name = "go_default_test", + name = "gengateway_test", size = "small", srcs = [ "generator_test.go", "template_test.go", ], - embed = [":go_default_library"], + embed = [":gengateway"], deps = [ - "//internal/descriptor:go_default_library", - "//internal/httprule:go_default_library", - "@org_golang_google_protobuf//proto:go_default_library", - "@org_golang_google_protobuf//types/descriptorpb:go_default_library", + "//internal/descriptor", + "//internal/httprule", + "@org_golang_google_protobuf//proto", + "@org_golang_google_protobuf//types/descriptorpb", ], ) + +alias( + name = "go_default_library", + actual = ":gengateway", + visibility = ["//protoc-gen-grpc-gateway:__subpackages__"], +) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-grpc-gateway/internal/gengateway/template.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-grpc-gateway/internal/gengateway/template.go index 028761ffa..871b3d84f 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-grpc-gateway/internal/gengateway/template.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-grpc-gateway/internal/gengateway/template.go @@ -28,7 +28,7 @@ type binding struct { AllowPatchFeature bool } -// GetBodyFieldPath returns the binding body's fieldpath. +// GetBodyFieldPath returns the binding body's field path. func (b binding) GetBodyFieldPath() string { if b.Body != nil && len(b.Body.FieldPath) != 0 { return b.Body.FieldPath.String() @@ -36,7 +36,7 @@ func (b binding) GetBodyFieldPath() string { return "*" } -// GetBodyFieldPath returns the binding body's struct field name. +// GetBodyFieldStructName returns the binding body's struct field name. 
func (b binding) GetBodyFieldStructName() (string, error) { if b.Body != nil && len(b.Body.FieldPath) != 0 { return casing.Camel(b.Body.FieldPath.String()), nil @@ -170,6 +170,10 @@ func applyTemplate(p param, reg *descriptor.Registry) (string, error) { methName := casing.Camel(*meth.Name) meth.Name = &methName for _, b := range meth.Bindings { + if err := reg.CheckDuplicateAnnotation(b.HTTPMethod, b.PathTmpl.Template); err != nil { + return "", err + } + methodWithBindingsSeen = true if err := handlerTemplate.Execute(w, binding{ Binding: b, @@ -435,15 +439,6 @@ var ( } return nil } - if err := handleSend(); err != nil { - if cerr := stream.CloseSend(); cerr != nil { - grpclog.Infof("Failed to terminate client stream: %v", cerr) - } - if err == io.EOF { - return stream, metadata, nil - } - return nil, metadata, err - } go func() { for { if err := handleSend(); err != nil { @@ -600,7 +595,11 @@ func Register{{$svc.GetName}}{{$.RegisterFuncSuffix}}Server(ctx context.Context, var stream runtime.ServerTransportStream ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + {{- if $b.PathTmpl }} + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/{{$svc.File.GetPackage}}.{{$svc.GetName}}/{{$m.GetName}}", runtime.WithHTTPPathPattern("{{$b.PathTmpl.Template}}")) + {{- else -}} rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/{{$svc.File.GetPackage}}.{{$svc.GetName}}/{{$m.GetName}}") + {{- end }} if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return @@ -676,7 +675,11 @@ func Register{{$svc.GetName}}{{$.RegisterFuncSuffix}}Client(ctx context.Context, {{- end }} defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + {{- if $b.PathTmpl }} + rctx, err := runtime.AnnotateContext(ctx, mux, req, "/{{$svc.File.GetPackage}}.{{$svc.GetName}}/{{$m.GetName}}", 
runtime.WithHTTPPathPattern("{{$b.PathTmpl.Template}}")) + {{- else -}} rctx, err := runtime.AnnotateContext(ctx, mux, req, "/{{$svc.File.GetPackage}}.{{$svc.GetName}}/{{$m.GetName}}") + {{- end }} if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/BUILD.bazel index 90e711b18..56afabe05 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/BUILD.bazel +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/BUILD.bazel @@ -3,28 +3,28 @@ load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library", "go_test") package(default_visibility = ["//visibility:private"]) go_library( - name = "go_default_library", + name = "protoc-gen-openapiv2_lib", srcs = ["main.go"], importpath = "github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2", deps = [ - "//internal/codegenerator:go_default_library", - "//internal/descriptor:go_default_library", - "//protoc-gen-openapiv2/internal/genopenapi:go_default_library", - "@com_github_golang_glog//:go_default_library", - "@org_golang_google_protobuf//proto:go_default_library", - "@org_golang_google_protobuf//types/pluginpb:go_default_library", + "//internal/codegenerator", + "//internal/descriptor", + "//protoc-gen-openapiv2/internal/genopenapi", + "@com_github_golang_glog//:glog", + "@org_golang_google_protobuf//proto", + "@org_golang_google_protobuf//types/pluginpb", ], ) go_binary( name = "protoc-gen-openapiv2", - embed = [":go_default_library"], + embed = [":protoc-gen-openapiv2_lib"], visibility = ["//visibility:public"], ) go_test( - name = "go_default_test", + name = "protoc-gen-openapiv2_test", size = "small", srcs = ["main_test.go"], - embed = [":go_default_library"], + embed = [":protoc-gen-openapiv2_lib"], ) diff --git 
a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/defs.bzl b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/defs.bzl index ea3226762..c8668f220 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/defs.bzl +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/defs.bzl @@ -27,12 +27,12 @@ def _direct_source_infos(proto_info, provided_sources = []): source_root = proto_info.proto_source_root if "." == source_root: - return [struct(file = src, import_path = src.path) for src in proto_info.direct_sources] + return [struct(file = src, import_path = src.path) for src in proto_info.check_deps_sources.to_list()] offset = len(source_root) + 1 # + '/'. infos = [] - for src in proto_info.direct_sources: + for src in proto_info.check_deps_sources.to_list(): # TODO(yannic): Remove this hack when we drop support for Bazel < 1.0. local_offset = offset if src.root.path and not source_root.startswith(src.root.path): @@ -58,10 +58,12 @@ def _run_proto_gen_openapi( repeated_path_param_separator, include_package_in_tags, fqn_for_openapi_name, + openapi_naming_strategy, use_go_templates, disable_default_errors, enums_as_ints, simple_operation_ids, + proto3_optional_nullable, openapi_configuration, generate_unbound_methods): args = actions.args() @@ -86,6 +88,9 @@ def _run_proto_gen_openapi( if fqn_for_openapi_name: args.add("--openapiv2_opt", "fqn_for_openapi_name=true") + if openapi_naming_strategy: + args.add("--openapiv2_opt", "openapi_naming_strategy=%s" % openapi_naming_strategy) + if generate_unbound_methods: args.add("--openapiv2_opt", "generate_unbound_methods=true") @@ -107,6 +112,9 @@ def _run_proto_gen_openapi( if enums_as_ints: args.add("--openapiv2_opt", "enums_as_ints=true") + if proto3_optional_nullable: + args.add("--openapiv2_opt", "proto3_optional_nullable=true") + args.add("--openapiv2_opt", "repeated_path_param_separator=%s" % repeated_path_param_separator) 
proto_file_infos = _direct_source_infos(proto_info) @@ -197,10 +205,12 @@ def _proto_gen_openapi_impl(ctx): repeated_path_param_separator = ctx.attr.repeated_path_param_separator, include_package_in_tags = ctx.attr.include_package_in_tags, fqn_for_openapi_name = ctx.attr.fqn_for_openapi_name, + openapi_naming_strategy = ctx.attr.openapi_naming_strategy, use_go_templates = ctx.attr.use_go_templates, disable_default_errors = ctx.attr.disable_default_errors, enums_as_ints = ctx.attr.enums_as_ints, simple_operation_ids = ctx.attr.simple_operation_ids, + proto3_optional_nullable = ctx.attr.proto3_optional_nullable, openapi_configuration = ctx.file.openapi_configuration, generate_unbound_methods = ctx.attr.generate_unbound_methods, ), @@ -256,6 +266,15 @@ protoc_gen_openapiv2 = rule( " qualified names from the proto definition" + " (ie my.package.MyMessage.MyInnerMessage", ), + "openapi_naming_strategy": attr.string( + default = "", + mandatory = False, + values = ["", "simple", "legacy", "fqn"], + doc = "configures how OpenAPI names are determined." + + " Allowed values are `` (empty), `simple`, `legacy` and `fqn`." + + " If unset, either `legacy` or `fqn` are selected, depending" + + " on the value of the `fqn_for_openapi_name` setting", + ), "use_go_templates": attr.bool( default = False, mandatory = False, @@ -278,6 +297,11 @@ protoc_gen_openapiv2 = rule( doc = "whether to remove the service prefix in the operationID" + " generation. 
Can introduce duplicate operationIDs, use with caution.", ), + "proto3_optional_nullable": attr.bool( + default = False, + mandatory = False, + doc = "whether Proto3 Optional fields should be marked as x-nullable", + ), "openapi_configuration": attr.label( allow_single_file = True, mandatory = False, diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/internal/genopenapi/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/internal/genopenapi/BUILD.bazel index 5442a8d26..ee267caab 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/internal/genopenapi/BUILD.bazel +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/internal/genopenapi/BUILD.bazel @@ -3,55 +3,67 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") package(default_visibility = ["//protoc-gen-openapiv2:__subpackages__"]) go_library( - name = "go_default_library", + name = "genopenapi", srcs = [ "doc.go", "generator.go", "helpers.go", "helpers_go111_old.go", + "naming.go", "template.go", "types.go", ], importpath = "github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/internal/genopenapi", deps = [ - "//internal/casing:go_default_library", - "//internal/descriptor:go_default_library", - "//internal/generator:go_default_library", - "//protoc-gen-openapiv2/options:go_default_library", - "@com_github_golang_glog//:go_default_library", + "//internal/casing", + "//internal/descriptor", + "//internal/generator", + "//protoc-gen-openapiv2/options", + "@com_github_golang_glog//:glog", "@com_github_golang_protobuf//descriptor:go_default_library_gen", "@go_googleapis//google/api:annotations_go_proto", "@go_googleapis//google/rpc:status_go_proto", "@io_bazel_rules_go//proto/wkt:any_go_proto", - "@io_bazel_rules_go//proto/wkt:struct_go_proto", - "@org_golang_google_protobuf//encoding/protojson:go_default_library", - "@org_golang_google_protobuf//proto:go_default_library", - 
"@org_golang_google_protobuf//types/descriptorpb:go_default_library", - "@org_golang_google_protobuf//types/pluginpb:go_default_library", + "@org_golang_google_protobuf//encoding/protojson", + "@org_golang_google_protobuf//proto", + "@org_golang_google_protobuf//types/descriptorpb", + "@org_golang_google_protobuf//types/known/structpb", + "@org_golang_google_protobuf//types/pluginpb", ], ) go_test( - name = "go_default_test", + name = "genopenapi_test", size = "small", - srcs = ["template_test.go"], - embed = [":go_default_library"], + srcs = [ + "cycle_test.go", + "naming_test.go", + "template_test.go", + ], + embed = [":genopenapi"], deps = [ - "//internal/descriptor:go_default_library", - "//internal/descriptor/openapiconfig:go_default_library", - "//internal/httprule:go_default_library", - "//protoc-gen-openapiv2/options:go_default_library", - "//runtime:go_default_library", - "@com_github_google_go_cmp//cmp:go_default_library", + "//internal/descriptor", + "//internal/descriptor/openapiconfig", + "//internal/httprule", + "//protoc-gen-openapiv2/options", + "//runtime", + "@com_github_google_go_cmp//cmp", "@go_googleapis//google/api:annotations_go_proto", "@io_bazel_rules_go//proto/wkt:field_mask_go_proto", - "@org_golang_google_protobuf//proto:go_default_library", - "@org_golang_google_protobuf//reflect/protodesc:go_default_library", - "@org_golang_google_protobuf//types/descriptorpb:go_default_library", - "@org_golang_google_protobuf//types/known/durationpb:go_default_library", - "@org_golang_google_protobuf//types/known/structpb:go_default_library", - "@org_golang_google_protobuf//types/known/timestamppb:go_default_library", - "@org_golang_google_protobuf//types/known/wrapperspb:go_default_library", - "@org_golang_google_protobuf//types/pluginpb:go_default_library", + "@org_golang_google_protobuf//proto", + "@org_golang_google_protobuf//reflect/protodesc", + "@org_golang_google_protobuf//types/descriptorpb", + 
"@org_golang_google_protobuf//types/known/anypb", + "@org_golang_google_protobuf//types/known/durationpb", + "@org_golang_google_protobuf//types/known/structpb", + "@org_golang_google_protobuf//types/known/timestamppb", + "@org_golang_google_protobuf//types/known/wrapperspb", + "@org_golang_google_protobuf//types/pluginpb", ], ) + +alias( + name = "go_default_library", + actual = ":genopenapi", + visibility = ["//protoc-gen-openapiv2:__subpackages__"], +) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/internal/genopenapi/generator.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/internal/genopenapi/generator.go index 65a2b6a22..0848b4005 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/internal/genopenapi/generator.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/internal/genopenapi/generator.go @@ -36,6 +36,11 @@ type wrapper struct { swagger *openapiSwaggerObject } +type GeneratorOptions struct { + Registry *descriptor.Registry + RecursiveDepth int +} + // New returns a new generator which generates grpc gateway files. func New(reg *descriptor.Registry) gen.Generator { return &generator{reg: reg} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/internal/genopenapi/naming.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/internal/genopenapi/naming.go new file mode 100644 index 000000000..338ea2dcd --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/internal/genopenapi/naming.go @@ -0,0 +1,110 @@ +package genopenapi + +import ( + "reflect" + "strings" +) + +// LookupNamingStrategy looks up the given naming strategy and returns the naming +// strategy function for it. The naming strategy function takes in the list of all +// fully-qualified proto message names, and returns a mapping from fully-qualified +// name to OpenAPI name. 
+func LookupNamingStrategy(strategyName string) func([]string) map[string]string { + switch strings.ToLower(strategyName) { + case "fqn": + return resolveNamesFQN + case "legacy": + return resolveNamesLegacy + case "simple": + return resolveNamesSimple + } + return nil +} + +// resolveNamesFQN uses the fully-qualified proto message name as the +// OpenAPI name, stripping the leading dot. +func resolveNamesFQN(messages []string) map[string]string { + uniqueNames := make(map[string]string, len(messages)) + for _, p := range messages { + // strip leading dot from proto fqn + uniqueNames[p] = p[1:] + } + return uniqueNames +} + +// resolveNamesLegacy takes the names of all proto messages and generates unique references by +// applying the legacy heuristics for deriving unique names: starting from the bottom of the name hierarchy, it +// determines the minimum number of components necessary to yield a unique name, adds one +// to that number, and then concatenates those last components with no separator in between +// to form a unique name. +// +// E.g., if the fully qualified name is `.a.b.C.D`, and there are other messages with fully +// qualified names ending in `.D` but not in `.C.D`, it assigns the unique name `bCD`. +func resolveNamesLegacy(messages []string) map[string]string { + return resolveNamesUniqueWithContext(messages, 1, "") +} + +// resolveNamesSimple takes the names of all proto messages and generates unique references by using a simple +// heuristic: starting from the bottom of the name hierarchy, it determines the minimum +// number of components necessary to yield a unique name, and then concatenates those last +// components with a "." separator in between to form a unique name. +// +// E.g., if the fully qualified name is `.a.b.C.D`, and there are other messages with +// fully qualified names ending in `.D` but not in `.C.D`, it assigns the unique name `C.D`. 
+func resolveNamesSimple(messages []string) map[string]string { + return resolveNamesUniqueWithContext(messages, 0, ".") +} + +// Take the names of every proto message and generates a unique reference by: +// first, separating each message name into its components by splitting at dots. Then, +// take the shortest suffix slice from each components slice that is unique among all +// messages, and convert it into a component name by taking extraContext additional +// components into consideration and joining all components with componentSeparator. +func resolveNamesUniqueWithContext(messages []string, extraContext int, componentSeparator string) map[string]string { + packagesByDepth := make(map[int][][]string) + uniqueNames := make(map[string]string) + + hierarchy := func(pkg string) []string { + return strings.Split(pkg, ".") + } + + for _, p := range messages { + h := hierarchy(p) + for depth := range h { + if _, ok := packagesByDepth[depth]; !ok { + packagesByDepth[depth] = make([][]string, 0) + } + packagesByDepth[depth] = append(packagesByDepth[depth], h[len(h)-depth:]) + } + } + + count := func(list [][]string, item []string) int { + i := 0 + for _, element := range list { + if reflect.DeepEqual(element, item) { + i++ + } + } + return i + } + + for _, p := range messages { + h := hierarchy(p) + depth := 0 + for ; depth < len(h); depth++ { + // depth + extraContext > 0 ensures that we only break for values of depth when the + // resulting slice of name components is non-empty. Otherwise, we would return the + // empty string as the concise unique name is len(messages) == 1 (which is + // technically correct). 
+ if depth+extraContext > 0 && count(packagesByDepth[depth], h[len(h)-depth:]) == 1 { + break + } + } + start := len(h) - depth - extraContext + if start < 0 { + start = 0 + } + uniqueNames[p] = strings.Join(h[start:], componentSeparator) + } + return uniqueNames +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/internal/genopenapi/template.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/internal/genopenapi/template.go index fb8fa9fe0..19487b13c 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/internal/genopenapi/template.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/internal/genopenapi/template.go @@ -18,7 +18,6 @@ import ( "time" "github.com/golang/glog" - structpb "github.com/golang/protobuf/ptypes/struct" "github.com/grpc-ecosystem/grpc-gateway/v2/internal/casing" "github.com/grpc-ecosystem/grpc-gateway/v2/internal/descriptor" openapi_options "github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options" @@ -26,6 +25,7 @@ import ( "google.golang.org/protobuf/encoding/protojson" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/types/descriptorpb" + "google.golang.org/protobuf/types/known/structpb" ) // wktSchemas are the schemas of well-known-types. @@ -120,7 +120,7 @@ func getEnumDefault(enum *descriptor.Enum) string { // messageToQueryParameters converts a message to a list of OpenAPI query parameters. 
func messageToQueryParameters(message *descriptor.Message, reg *descriptor.Registry, pathParams []descriptor.Parameter, body *descriptor.Body) (params []openapiParameterObject, err error) { for _, field := range message.Fields { - p, err := queryParams(message, field, "", reg, pathParams, body) + p, err := queryParams(message, field, "", reg, pathParams, body, reg.GetRecursiveDepth()) if err != nil { return nil, err } @@ -130,17 +130,64 @@ func messageToQueryParameters(message *descriptor.Message, reg *descriptor.Regis } // queryParams converts a field to a list of OpenAPI query parameters recursively through the use of nestedQueryParams. -func queryParams(message *descriptor.Message, field *descriptor.Field, prefix string, reg *descriptor.Registry, pathParams []descriptor.Parameter, body *descriptor.Body) (params []openapiParameterObject, err error) { - return nestedQueryParams(message, field, prefix, reg, pathParams, body, map[string]bool{}) +func queryParams(message *descriptor.Message, field *descriptor.Field, prefix string, reg *descriptor.Registry, pathParams []descriptor.Parameter, body *descriptor.Body, recursiveCount int) (params []openapiParameterObject, err error) { + return nestedQueryParams(message, field, prefix, reg, pathParams, body, newCycleChecker(recursiveCount)) +} + +type cycleChecker struct { + m map[string]int + count int +} + +func newCycleChecker(recursive int) *cycleChecker { + return &cycleChecker{ + m: make(map[string]int), + count: recursive, + } +} + +// Check returns whether name is still within recursion +// toleration +func (c *cycleChecker) Check(name string) bool { + count, ok := c.m[name] + count = count + 1 + isCycle := count > c.count + + if isCycle { + return false + } + + // provision map entry if not available + if !ok { + c.m[name] = 1 + return true + } + + c.m[name] = count + + return true +} + +func (c *cycleChecker) Branch() *cycleChecker { + copy := &cycleChecker{ + count: c.count, + m: map[string]int{}, + } + + for k, 
v := range c.m { + copy.m[k] = v + } + + return copy } // nestedQueryParams converts a field to a list of OpenAPI query parameters recursively. // This function is a helper function for queryParams, that keeps track of cyclical message references // through the use of -// touched map[string]bool -// If a cycle is discovered, an error is returned, as cyclical data structures aren't allowed +// touched map[string]int +// If a cycle is discovered, an error is returned, as cyclical data structures are dangerous // in query parameters. -func nestedQueryParams(message *descriptor.Message, field *descriptor.Field, prefix string, reg *descriptor.Registry, pathParams []descriptor.Parameter, body *descriptor.Body, touchedIn map[string]bool) (params []openapiParameterObject, err error) { +func nestedQueryParams(message *descriptor.Message, field *descriptor.Field, prefix string, reg *descriptor.Registry, pathParams []descriptor.Parameter, body *descriptor.Body, cycle *cycleChecker) (params []openapiParameterObject, err error) { // make sure the parameter is not already listed as a path parameter for _, pathParam := range pathParams { if pathParam.Target == field { @@ -203,11 +250,7 @@ func nestedQueryParams(message *descriptor.Message, field *descriptor.Field, pre param.CollectionFormat = "multi" } - if reg.GetUseJSONNamesForFields() { - param.Name = prefix + field.GetJsonName() - } else { - param.Name = prefix + field.GetName() - } + param.Name = prefix + reg.FieldName(field) if isEnum { enum, err := reg.LookupEnum("", fieldType) @@ -248,27 +291,18 @@ func nestedQueryParams(message *descriptor.Message, field *descriptor.Field, pre } // Check for cyclical message reference: - isCycle := touchedIn[*msg.Name] - if isCycle { - return nil, fmt.Errorf("recursive types are not allowed for query parameters, cycle found on %q", fieldType) + isOK := cycle.Check(*msg.Name) + if !isOK { + return nil, fmt.Errorf("exceeded recursive count (%d) for query parameter %q", cycle.count, 
fieldType) } // Construct a new map with the message name so a cycle further down the recursive path can be detected. // Do not keep anything in the original touched reference and do not pass that reference along. This will // prevent clobbering adjacent records while recursing. - touchedOut := make(map[string]bool) - for k, v := range touchedIn { - touchedOut[k] = v - } - touchedOut[*msg.Name] = true + touchedOut := cycle.Branch() for _, nestedField := range msg.Fields { - var fieldName string - if reg.GetUseJSONNamesForFields() { - fieldName = field.GetJsonName() - } else { - fieldName = field.GetName() - } + fieldName := reg.FieldName(field) p, err := nestedQueryParams(msg, nestedField, prefix+fieldName+".", reg, pathParams, body, touchedOut) if err != nil { return nil, err @@ -341,7 +375,121 @@ func skipRenderingRef(refName string) bool { return ok } -func renderMessagesAsDefinition(messages messageMap, d openapiDefinitionsObject, reg *descriptor.Registry, customRefs refMap) { +func renderMessageAsDefinition(msg *descriptor.Message, reg *descriptor.Registry, customRefs refMap, excludeFields []*descriptor.Field) openapiSchemaObject { + schema := openapiSchemaObject{ + schemaCore: schemaCore{ + Type: "object", + }, + } + msgComments := protoComments(reg, msg.File, msg.Outers, "MessageType", int32(msg.Index)) + if err := updateOpenAPIDataFromComments(reg, &schema, msg, msgComments, false); err != nil { + panic(err) + } + opts, err := getMessageOpenAPIOption(reg, msg) + if err != nil { + panic(err) + } + if opts != nil { + protoSchema := openapiSchemaFromProtoSchema(opts, reg, customRefs, msg) + + // Warning: Make sure not to overwrite any fields already set on the schema type. 
+ schema.ExternalDocs = protoSchema.ExternalDocs + schema.ReadOnly = protoSchema.ReadOnly + schema.MultipleOf = protoSchema.MultipleOf + schema.Maximum = protoSchema.Maximum + schema.ExclusiveMaximum = protoSchema.ExclusiveMaximum + schema.Minimum = protoSchema.Minimum + schema.ExclusiveMinimum = protoSchema.ExclusiveMinimum + schema.MaxLength = protoSchema.MaxLength + schema.MinLength = protoSchema.MinLength + schema.Pattern = protoSchema.Pattern + schema.Default = protoSchema.Default + schema.MaxItems = protoSchema.MaxItems + schema.MinItems = protoSchema.MinItems + schema.UniqueItems = protoSchema.UniqueItems + schema.MaxProperties = protoSchema.MaxProperties + schema.MinProperties = protoSchema.MinProperties + schema.Required = protoSchema.Required + schema.XNullable = protoSchema.XNullable + if protoSchema.schemaCore.Type != "" || protoSchema.schemaCore.Ref != "" { + schema.schemaCore = protoSchema.schemaCore + } + if protoSchema.Title != "" { + schema.Title = protoSchema.Title + } + if protoSchema.Description != "" { + schema.Description = protoSchema.Description + } + if protoSchema.Example != nil { + schema.Example = protoSchema.Example + } + } + + schema.Required = filterOutExcludedFields(schema.Required, excludeFields, reg) + + for _, f := range msg.Fields { + if shouldExcludeField(reg.FieldName(f), excludeFields, reg) { + continue + } + fieldValue := schemaOfField(f, reg, customRefs) + comments := fieldProtoComments(reg, msg, f) + if err := updateOpenAPIDataFromComments(reg, &fieldValue, f, comments, false); err != nil { + panic(err) + } + + if requiredIdx := find(schema.Required, *f.Name); requiredIdx != -1 && reg.GetUseJSONNamesForFields() { + schema.Required[requiredIdx] = f.GetJsonName() + } + + if fieldValue.Required != nil { + for _, req := range fieldValue.Required { + if reg.GetUseJSONNamesForFields() { + schema.Required = append(schema.Required, f.GetJsonName()) + } else { + schema.Required = append(schema.Required, req) + } + } + } + + kv := 
keyVal{Value: fieldValue} + kv.Key = reg.FieldName(f) + if schema.Properties == nil { + schema.Properties = &openapiSchemaObjectProperties{} + } + *schema.Properties = append(*schema.Properties, kv) + } + + if msg.FQMN() == ".google.protobuf.Any" { + transformAnyForJSON(&schema, reg.GetUseJSONNamesForFields()) + } + + return schema +} + +// transformAnyForJSON should be called when the schema object represents a google.protobuf.Any, and will replace the +// Properties slice with a single value for '@type'. We mutate the incorrectly named field so that we inherit the same +// documentation as specified on the original field in the protobuf descriptors. +func transformAnyForJSON(schema *openapiSchemaObject, useJSONNames bool) { + var typeFieldName string + if useJSONNames { + typeFieldName = "typeUrl" + } else { + typeFieldName = "type_url" + } + + for _, property := range *schema.Properties { + if property.Key == typeFieldName { + schema.AdditionalProperties = &openapiSchemaObject{} + schema.Properties = &openapiSchemaObjectProperties{keyVal{ + Key: "@type", + Value: property.Value, + }} + break + } + } +} + +func renderMessagesAsDefinition(messages messageMap, d openapiDefinitionsObject, reg *descriptor.Registry, customRefs refMap, excludeFields []*descriptor.Field) { for name, msg := range messages { swgName, ok := fullyQualifiedNameToOpenAPIName(msg.FQMN(), reg) if !ok { @@ -354,88 +502,26 @@ func renderMessagesAsDefinition(messages messageMap, d openapiDefinitionsObject, if opt := msg.GetOptions(); opt != nil && opt.MapEntry != nil && *opt.MapEntry { continue } - schema := openapiSchemaObject{ - schemaCore: schemaCore{ - Type: "object", - }, - } - msgComments := protoComments(reg, msg.File, msg.Outers, "MessageType", int32(msg.Index)) - if err := updateOpenAPIDataFromComments(reg, &schema, msg, msgComments, false); err != nil { - panic(err) - } - opts, err := getMessageOpenAPIOption(reg, msg) - if err != nil { - panic(err) - } - if opts != nil { - protoSchema := 
openapiSchemaFromProtoSchema(opts, reg, customRefs, msg) - - // Warning: Make sure not to overwrite any fields already set on the schema type. - schema.ExternalDocs = protoSchema.ExternalDocs - schema.ReadOnly = protoSchema.ReadOnly - schema.MultipleOf = protoSchema.MultipleOf - schema.Maximum = protoSchema.Maximum - schema.ExclusiveMaximum = protoSchema.ExclusiveMaximum - schema.Minimum = protoSchema.Minimum - schema.ExclusiveMinimum = protoSchema.ExclusiveMinimum - schema.MaxLength = protoSchema.MaxLength - schema.MinLength = protoSchema.MinLength - schema.Pattern = protoSchema.Pattern - schema.Default = protoSchema.Default - schema.MaxItems = protoSchema.MaxItems - schema.MinItems = protoSchema.MinItems - schema.UniqueItems = protoSchema.UniqueItems - schema.MaxProperties = protoSchema.MaxProperties - schema.MinProperties = protoSchema.MinProperties - schema.Required = protoSchema.Required - if protoSchema.schemaCore.Type != "" || protoSchema.schemaCore.Ref != "" { - schema.schemaCore = protoSchema.schemaCore - } - if protoSchema.Title != "" { - schema.Title = protoSchema.Title - } - if protoSchema.Description != "" { - schema.Description = protoSchema.Description - } - if protoSchema.Example != nil { - schema.Example = protoSchema.Example - } - } - - for _, f := range msg.Fields { - fieldValue := schemaOfField(f, reg, customRefs) - comments := fieldProtoComments(reg, msg, f) - if err := updateOpenAPIDataFromComments(reg, &fieldValue, f, comments, false); err != nil { - panic(err) - } - - if requiredIdx := find(schema.Required, *f.Name); requiredIdx != -1 && reg.GetUseJSONNamesForFields() { - schema.Required[requiredIdx] = f.GetJsonName() - } - - if fieldValue.Required != nil { - for _, req := range fieldValue.Required { - if reg.GetUseJSONNamesForFields() { - schema.Required = append(schema.Required, f.GetJsonName()) - } else { - schema.Required = append(schema.Required, req) - } - } - } + d[swgName] = renderMessageAsDefinition(msg, reg, customRefs, 
excludeFields) + } +} - kv := keyVal{Value: fieldValue} - if reg.GetUseJSONNamesForFields() { - kv.Key = f.GetJsonName() - } else { - kv.Key = f.GetName() - } - if schema.Properties == nil { - schema.Properties = &openapiSchemaObjectProperties{} - } - *schema.Properties = append(*schema.Properties, kv) +func shouldExcludeField(name string, excluded []*descriptor.Field, reg *descriptor.Registry) bool { + for _, f := range excluded { + if name == reg.FieldName(f) { + return true } - d[swgName] = schema } + return false +} +func filterOutExcludedFields(fields []string, excluded []*descriptor.Field, reg *descriptor.Registry) []string { + var filtered []string + for _, f := range fields { + if !shouldExcludeField(f, excluded, reg) { + filtered = append(filtered, f) + } + } + return filtered } // schemaOfField returns a OpenAPI Schema Object for a protobuf field. @@ -524,6 +610,10 @@ func schemaOfField(f *descriptor.Field, reg *descriptor.Registry, refs refMap) o updateSwaggerObjectFromFieldBehavior(&ret, j, f) } + if reg.GetProto3OptionalNullable() && f.GetProto3Optional() { + ret.XNullable = true + } + return ret } @@ -623,7 +713,7 @@ func fullyQualifiedNameToOpenAPIName(fqn string, reg *descriptor.Registry) (stri ret, ok := mapping[fqn] return ret, ok } - mapping := resolveFullyQualifiedNameToOpenAPINames(append(reg.GetAllFQMNs(), reg.GetAllFQENs()...), reg.GetUseFQNForOpenAPIName()) + mapping := resolveFullyQualifiedNameToOpenAPINames(append(reg.GetAllFQMNs(), reg.GetAllFQENs()...), reg.GetOpenAPINamingStrategy()) registriesSeen[reg] = mapping ret, ok := mapping[fqn] return ret, ok @@ -648,59 +738,13 @@ func lookupMsgAndOpenAPIName(location, name string, reg *descriptor.Registry) (* var registriesSeen = map[*descriptor.Registry]map[string]string{} var registriesSeenMutex sync.Mutex -// Take the names of every proto and "uniq-ify" them. The idea is to produce a -// set of names that meet a couple of conditions. 
They must be stable, they -// must be unique, and they must be shorter than the FQN. -// -// This likely could be made better. This will always generate the same names -// but may not always produce optimal names. This is a reasonably close -// approximation of what they should look like in most cases. -func resolveFullyQualifiedNameToOpenAPINames(messages []string, useFQNForOpenAPIName bool) map[string]string { - packagesByDepth := make(map[int][][]string) - uniqueNames := make(map[string]string) - - hierarchy := func(pkg string) []string { - return strings.Split(pkg, ".") - } - - for _, p := range messages { - h := hierarchy(p) - for depth := range h { - if _, ok := packagesByDepth[depth]; !ok { - packagesByDepth[depth] = make([][]string, 0) - } - packagesByDepth[depth] = append(packagesByDepth[depth], h[len(h)-depth:]) - } - } - - count := func(list [][]string, item []string) int { - i := 0 - for _, element := range list { - if reflect.DeepEqual(element, item) { - i++ - } - } - return i - } - - for _, p := range messages { - if useFQNForOpenAPIName { - // strip leading dot from proto fqn - uniqueNames[p] = p[1:] - } else { - h := hierarchy(p) - for depth := 0; depth < len(h); depth++ { - if count(packagesByDepth[depth], h[len(h)-depth:]) == 1 { - uniqueNames[p] = strings.Join(h[len(h)-depth-1:], "") - break - } - if depth == len(h)-1 { - uniqueNames[p] = strings.Join(h, "") - } - } - } +// Take the names of every proto message and generate a unique reference for each, according to the given strategy. 
+func resolveFullyQualifiedNameToOpenAPINames(messages []string, namingStrategy string) map[string]string { + strategyFn := LookupNamingStrategy(namingStrategy) + if strategyFn == nil { + return nil } - return uniqueNames + return strategyFn(messages) } var canRegexp = regexp.MustCompile("{([a-zA-Z][a-zA-Z0-9_.]*).*}") @@ -919,9 +963,25 @@ func renderServices(services []*descriptor.Service, paths openapiPathsObject, re wknSchemaCore, isWkn := wktSchemas[meth.RequestType.FQMN()] if !isWkn { - err := schema.setRefFromFQN(meth.RequestType.FQMN(), reg) - if err != nil { - return err + var bodyExcludedFields []*descriptor.Field + if len(b.PathParams) != 0 { + for _, p := range b.PathParams { + // We only support excluding top-level fields captured by path parameters. + if len(p.FieldPath) == 1 { + bodyExcludedFields = append(bodyExcludedFields, p.FieldPath[0].Target) + } + } + } + if len(bodyExcludedFields) != 0 { + schema = renderMessageAsDefinition(meth.RequestType, reg, customRefs, bodyExcludedFields) + if schema.Properties == nil || len(*schema.Properties) == 0 { + glog.Errorf("created a body with 0 properties in the message, this might be unintended: %s", *meth.RequestType) + } + } else { + err := schema.setRefFromFQN(meth.RequestType.FQMN(), reg) + if err != nil { + return err + } } } else { schema.schemaCore = wknSchemaCore @@ -1246,7 +1306,7 @@ func applyTemplate(p param) (*openapiSwaggerObject, error) { // Find all the service's messages and enumerations that are defined (recursively) // and write request, response and other custom (but referenced) types out as definition objects. findServicesMessagesAndEnumerations(p.Services, p.reg, messages, streamingMessages, enums, requestResponseRefs) - renderMessagesAsDefinition(messages, s.Definitions, p.reg, customRefs) + renderMessagesAsDefinition(messages, s.Definitions, p.reg, customRefs, nil) renderEnumerationsAsDefinition(enums, s.Definitions, p.reg) // File itself might have some comments and metadata. 
@@ -2303,7 +2363,7 @@ func addCustomRefs(d openapiDefinitionsObject, reg *descriptor.Registry, refs re // ?? Should be either enum or msg } - renderMessagesAsDefinition(msgMap, d, reg, refs) + renderMessagesAsDefinition(msgMap, d, reg, refs, nil) renderEnumerationsAsDefinition(enumMap, d, reg) // Run again in case any new refs were added diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/internal/genopenapi/types.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/internal/genopenapi/types.go index 505cfc686..861c4c733 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/internal/genopenapi/types.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/internal/genopenapi/types.go @@ -150,12 +150,18 @@ type openapiParameterObject struct { } // core part of schema, which is common to itemsObject and schemaObject. -// http://swagger.io/specification/#itemsObject +// http://swagger.io/specification/v2/#itemsObject +// The OAS3 spec (https://swagger.io/specification/#schemaObject) defines the +// `nullable` field as part of a Schema Object. This behavior has been +// "back-ported" to OAS2 as the Specification Extension `x-nullable`, and is +// supported by generation tools such as swagger-codegen and go-swagger. +// For protoc-gen-openapiv3, we'd want to add `nullable` instead. 
type schemaCore struct { - Type string `json:"type,omitempty"` - Format string `json:"format,omitempty"` - Ref string `json:"$ref,omitempty"` - Example json.RawMessage `json:"example,omitempty"` + Type string `json:"type,omitempty"` + Format string `json:"format,omitempty"` + Ref string `json:"$ref,omitempty"` + XNullable bool `json:"x-nullable,omitempty"` + Example json.RawMessage `json:"example,omitempty"` Items *openapiItemsObject `json:"items,omitempty"` diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/main.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/main.go index 2b8a6f773..66811110e 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/main.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/main.go @@ -26,13 +26,16 @@ var ( versionFlag = flag.Bool("version", false, "print the current version") allowRepeatedFieldsInBody = flag.Bool("allow_repeated_fields_in_body", false, "allows to use repeated field in `body` and `response_body` field of `google.api.http` annotation option") includePackageInTags = flag.Bool("include_package_in_tags", false, "if unset, the gRPC service name is added to the `Tags` field of each operation. If set and the `package` directive is shown in the proto file, the package name will be prepended to the service name") - useFQNForOpenAPIName = flag.Bool("fqn_for_openapi_name", false, "if set, the object's OpenAPI names will use the fully qualified names from the proto definition (ie my.package.MyMessage.MyInnerMessage") + useFQNForOpenAPIName = flag.Bool("fqn_for_openapi_name", false, "if set, the object's OpenAPI names will use the fully qualified names from the proto definition (ie my.package.MyMessage.MyInnerMessage). DEPRECATED: prefer `openapi_naming_strategy=fqn`") + openAPINamingStrategy = flag.String("openapi_naming_strategy", "", "use the given OpenAPI naming strategy. 
Allowed values are `legacy`, `fqn`, `simple`. If unset, either `legacy` or `fqn` are selected, depending on the value of the `fqn_for_openapi_name` flag") useGoTemplate = flag.Bool("use_go_templates", false, "if set, you can use Go templates in protofile comments") disableDefaultErrors = flag.Bool("disable_default_errors", false, "if set, disables generation of default errors. This is useful if you have defined custom error handling") enumsAsInts = flag.Bool("enums_as_ints", false, "whether to render enum values as integers, as opposed to string values") simpleOperationIDs = flag.Bool("simple_operation_ids", false, "whether to remove the service prefix in the operationID generation. Can introduce duplicate operationIDs, use with caution.") + proto3OptionalNullable = flag.Bool("proto3_optional_nullable", false, "whether Proto3 Optional fields should be marked as x-nullable") openAPIConfiguration = flag.String("openapi_configuration", "", "path to file which describes the OpenAPI Configuration in YAML format") generateUnboundMethods = flag.Bool("generate_unbound_methods", false, "generate swagger metadata even for RPC methods that have no HttpRule annotation") + recursiveDepth = flag.Int("recursive-depth", 1000, "maximum recursion count allowed for a field type") ) // Variables set by goreleaser at build time @@ -83,12 +86,32 @@ func main() { reg.SetUseJSONNamesForFields(*useJSONNamesForFields) reg.SetAllowRepeatedFieldsInBody(*allowRepeatedFieldsInBody) reg.SetIncludePackageInTags(*includePackageInTags) + reg.SetUseFQNForOpenAPIName(*useFQNForOpenAPIName) + // Set the naming strategy either directly from the flag, or via the value of the legacy fqn_for_openapi_name + // flag. + namingStrategy := *openAPINamingStrategy + if *useFQNForOpenAPIName { + if namingStrategy != "" { + glog.Fatal("The deprecated `fqn_for_openapi_name` flag must remain unset if `openapi_naming_strategy` is set.") + } + glog.Warning("The `fqn_for_openapi_name` flag is deprecated. 
Please use `openapi_naming_strategy=fqn` instead.") + namingStrategy = "fqn" + } else if namingStrategy == "" { + namingStrategy = "legacy" + } + if strategyFn := genopenapi.LookupNamingStrategy(namingStrategy); strategyFn == nil { + emitError(fmt.Errorf("invalid naming strategy %q", namingStrategy)) + return + } + reg.SetOpenAPINamingStrategy(namingStrategy) reg.SetUseGoTemplate(*useGoTemplate) reg.SetEnumsAsInts(*enumsAsInts) reg.SetDisableDefaultErrors(*disableDefaultErrors) reg.SetSimpleOperationIDs(*simpleOperationIDs) + reg.SetProto3OptionalNullable(*proto3OptionalNullable) reg.SetGenerateUnboundMethods(*generateUnboundMethods) + reg.SetRecursiveDepth(*recursiveDepth) if err := reg.SetRepeatedPathParamSeparator(*repeatedPathParamSeparator); err != nil { emitError(err) return diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/BUILD.bazel index 363699941..1ae1c003c 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/BUILD.bazel +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/BUILD.bazel @@ -13,7 +13,7 @@ filegroup( ) go_library( - name = "go_default_library", + name = "options", embed = [":options_go_proto"], importpath = "github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options", ) @@ -36,3 +36,9 @@ go_proto_library( importpath = "github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options", proto = ":options_proto", ) + +alias( + name = "go_default_library", + actual = ":options", + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/annotations.pb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/annotations.pb.go index a6b02064a..c6b16fc60 100644 --- 
a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/annotations.pb.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/annotations.pb.go @@ -1,15 +1,15 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0-devel -// protoc v3.13.0 +// protoc-gen-go v1.27.1 +// protoc v3.17.3 // source: protoc-gen-openapiv2/options/annotations.proto package options import ( - descriptor "github.com/golang/protobuf/protoc-gen-go/descriptor" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" reflect "reflect" ) @@ -22,7 +22,7 @@ const ( var file_protoc_gen_openapiv2_options_annotations_proto_extTypes = []protoimpl.ExtensionInfo{ { - ExtendedType: (*descriptor.FileOptions)(nil), + ExtendedType: (*descriptorpb.FileOptions)(nil), ExtensionType: (*Swagger)(nil), Field: 1042, Name: "grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger", @@ -30,7 +30,7 @@ var file_protoc_gen_openapiv2_options_annotations_proto_extTypes = []protoimpl.E Filename: "protoc-gen-openapiv2/options/annotations.proto", }, { - ExtendedType: (*descriptor.MethodOptions)(nil), + ExtendedType: (*descriptorpb.MethodOptions)(nil), ExtensionType: (*Operation)(nil), Field: 1042, Name: "grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation", @@ -38,7 +38,7 @@ var file_protoc_gen_openapiv2_options_annotations_proto_extTypes = []protoimpl.E Filename: "protoc-gen-openapiv2/options/annotations.proto", }, { - ExtendedType: (*descriptor.MessageOptions)(nil), + ExtendedType: (*descriptorpb.MessageOptions)(nil), ExtensionType: (*Schema)(nil), Field: 1042, Name: "grpc.gateway.protoc_gen_openapiv2.options.openapiv2_schema", @@ -46,7 +46,7 @@ var file_protoc_gen_openapiv2_options_annotations_proto_extTypes = []protoimpl.E Filename: "protoc-gen-openapiv2/options/annotations.proto", }, { - 
ExtendedType: (*descriptor.ServiceOptions)(nil), + ExtendedType: (*descriptorpb.ServiceOptions)(nil), ExtensionType: (*Tag)(nil), Field: 1042, Name: "grpc.gateway.protoc_gen_openapiv2.options.openapiv2_tag", @@ -54,7 +54,7 @@ var file_protoc_gen_openapiv2_options_annotations_proto_extTypes = []protoimpl.E Filename: "protoc-gen-openapiv2/options/annotations.proto", }, { - ExtendedType: (*descriptor.FieldOptions)(nil), + ExtendedType: (*descriptorpb.FieldOptions)(nil), ExtensionType: (*JSONSchema)(nil), Field: 1042, Name: "grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field", @@ -63,7 +63,7 @@ var file_protoc_gen_openapiv2_options_annotations_proto_extTypes = []protoimpl.E }, } -// Extension fields to descriptor.FileOptions. +// Extension fields to descriptorpb.FileOptions. var ( // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project. // @@ -74,7 +74,7 @@ var ( E_Openapiv2Swagger = &file_protoc_gen_openapiv2_options_annotations_proto_extTypes[0] ) -// Extension fields to descriptor.MethodOptions. +// Extension fields to descriptorpb.MethodOptions. var ( // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project. // @@ -85,7 +85,7 @@ var ( E_Openapiv2Operation = &file_protoc_gen_openapiv2_options_annotations_proto_extTypes[1] ) -// Extension fields to descriptor.MessageOptions. +// Extension fields to descriptorpb.MessageOptions. var ( // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project. // @@ -96,7 +96,7 @@ var ( E_Openapiv2Schema = &file_protoc_gen_openapiv2_options_annotations_proto_extTypes[2] ) -// Extension fields to descriptor.ServiceOptions. +// Extension fields to descriptorpb.ServiceOptions. var ( // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project. // @@ -107,7 +107,7 @@ var ( E_Openapiv2Tag = &file_protoc_gen_openapiv2_options_annotations_proto_extTypes[3] ) -// Extension fields to descriptor.FieldOptions. 
+// Extension fields to descriptorpb.FieldOptions. var ( // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project. // @@ -180,16 +180,16 @@ var file_protoc_gen_openapiv2_options_annotations_proto_rawDesc = []byte{ } var file_protoc_gen_openapiv2_options_annotations_proto_goTypes = []interface{}{ - (*descriptor.FileOptions)(nil), // 0: google.protobuf.FileOptions - (*descriptor.MethodOptions)(nil), // 1: google.protobuf.MethodOptions - (*descriptor.MessageOptions)(nil), // 2: google.protobuf.MessageOptions - (*descriptor.ServiceOptions)(nil), // 3: google.protobuf.ServiceOptions - (*descriptor.FieldOptions)(nil), // 4: google.protobuf.FieldOptions - (*Swagger)(nil), // 5: grpc.gateway.protoc_gen_openapiv2.options.Swagger - (*Operation)(nil), // 6: grpc.gateway.protoc_gen_openapiv2.options.Operation - (*Schema)(nil), // 7: grpc.gateway.protoc_gen_openapiv2.options.Schema - (*Tag)(nil), // 8: grpc.gateway.protoc_gen_openapiv2.options.Tag - (*JSONSchema)(nil), // 9: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema + (*descriptorpb.FileOptions)(nil), // 0: google.protobuf.FileOptions + (*descriptorpb.MethodOptions)(nil), // 1: google.protobuf.MethodOptions + (*descriptorpb.MessageOptions)(nil), // 2: google.protobuf.MessageOptions + (*descriptorpb.ServiceOptions)(nil), // 3: google.protobuf.ServiceOptions + (*descriptorpb.FieldOptions)(nil), // 4: google.protobuf.FieldOptions + (*Swagger)(nil), // 5: grpc.gateway.protoc_gen_openapiv2.options.Swagger + (*Operation)(nil), // 6: grpc.gateway.protoc_gen_openapiv2.options.Operation + (*Schema)(nil), // 7: grpc.gateway.protoc_gen_openapiv2.options.Schema + (*Tag)(nil), // 8: grpc.gateway.protoc_gen_openapiv2.options.Tag + (*JSONSchema)(nil), // 9: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema } var file_protoc_gen_openapiv2_options_annotations_proto_depIdxs = []int32{ 0, // 0: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger:extendee -> google.protobuf.FileOptions diff 
--git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/annotations.swagger.json b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/annotations.swagger.json index e1c57d8cb..c08c00fb1 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/annotations.swagger.json +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/annotations.swagger.json @@ -15,14 +15,11 @@ "protobufAny": { "type": "object", "properties": { - "typeUrl": { + "@type": { "type": "string" - }, - "value": { - "type": "string", - "format": "byte" } - } + }, + "additionalProperties": {} }, "rpcStatus": { "type": "object", diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/openapiv2.pb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/openapiv2.pb.go index f393af01c..06c6c56ea 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/openapiv2.pb.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/openapiv2.pb.go @@ -1,15 +1,15 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0-devel -// protoc v3.13.0 +// protoc-gen-go v1.27.1 +// protoc v3.17.3 // source: protoc-gen-openapiv2/options/openapiv2.proto package options import ( - _struct "github.com/golang/protobuf/ptypes/struct" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + structpb "google.golang.org/protobuf/types/known/structpb" reflect "reflect" sync "sync" ) @@ -379,8 +379,8 @@ type Swagger struct { // Individual operations can override this definition. Security []*SecurityRequirement `protobuf:"bytes,12,rep,name=security,proto3" json:"security,omitempty"` // Additional external documentation. 
- ExternalDocs *ExternalDocumentation `protobuf:"bytes,14,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"` - Extensions map[string]*_struct.Value `protobuf:"bytes,15,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + ExternalDocs *ExternalDocumentation `protobuf:"bytes,14,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"` + Extensions map[string]*structpb.Value `protobuf:"bytes,15,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } func (x *Swagger) Reset() { @@ -492,7 +492,7 @@ func (x *Swagger) GetExternalDocs() *ExternalDocumentation { return nil } -func (x *Swagger) GetExtensions() map[string]*_struct.Value { +func (x *Swagger) GetExtensions() map[string]*structpb.Value { if x != nil { return x.Extensions } @@ -568,8 +568,8 @@ type Operation struct { // (that is, there is a logical OR between the security requirements). This // definition overrides any declared top-level security. To remove a top-level // security declaration, an empty array can be used. 
- Security []*SecurityRequirement `protobuf:"bytes,12,rep,name=security,proto3" json:"security,omitempty"` - Extensions map[string]*_struct.Value `protobuf:"bytes,13,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Security []*SecurityRequirement `protobuf:"bytes,12,rep,name=security,proto3" json:"security,omitempty"` + Extensions map[string]*structpb.Value `protobuf:"bytes,13,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } func (x *Operation) Reset() { @@ -681,7 +681,7 @@ func (x *Operation) GetSecurity() []*SecurityRequirement { return nil } -func (x *Operation) GetExtensions() map[string]*_struct.Value { +func (x *Operation) GetExtensions() map[string]*structpb.Value { if x != nil { return x.Extensions } @@ -799,8 +799,8 @@ type Response struct { Headers map[string]*Header `protobuf:"bytes,3,rep,name=headers,proto3" json:"headers,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // `Examples` gives per-mimetype response examples. 
// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#example-object - Examples map[string]string `protobuf:"bytes,4,rep,name=examples,proto3" json:"examples,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - Extensions map[string]*_struct.Value `protobuf:"bytes,5,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Examples map[string]string `protobuf:"bytes,4,rep,name=examples,proto3" json:"examples,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Extensions map[string]*structpb.Value `protobuf:"bytes,5,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } func (x *Response) Reset() { @@ -863,7 +863,7 @@ func (x *Response) GetExamples() map[string]string { return nil } -func (x *Response) GetExtensions() map[string]*_struct.Value { +func (x *Response) GetExtensions() map[string]*structpb.Value { if x != nil { return x.Extensions } @@ -912,8 +912,8 @@ type Info struct { License *License `protobuf:"bytes,5,opt,name=license,proto3" json:"license,omitempty"` // Provides the version of the application API (not to be confused // with the specification version). 
- Version string `protobuf:"bytes,6,opt,name=version,proto3" json:"version,omitempty"` - Extensions map[string]*_struct.Value `protobuf:"bytes,7,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Version string `protobuf:"bytes,6,opt,name=version,proto3" json:"version,omitempty"` + Extensions map[string]*structpb.Value `protobuf:"bytes,7,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } func (x *Info) Reset() { @@ -990,7 +990,7 @@ func (x *Info) GetVersion() string { return "" } -func (x *Info) GetExtensions() map[string]*_struct.Value { +func (x *Info) GetExtensions() map[string]*structpb.Value { if x != nil { return x.Extensions } @@ -1766,8 +1766,8 @@ type SecurityScheme struct { TokenUrl string `protobuf:"bytes,7,opt,name=token_url,json=tokenUrl,proto3" json:"token_url,omitempty"` // The available scopes for the OAuth2 security scheme. // Valid for oauth2. 
- Scopes *Scopes `protobuf:"bytes,8,opt,name=scopes,proto3" json:"scopes,omitempty"` - Extensions map[string]*_struct.Value `protobuf:"bytes,9,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Scopes *Scopes `protobuf:"bytes,8,opt,name=scopes,proto3" json:"scopes,omitempty"` + Extensions map[string]*structpb.Value `protobuf:"bytes,9,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } func (x *SecurityScheme) Reset() { @@ -1858,7 +1858,7 @@ func (x *SecurityScheme) GetScopes() *Scopes { return nil } -func (x *SecurityScheme) GetExtensions() map[string]*_struct.Value { +func (x *SecurityScheme) GetExtensions() map[string]*structpb.Value { if x != nil { return x.Extensions } @@ -2511,9 +2511,9 @@ var file_protoc_gen_openapiv2_options_openapiv2_proto_goTypes = []interface{}{ nil, // 28: grpc.gateway.protoc_gen_openapiv2.options.SecurityDefinitions.SecurityEntry nil, // 29: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.ExtensionsEntry (*SecurityRequirement_SecurityRequirementValue)(nil), // 30: grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement.SecurityRequirementValue - nil, // 31: grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement.SecurityRequirementEntry - nil, // 32: grpc.gateway.protoc_gen_openapiv2.options.Scopes.ScopeEntry - (*_struct.Value)(nil), // 33: google.protobuf.Value + nil, // 31: grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement.SecurityRequirementEntry + nil, // 32: grpc.gateway.protoc_gen_openapiv2.options.Scopes.ScopeEntry + (*structpb.Value)(nil), // 33: google.protobuf.Value } var file_protoc_gen_openapiv2_options_openapiv2_proto_depIdxs = []int32{ 9, // 0: grpc.gateway.protoc_gen_openapiv2.options.Swagger.info:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Info diff --git 
a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/openapiv2.swagger.json b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/openapiv2.swagger.json index c6aad71c9..3f4f97c2d 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/openapiv2.swagger.json +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/openapiv2.swagger.json @@ -15,14 +15,11 @@ "protobufAny": { "type": "object", "properties": { - "typeUrl": { + "@type": { "type": "string" - }, - "value": { - "type": "string", - "format": "byte" } - } + }, + "additionalProperties": {} }, "rpcStatus": { "type": "object", diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel index 68e5f4408..95f867a52 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel @@ -3,7 +3,7 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") package(default_visibility = ["//visibility:public"]) go_library( - name = "go_default_library", + name = "runtime", srcs = [ "context.go", "convert.go", @@ -24,27 +24,26 @@ go_library( ], importpath = "github.com/grpc-ecosystem/grpc-gateway/v2/runtime", deps = [ - "//internal/httprule:go_default_library", - "//utilities:go_default_library", - "@com_github_golang_protobuf//ptypes:go_default_library_gen", + "//internal/httprule", + "//utilities", "@go_googleapis//google/api:httpbody_go_proto", - "@io_bazel_rules_go//proto/wkt:duration_go_proto", "@io_bazel_rules_go//proto/wkt:field_mask_go_proto", - "@io_bazel_rules_go//proto/wkt:timestamp_go_proto", - "@io_bazel_rules_go//proto/wkt:wrappers_go_proto", - "@org_golang_google_grpc//codes:go_default_library", - "@org_golang_google_grpc//grpclog:go_default_library", - 
"@org_golang_google_grpc//metadata:go_default_library", - "@org_golang_google_grpc//status:go_default_library", - "@org_golang_google_protobuf//encoding/protojson:go_default_library", - "@org_golang_google_protobuf//proto:go_default_library", - "@org_golang_google_protobuf//reflect/protoreflect:go_default_library", - "@org_golang_google_protobuf//reflect/protoregistry:go_default_library", + "@org_golang_google_grpc//codes", + "@org_golang_google_grpc//grpclog", + "@org_golang_google_grpc//metadata", + "@org_golang_google_grpc//status", + "@org_golang_google_protobuf//encoding/protojson", + "@org_golang_google_protobuf//proto", + "@org_golang_google_protobuf//reflect/protoreflect", + "@org_golang_google_protobuf//reflect/protoregistry", + "@org_golang_google_protobuf//types/known/durationpb", + "@org_golang_google_protobuf//types/known/timestamppb", + "@org_golang_google_protobuf//types/known/wrapperspb", ], ) go_test( - name = "go_default_test", + name = "runtime_test", size = "small", srcs = [ "context_test.go", @@ -61,27 +60,32 @@ go_test( "pattern_test.go", "query_test.go", ], - embed = [":go_default_library"], + embed = [":runtime"], deps = [ - "//runtime/internal/examplepb:go_default_library", - "//utilities:go_default_library", - "@com_github_golang_protobuf//ptypes:go_default_library_gen", - "@com_github_google_go_cmp//cmp:go_default_library", - "@com_github_google_go_cmp//cmp/cmpopts:go_default_library", + "//runtime/internal/examplepb", + "//utilities", + "@com_github_google_go_cmp//cmp", + "@com_github_google_go_cmp//cmp/cmpopts", "@go_googleapis//google/api:httpbody_go_proto", "@go_googleapis//google/rpc:errdetails_go_proto", "@go_googleapis//google/rpc:status_go_proto", - "@io_bazel_rules_go//proto/wkt:duration_go_proto", - "@io_bazel_rules_go//proto/wkt:empty_go_proto", "@io_bazel_rules_go//proto/wkt:field_mask_go_proto", - "@io_bazel_rules_go//proto/wkt:struct_go_proto", - "@io_bazel_rules_go//proto/wkt:timestamp_go_proto", - 
"@io_bazel_rules_go//proto/wkt:wrappers_go_proto", - "@org_golang_google_grpc//codes:go_default_library", - "@org_golang_google_grpc//metadata:go_default_library", - "@org_golang_google_grpc//status:go_default_library", - "@org_golang_google_protobuf//encoding/protojson:go_default_library", - "@org_golang_google_protobuf//proto:go_default_library", - "@org_golang_google_protobuf//testing/protocmp:go_default_library", + "@org_golang_google_grpc//codes", + "@org_golang_google_grpc//metadata", + "@org_golang_google_grpc//status", + "@org_golang_google_protobuf//encoding/protojson", + "@org_golang_google_protobuf//proto", + "@org_golang_google_protobuf//testing/protocmp", + "@org_golang_google_protobuf//types/known/durationpb", + "@org_golang_google_protobuf//types/known/emptypb", + "@org_golang_google_protobuf//types/known/structpb", + "@org_golang_google_protobuf//types/known/timestamppb", + "@org_golang_google_protobuf//types/known/wrapperspb", ], ) + +alias( + name = "go_default_library", + actual = ":runtime", + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go index f42243a00..fb57b9366 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go @@ -41,7 +41,18 @@ var ( DefaultContextTimeout = 0 * time.Second ) -type rpcMethodKey struct{} +type ( + rpcMethodKey struct{} + httpPathPatternKey struct{} + + AnnotateContextOption func(ctx context.Context) context.Context +) + +func WithHTTPPathPattern(pattern string) AnnotateContextOption { + return func(ctx context.Context) context.Context { + return withHTTPPathPattern(ctx, pattern) + } +} func decodeBinHeader(v string) ([]byte, error) { if len(v)%4 == 0 { @@ -58,8 +69,8 @@ At a minimum, the RemoteAddr is included in the fashion of "X-Forwarded-For", except that the forwarded 
destination is not another HTTP service but rather a gRPC service. */ -func AnnotateContext(ctx context.Context, mux *ServeMux, req *http.Request, rpcMethodName string) (context.Context, error) { - ctx, md, err := annotateContext(ctx, mux, req, rpcMethodName) +func AnnotateContext(ctx context.Context, mux *ServeMux, req *http.Request, rpcMethodName string, options ...AnnotateContextOption) (context.Context, error) { + ctx, md, err := annotateContext(ctx, mux, req, rpcMethodName, options...) if err != nil { return nil, err } @@ -72,8 +83,8 @@ func AnnotateContext(ctx context.Context, mux *ServeMux, req *http.Request, rpcM // AnnotateIncomingContext adds context information such as metadata from the request. // Attach metadata as incoming context. -func AnnotateIncomingContext(ctx context.Context, mux *ServeMux, req *http.Request, rpcMethodName string) (context.Context, error) { - ctx, md, err := annotateContext(ctx, mux, req, rpcMethodName) +func AnnotateIncomingContext(ctx context.Context, mux *ServeMux, req *http.Request, rpcMethodName string, options ...AnnotateContextOption) (context.Context, error) { + ctx, md, err := annotateContext(ctx, mux, req, rpcMethodName, options...) 
if err != nil { return nil, err } @@ -84,8 +95,11 @@ func AnnotateIncomingContext(ctx context.Context, mux *ServeMux, req *http.Reque return metadata.NewIncomingContext(ctx, md), nil } -func annotateContext(ctx context.Context, mux *ServeMux, req *http.Request, rpcMethodName string) (context.Context, metadata.MD, error) { +func annotateContext(ctx context.Context, mux *ServeMux, req *http.Request, rpcMethodName string, options ...AnnotateContextOption) (context.Context, metadata.MD, error) { ctx = withRPCMethod(ctx, rpcMethodName) + for _, o := range options { + ctx = o(ctx) + } var pairs []string timeout := DefaultContextTimeout if tm := req.Header.Get(metadataGrpcTimeout); tm != "" { @@ -311,3 +325,21 @@ func RPCMethod(ctx context.Context) (string, bool) { func withRPCMethod(ctx context.Context, rpcMethodName string) context.Context { return context.WithValue(ctx, rpcMethodKey{}, rpcMethodName) } + +// HTTPPathPattern returns the HTTP path pattern string relating to the HTTP handler, if one exists. +// The format of the returned string is defined by the google.api.http path template type. 
+func HTTPPathPattern(ctx context.Context) (string, bool) { + m := ctx.Value(httpPathPatternKey{}) + if m == nil { + return "", false + } + ms, ok := m.(string) + if !ok { + return "", false + } + return ms, true +} + +func withHTTPPathPattern(ctx context.Context, httpPathPattern string) context.Context { + return context.WithValue(ctx, httpPathPatternKey{}, httpPathPattern) +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go index e122dd339..2deef8b48 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go @@ -6,10 +6,10 @@ import ( "strconv" "strings" - durationpb "github.com/golang/protobuf/ptypes/duration" - timestamppb "github.com/golang/protobuf/ptypes/timestamp" - wrapperspb "github.com/golang/protobuf/ptypes/wrappers" "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/types/known/durationpb" + "google.golang.org/protobuf/types/known/timestamppb" + "google.golang.org/protobuf/types/known/wrapperspb" ) // String just returns the given string. diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go index 9f2074533..d9e0013c4 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go @@ -2,9 +2,9 @@ package runtime import ( "context" + "errors" "io" "net/http" - "strings" "google.golang.org/grpc/codes" "google.golang.org/grpc/grpclog" @@ -20,6 +20,17 @@ type StreamErrorHandlerFunc func(context.Context, error) *status.Status // RoutingErrorHandlerFunc is the signature used to configure error handling for routing errors. 
type RoutingErrorHandlerFunc func(context.Context, *ServeMux, Marshaler, http.ResponseWriter, *http.Request, int) +// HTTPStatusError is the error to use when needing to provide a different HTTP status code for an error +// passed to the DefaultRoutingErrorHandler. +type HTTPStatusError struct { + HTTPStatus int + Err error +} + +func (e *HTTPStatusError) Error() string { + return e.Err.Error() +} + // HTTPStatusFromCode converts a gRPC error code into the corresponding HTTP response status. // See: https://github.com/googleapis/googleapis/blob/master/google/rpc/code.proto func HTTPStatusFromCode(code codes.Code) int { @@ -72,6 +83,10 @@ func HTTPError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.R // DefaultHTTPErrorHandler is the default error handler. // If "err" is a gRPC Status, the function replies with the status code mapped by HTTPStatusFromCode. +// If "err" is a HTTPStatusError, the function replies with the status code provide by that struct. This is +// intended to allow passing through of specific statuses via the function set via WithRoutingErrorHandler +// for the ServeMux constructor to handle edge cases which the standard mappings in HTTPStatusFromCode +// are insufficient for. // If otherwise, it replies with http.StatusInternalServerError. // // The response body written by this function is a Status message marshaled by the Marshaler. 
@@ -79,6 +94,11 @@ func DefaultHTTPErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marsh // return Internal when Marshal failed const fallback = `{"code": 13, "message": "failed to marshal error message"}` + var customStatus *HTTPStatusError + if errors.As(err, &customStatus) { + err = customStatus.Err + } + s := status.Convert(err) pb := s.Proto() @@ -88,6 +108,10 @@ func DefaultHTTPErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marsh contentType := marshaler.ContentType(pb) w.Header().Set("Content-Type", contentType) + if s.Code() == codes.Unauthenticated { + w.Header().Set("WWW-Authenticate", s.Message()) + } + buf, merr := marshaler.Marshal(pb) if merr != nil { grpclog.Infof("Failed to marshal error message %q: %v", s, merr) @@ -110,21 +134,24 @@ func DefaultHTTPErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marsh // is acceptable, as described in Section 4.3, a server SHOULD NOT // generate trailer fields that it believes are necessary for the user // agent to receive. 
- var wantsTrailers bool + doForwardTrailers := requestAcceptsTrailers(r) - if te := r.Header.Get("TE"); strings.Contains(strings.ToLower(te), "trailers") { - wantsTrailers = true + if doForwardTrailers { handleForwardResponseTrailerHeader(w, md) w.Header().Set("Transfer-Encoding", "chunked") } st := HTTPStatusFromCode(s.Code()) + if customStatus != nil { + st = customStatus.HTTPStatus + } + w.WriteHeader(st) if _, err := w.Write(buf); err != nil { grpclog.Infof("Failed to write response: %v", err) } - if wantsTrailers { + if doForwardTrailers { handleForwardResponseTrailer(w, md) } } diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go index 0445fda28..0138ed2f7 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go @@ -63,6 +63,17 @@ func FieldMaskFromRequestBody(r io.Reader, msg proto.Message) (*field_mask.Field continue } + if isProtobufAnyMessage(fd.Message()) { + _, hasTypeField := v.(map[string]interface{})["@type"] + if hasTypeField { + queue = append(queue, fieldMaskPathItem{path: k}) + continue + } else { + return nil, fmt.Errorf("could not find field @type in %q in message %q", k, item.msg.Descriptor().FullName()) + } + + } + child := fieldMaskPathItem{ node: v, } @@ -97,6 +108,10 @@ func FieldMaskFromRequestBody(r io.Reader, msg proto.Message) (*field_mask.Field return fm, nil } +func isProtobufAnyMessage(md protoreflect.MessageDescriptor) bool { + return md != nil && (md.FullName() == "google.protobuf.Any") +} + func isDynamicProtoMessage(md protoreflect.MessageDescriptor) bool { return md != nil && (md.FullName() == "google.protobuf.Struct" || md.FullName() == "google.protobuf.Value") } diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go index 
2628c2b1c..d1e21df48 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go @@ -6,6 +6,7 @@ import ( "io" "net/http" "net/textproto" + "strings" "google.golang.org/genproto/googleapis/api/httpbody" "google.golang.org/grpc/codes" @@ -137,6 +138,19 @@ func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marsha } handleForwardResponseServerMetadata(w, mux, md) + + // RFC 7230 https://tools.ietf.org/html/rfc7230#section-4.1.2 + // Unless the request includes a TE header field indicating "trailers" + // is acceptable, as described in Section 4.3, a server SHOULD NOT + // generate trailer fields that it believes are necessary for the user + // agent to receive. + doForwardTrailers := requestAcceptsTrailers(req) + + if doForwardTrailers { + handleForwardResponseTrailerHeader(w, md) + w.Header().Set("Transfer-Encoding", "chunked") + } + handleForwardResponseTrailerHeader(w, md) contentType := marshaler.ContentType(resp) @@ -163,7 +177,14 @@ func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marsha grpclog.Infof("Failed to write response: %v", err) } - handleForwardResponseTrailer(w, md) + if doForwardTrailers { + handleForwardResponseTrailer(w, md) + } +} + +func requestAcceptsTrailers(req *http.Request) bool { + te := req.Header.Get("TE") + return strings.Contains(strings.ToLower(te), "trailers") } func handleForwardResponseOptions(ctx context.Context, w http.ResponseWriter, resp proto.Message, opts []func(context.Context, http.ResponseWriter, proto.Message) error) error { @@ -181,10 +202,12 @@ func handleForwardResponseOptions(ctx context.Context, w http.ResponseWriter, re func handleForwardResponseStreamError(ctx context.Context, wroteHeader bool, marshaler Marshaler, w http.ResponseWriter, req *http.Request, mux *ServeMux, err error) { st := mux.streamErrorHandler(ctx, err) + msg := errorChunk(st) if !wroteHeader { + 
w.Header().Set("Content-Type", marshaler.ContentType(msg)) w.WriteHeader(HTTPStatusFromCode(st.Code())) } - buf, merr := marshaler.Marshal(errorChunk(st)) + buf, merr := marshaler.Marshal(msg) if merr != nil { grpclog.Infof("Failed to marshal an error: %v", merr) return diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go index 38ac3ddff..7387c8e39 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go @@ -6,6 +6,7 @@ import ( "fmt" "io" "reflect" + "strconv" "google.golang.org/protobuf/encoding/protojson" "google.golang.org/protobuf/proto" @@ -113,6 +114,36 @@ func (j *JSONPb) marshalNonProtoField(v interface{}) ([]byte, error) { return buf.Bytes(), nil } + + if rv.Type().Elem().Implements(typeProtoEnum) { + var buf bytes.Buffer + err := buf.WriteByte('[') + if err != nil { + return nil, err + } + for i := 0; i < rv.Len(); i++ { + if i != 0 { + err = buf.WriteByte(',') + if err != nil { + return nil, err + } + } + if j.UseEnumNumbers { + _, err = buf.WriteString(strconv.FormatInt(rv.Index(i).Int(), 10)) + } else { + _, err = buf.WriteString("\"" + rv.Index(i).Interface().(protoEnum).String() + "\"") + } + if err != nil { + return nil, err + } + } + err = buf.WriteByte(']') + if err != nil { + return nil, err + } + + return buf.Bytes(), nil + } } if rv.Kind() == reflect.Map { @@ -237,6 +268,10 @@ func decodeNonProtoField(d *json.Decoder, unmarshaler protojson.UnmarshalOptions } bk := result[0] bv := reflect.New(rv.Type().Elem()) + if v == nil { + null := json.RawMessage("null") + v = &null + } if err := unmarshalJSONPb([]byte(*v), unmarshaler, bv.Interface()); err != nil { return err } @@ -285,6 +320,8 @@ type protoEnum interface { EnumDescriptor() ([]byte, []int) } +var typeProtoEnum = reflect.TypeOf((*protoEnum)(nil)).Elem() + var 
typeProtoMessage = reflect.TypeOf((*proto.Message)(nil)).Elem() // Delimiter for newline encoded JSON streams. diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go index 10fd357b1..cfbe2306a 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go @@ -178,7 +178,7 @@ func (s *ServeMux) Handle(meth string, pat Pattern, h HandlerFunc) { } // HandlePath allows users to configure custom path handlers. -// refer: https://grpc-ecosystem.github.io/grpc-gateway/docs/inject_router.html +// refer: https://grpc-ecosystem.github.io/grpc-gateway/docs/operations/inject_router/ func (s *ServeMux) HandlePath(meth string, pathPattern string, h HandlerFunc) error { compiler, err := httprule.Parse(pathPattern) if err != nil { diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go index 1365a4dd9..fb0c84ef0 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go @@ -10,14 +10,15 @@ import ( "strings" "time" - "github.com/golang/protobuf/ptypes" - wrapperspb "github.com/golang/protobuf/ptypes/wrappers" "github.com/grpc-ecosystem/grpc-gateway/v2/utilities" "google.golang.org/genproto/protobuf/field_mask" "google.golang.org/grpc/grpclog" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/types/known/durationpb" + "google.golang.org/protobuf/types/known/timestamppb" + "google.golang.org/protobuf/types/known/wrapperspb" ) var valuesKeyRegexp = regexp.MustCompile(`^(.*)\[(.*)\]$`) @@ -256,10 +257,7 @@ func parseMessage(msgDescriptor protoreflect.MessageDescriptor, value string) (p if err != nil { return 
protoreflect.Value{}, err } - msg, err = ptypes.TimestampProto(t) - if err != nil { - return protoreflect.Value{}, err - } + msg = timestamppb.New(t) case "google.protobuf.Duration": if value == "null" { break @@ -268,7 +266,7 @@ func parseMessage(msgDescriptor protoreflect.MessageDescriptor, value string) (p if err != nil { return protoreflect.Value{}, err } - msg = ptypes.DurationProto(d) + msg = durationpb.New(d) case "google.protobuf.DoubleValue": v, err := strconv.ParseFloat(value, 64) if err != nil { diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/BUILD.bazel index 6bb7df467..f118ab323 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/BUILD.bazel +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/BUILD.bazel @@ -3,7 +3,7 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") package(default_visibility = ["//visibility:public"]) go_library( - name = "go_default_library", + name = "utilities", srcs = [ "doc.go", "pattern.go", @@ -14,8 +14,14 @@ go_library( ) go_test( - name = "go_default_test", + name = "utilities_test", size = "small", srcs = ["trie_test.go"], - embed = [":go_default_library"], + embed = [":utilities"], +) + +alias( + name = "go_default_library", + actual = ":utilities", + visibility = ["//visibility:public"], ) diff --git a/vendor/github.com/heroiclabs/nakama-common/rtapi/realtime.pb.go b/vendor/github.com/heroiclabs/nakama-common/rtapi/realtime.pb.go index 8596d7b42..66f237e31 100644 --- a/vendor/github.com/heroiclabs/nakama-common/rtapi/realtime.pb.go +++ b/vendor/github.com/heroiclabs/nakama-common/rtapi/realtime.pb.go @@ -17,14 +17,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.25.0 +// protoc-gen-go v1.26.0 // protoc v3.17.3 // source: realtime.proto package rtapi import ( - proto "github.com/golang/protobuf/proto" api "github.com/heroiclabs/nakama-common/api" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" @@ -41,10 +40,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - // The type of chat channel. type ChannelJoin_Type int32 @@ -1790,6 +1785,9 @@ type MatchCreate struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + + // Optional name to use when creating the match. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` } func (x *MatchCreate) Reset() { @@ -1824,6 +1822,13 @@ func (*MatchCreate) Descriptor() ([]byte, []int) { return file_realtime_proto_rawDescGZIP(), []int{11} } +func (x *MatchCreate) GetName() string { + if x != nil { + return x.Name + } + return "" +} + // Realtime match data received from the server. 
type MatchData struct { state protoimpl.MessageState @@ -4696,70 +4701,220 @@ var file_realtime_proto_rawDesc = []byte{ 0x31, 0x0a, 0x04, 0x73, 0x65, 0x6c, 0x66, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x6e, 0x61, 0x6b, 0x61, 0x6d, 0x61, 0x2e, 0x72, 0x65, 0x61, 0x6c, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x55, 0x73, 0x65, 0x72, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x04, 0x73, 0x65, - 0x6c, 0x66, 0x22, 0x0d, 0x0a, 0x0b, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x43, 0x72, 0x65, 0x61, 0x74, - 0x65, 0x22, 0xaa, 0x01, 0x0a, 0x09, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x44, 0x61, 0x74, 0x61, 0x12, + 0x6c, 0x66, 0x22, 0x21, 0x0a, 0x0b, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x43, 0x72, 0x65, 0x61, 0x74, + 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xaa, 0x01, 0x0a, 0x09, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x44, + 0x61, 0x74, 0x61, 0x12, 0x19, 0x0a, 0x08, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x49, 0x64, 0x12, 0x39, + 0x0a, 0x08, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1d, 0x2e, 0x6e, 0x61, 0x6b, 0x61, 0x6d, 0x61, 0x2e, 0x72, 0x65, 0x61, 0x6c, 0x74, 0x69, + 0x6d, 0x65, 0x2e, 0x55, 0x73, 0x65, 0x72, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x52, + 0x08, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x6f, 0x70, 0x5f, + 0x63, 0x6f, 0x64, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x6f, 0x70, 0x43, 0x6f, + 0x64, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x6c, 0x69, 0x61, 0x62, + 0x6c, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x6c, 0x69, 0x61, 0x62, + 0x6c, 0x65, 0x22, 0xb0, 0x01, 0x0a, 0x0d, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x44, 0x61, 0x74, 0x61, + 0x53, 0x65, 0x6e, 
0x64, 0x12, 0x19, 0x0a, 0x08, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x69, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x49, 0x64, 0x12, + 0x17, 0x0a, 0x07, 0x6f, 0x70, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x06, 0x6f, 0x70, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x3b, 0x0a, 0x09, + 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x1d, 0x2e, 0x6e, 0x61, 0x6b, 0x61, 0x6d, 0x61, 0x2e, 0x72, 0x65, 0x61, 0x6c, 0x74, 0x69, 0x6d, + 0x65, 0x2e, 0x55, 0x73, 0x65, 0x72, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x09, + 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x6c, + 0x69, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x6c, + 0x69, 0x61, 0x62, 0x6c, 0x65, 0x22, 0xc9, 0x01, 0x0a, 0x09, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4a, + 0x6f, 0x69, 0x6e, 0x12, 0x1b, 0x0a, 0x08, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x07, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x49, 0x64, + 0x12, 0x16, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, + 0x00, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x44, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x6e, 0x61, 0x6b, + 0x61, 0x6d, 0x61, 0x2e, 0x72, 0x65, 0x61, 0x6c, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x4a, 0x6f, 0x69, 0x6e, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x1a, 0x3b, + 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, + 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 
0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, + 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x04, 0x0a, 0x02, 0x69, + 0x64, 0x22, 0x27, 0x0a, 0x0a, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4c, 0x65, 0x61, 0x76, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x07, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x49, 0x64, 0x12, 0x39, 0x0a, 0x08, 0x70, 0x72, - 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x6e, - 0x61, 0x6b, 0x61, 0x6d, 0x61, 0x2e, 0x72, 0x65, 0x61, 0x6c, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x55, - 0x73, 0x65, 0x72, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x08, 0x70, 0x72, 0x65, - 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x6f, 0x70, 0x5f, 0x63, 0x6f, 0x64, 0x65, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x6f, 0x70, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x12, - 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, - 0x74, 0x61, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x6c, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x6c, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x22, 0xb0, - 0x01, 0x0a, 0x0d, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x44, 0x61, 0x74, 0x61, 0x53, 0x65, 0x6e, 0x64, - 0x12, 0x19, 0x0a, 0x08, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x07, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x6f, - 0x70, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x6f, 0x70, - 0x43, 0x6f, 0x64, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x3b, 0x0a, 0x09, 0x70, 0x72, 0x65, 0x73, - 0x65, 0x6e, 0x63, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x6e, 0x61, 
+ 0x09, 0x52, 0x07, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x49, 0x64, 0x22, 0x9b, 0x01, 0x0a, 0x12, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x45, 0x76, 0x65, 0x6e, + 0x74, 0x12, 0x19, 0x0a, 0x08, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x49, 0x64, 0x12, 0x33, 0x0a, 0x05, + 0x6a, 0x6f, 0x69, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x6e, 0x61, 0x6b, 0x61, 0x6d, 0x61, 0x2e, 0x72, 0x65, 0x61, 0x6c, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x55, 0x73, - 0x65, 0x72, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x09, 0x70, 0x72, 0x65, 0x73, - 0x65, 0x6e, 0x63, 0x65, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x6c, 0x69, 0x61, 0x62, 0x6c, - 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x6c, 0x69, 0x61, 0x62, 0x6c, - 0x65, 0x22, 0xc9, 0x01, 0x0a, 0x09, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4a, 0x6f, 0x69, 0x6e, 0x12, - 0x1b, 0x0a, 0x08, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x48, 0x00, 0x52, 0x07, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x05, - 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x05, 0x74, - 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x44, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, - 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x6e, 0x61, 0x6b, 0x61, 0x6d, 0x61, 0x2e, - 0x72, 0x65, 0x61, 0x6c, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4a, 0x6f, - 0x69, 0x6e, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, - 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x1a, 0x3b, 0x0a, 0x0d, 0x4d, 0x65, - 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, - 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 
0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x04, 0x0a, 0x02, 0x69, 0x64, 0x22, 0x27, 0x0a, - 0x0a, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4c, 0x65, 0x61, 0x76, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x6d, - 0x61, 0x74, 0x63, 0x68, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, - 0x61, 0x74, 0x63, 0x68, 0x49, 0x64, 0x22, 0x9b, 0x01, 0x0a, 0x12, 0x4d, 0x61, 0x74, 0x63, 0x68, - 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x19, 0x0a, - 0x08, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x07, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x49, 0x64, 0x12, 0x33, 0x0a, 0x05, 0x6a, 0x6f, 0x69, 0x6e, - 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x6e, 0x61, 0x6b, 0x61, 0x6d, 0x61, - 0x2e, 0x72, 0x65, 0x61, 0x6c, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x55, 0x73, 0x65, 0x72, 0x50, 0x72, - 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x05, 0x6a, 0x6f, 0x69, 0x6e, 0x73, 0x12, 0x35, 0x0a, - 0x06, 0x6c, 0x65, 0x61, 0x76, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, - 0x6e, 0x61, 0x6b, 0x61, 0x6d, 0x61, 0x2e, 0x72, 0x65, 0x61, 0x6c, 0x74, 0x69, 0x6d, 0x65, 0x2e, - 0x55, 0x73, 0x65, 0x72, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x06, 0x6c, 0x65, - 0x61, 0x76, 0x65, 0x73, 0x22, 0xb3, 0x03, 0x0a, 0x0d, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x6d, 0x61, - 0x6b, 0x65, 0x72, 0x41, 0x64, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x69, 0x6e, 0x5f, 0x63, 0x6f, - 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x6d, 0x69, 0x6e, 0x43, 0x6f, - 0x75, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x6d, 0x61, 0x78, 0x43, 0x6f, 0x75, 0x6e, 0x74, - 0x12, 0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x61, 0x0a, 0x11, 0x73, 0x74, 
0x72, 0x69, 0x6e, 0x67, - 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x34, 0x2e, 0x6e, 0x61, 0x6b, 0x61, 0x6d, 0x61, 0x2e, 0x72, 0x65, 0x61, 0x6c, 0x74, - 0x69, 0x6d, 0x65, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x6d, 0x61, 0x6b, 0x65, 0x72, 0x41, 0x64, - 0x64, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, - 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x10, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x50, - 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x12, 0x64, 0x0a, 0x12, 0x6e, 0x75, 0x6d, - 0x65, 0x72, 0x69, 0x63, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, - 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x6e, 0x61, 0x6b, 0x61, 0x6d, 0x61, 0x2e, 0x72, + 0x65, 0x72, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x05, 0x6a, 0x6f, 0x69, 0x6e, + 0x73, 0x12, 0x35, 0x0a, 0x06, 0x6c, 0x65, 0x61, 0x76, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x1d, 0x2e, 0x6e, 0x61, 0x6b, 0x61, 0x6d, 0x61, 0x2e, 0x72, 0x65, 0x61, 0x6c, 0x74, + 0x69, 0x6d, 0x65, 0x2e, 0x55, 0x73, 0x65, 0x72, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, + 0x52, 0x06, 0x6c, 0x65, 0x61, 0x76, 0x65, 0x73, 0x22, 0xb3, 0x03, 0x0a, 0x0d, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x6d, 0x61, 0x6b, 0x65, 0x72, 0x41, 0x64, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x69, + 0x6e, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x6d, + 0x69, 0x6e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x63, + 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x6d, 0x61, 0x78, 0x43, + 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x61, 0x0a, 0x11, 0x73, 0x74, + 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, + 0x04, 0x20, 
0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x6e, 0x61, 0x6b, 0x61, 0x6d, 0x61, 0x2e, 0x72, + 0x65, 0x61, 0x6c, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x6d, 0x61, 0x6b, + 0x65, 0x72, 0x41, 0x64, 0x64, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x50, 0x72, 0x6f, 0x70, + 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x10, 0x73, 0x74, 0x72, + 0x69, 0x6e, 0x67, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x12, 0x64, 0x0a, + 0x12, 0x6e, 0x75, 0x6d, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, + 0x69, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x6e, 0x61, 0x6b, 0x61, + 0x6d, 0x61, 0x2e, 0x72, 0x65, 0x61, 0x6c, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x6d, 0x61, 0x6b, 0x65, 0x72, 0x41, 0x64, 0x64, 0x2e, 0x4e, 0x75, 0x6d, 0x65, 0x72, 0x69, + 0x63, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x52, 0x11, 0x6e, 0x75, 0x6d, 0x65, 0x72, 0x69, 0x63, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, + 0x69, 0x65, 0x73, 0x1a, 0x43, 0x0a, 0x15, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x50, 0x72, 0x6f, + 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x44, 0x0a, 0x16, 0x4e, 0x75, 0x6d, 0x65, + 0x72, 0x69, 0x63, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xd9, + 0x05, 0x0a, 0x11, 0x4d, 0x61, 0x74, 0x63, 0x68, 
0x6d, 0x61, 0x6b, 0x65, 0x72, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x1b, 0x0a, 0x08, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, + 0x52, 0x07, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x05, 0x74, 0x6f, 0x6b, + 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, + 0x6e, 0x12, 0x47, 0x0a, 0x05, 0x75, 0x73, 0x65, 0x72, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x31, 0x2e, 0x6e, 0x61, 0x6b, 0x61, 0x6d, 0x61, 0x2e, 0x72, 0x65, 0x61, 0x6c, 0x74, 0x69, + 0x6d, 0x65, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x6d, 0x61, 0x6b, 0x65, 0x72, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x64, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x6d, 0x61, 0x6b, 0x65, 0x72, 0x55, + 0x73, 0x65, 0x72, 0x52, 0x05, 0x75, 0x73, 0x65, 0x72, 0x73, 0x12, 0x45, 0x0a, 0x04, 0x73, 0x65, + 0x6c, 0x66, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x6e, 0x61, 0x6b, 0x61, 0x6d, + 0x61, 0x2e, 0x72, 0x65, 0x61, 0x6c, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x6d, 0x61, 0x6b, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, 0x2e, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x6d, 0x61, 0x6b, 0x65, 0x72, 0x55, 0x73, 0x65, 0x72, 0x52, 0x04, 0x73, 0x65, 0x6c, + 0x66, 0x1a, 0xe0, 0x03, 0x0a, 0x0e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x6d, 0x61, 0x6b, 0x65, 0x72, + 0x55, 0x73, 0x65, 0x72, 0x12, 0x39, 0x0a, 0x08, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x6e, 0x61, 0x6b, 0x61, 0x6d, 0x61, 0x2e, + 0x72, 0x65, 0x61, 0x6c, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x55, 0x73, 0x65, 0x72, 0x50, 0x72, 0x65, + 0x73, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x08, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, + 0x19, 0x0a, 0x08, 0x70, 0x61, 0x72, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 
0x01, 0x28, + 0x09, 0x52, 0x07, 0x70, 0x61, 0x72, 0x74, 0x79, 0x49, 0x64, 0x12, 0x74, 0x0a, 0x11, 0x73, 0x74, + 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, + 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x47, 0x2e, 0x6e, 0x61, 0x6b, 0x61, 0x6d, 0x61, 0x2e, 0x72, 0x65, 0x61, 0x6c, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x6d, 0x61, 0x6b, + 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x6d, + 0x61, 0x6b, 0x65, 0x72, 0x55, 0x73, 0x65, 0x72, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x50, + 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x10, + 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, + 0x12, 0x77, 0x0a, 0x12, 0x6e, 0x75, 0x6d, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x70, 0x72, 0x6f, 0x70, + 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x48, 0x2e, 0x6e, + 0x61, 0x6b, 0x61, 0x6d, 0x61, 0x2e, 0x72, 0x65, 0x61, 0x6c, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x6d, 0x61, 0x6b, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, + 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x6d, 0x61, 0x6b, 0x65, 0x72, 0x55, 0x73, 0x65, 0x72, 0x2e, + 0x4e, 0x75, 0x6d, 0x65, 0x72, 0x69, 0x63, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x11, 0x6e, 0x75, 0x6d, 0x65, 0x72, 0x69, 0x63, 0x50, + 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x1a, 0x43, 0x0a, 0x15, 0x53, 0x74, 0x72, + 0x69, 0x6e, 0x67, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x44, + 0x0a, 0x16, 0x4e, 0x75, 
0x6d, 0x65, 0x72, 0x69, 0x63, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, + 0x69, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x3a, 0x02, 0x38, 0x01, 0x42, 0x04, 0x0a, 0x02, 0x69, 0x64, 0x22, 0x2a, 0x0a, 0x10, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x6d, 0x61, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x12, 0x16, + 0x0a, 0x06, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, + 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x2a, 0x0a, 0x10, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x6d, + 0x61, 0x6b, 0x65, 0x72, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x74, 0x69, + 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x69, 0x63, 0x6b, + 0x65, 0x74, 0x22, 0x4f, 0x0a, 0x0d, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x12, 0x3e, 0x0a, 0x0d, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6e, 0x61, 0x6b, + 0x61, 0x6d, 0x61, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x22, 0xf8, 0x01, 0x0a, 0x05, 0x50, 0x61, 0x72, 0x74, 0x79, 0x12, 0x19, 0x0a, + 0x08, 0x70, 0x61, 0x72, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x07, 0x70, 0x61, 0x72, 0x74, 0x79, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6f, 0x70, 0x65, 0x6e, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x04, 0x6f, 0x70, 0x65, 0x6e, 0x12, 0x19, 0x0a, 0x08, + 0x6d, 0x61, 0x78, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x07, + 0x6d, 0x61, 0x78, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x31, 0x0a, 
0x04, 0x73, 0x65, 0x6c, 0x66, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x6e, 0x61, 0x6b, 0x61, 0x6d, 0x61, 0x2e, 0x72, + 0x65, 0x61, 0x6c, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x55, 0x73, 0x65, 0x72, 0x50, 0x72, 0x65, 0x73, + 0x65, 0x6e, 0x63, 0x65, 0x52, 0x04, 0x73, 0x65, 0x6c, 0x66, 0x12, 0x35, 0x0a, 0x06, 0x6c, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x6e, 0x61, 0x6b, + 0x61, 0x6d, 0x61, 0x2e, 0x72, 0x65, 0x61, 0x6c, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x55, 0x73, 0x65, + 0x72, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x06, 0x6c, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x12, 0x3b, 0x0a, 0x09, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x73, 0x18, 0x06, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x6e, 0x61, 0x6b, 0x61, 0x6d, 0x61, 0x2e, 0x72, 0x65, + 0x61, 0x6c, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x55, 0x73, 0x65, 0x72, 0x50, 0x72, 0x65, 0x73, 0x65, + 0x6e, 0x63, 0x65, 0x52, 0x09, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x73, 0x22, 0x3c, + 0x0a, 0x0b, 0x50, 0x61, 0x72, 0x74, 0x79, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x12, 0x12, 0x0a, + 0x04, 0x6f, 0x70, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x04, 0x6f, 0x70, 0x65, + 0x6e, 0x12, 0x19, 0x0a, 0x08, 0x6d, 0x61, 0x78, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x07, 0x6d, 0x61, 0x78, 0x53, 0x69, 0x7a, 0x65, 0x22, 0x26, 0x0a, 0x09, + 0x50, 0x61, 0x72, 0x74, 0x79, 0x4a, 0x6f, 0x69, 0x6e, 0x12, 0x19, 0x0a, 0x08, 0x70, 0x61, 0x72, + 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x72, + 0x74, 0x79, 0x49, 0x64, 0x22, 0x27, 0x0a, 0x0a, 0x50, 0x61, 0x72, 0x74, 0x79, 0x4c, 0x65, 0x61, + 0x76, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x70, 0x61, 0x72, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x72, 0x74, 0x79, 0x49, 0x64, 0x22, 0x64, 0x0a, + 0x0c, 0x50, 0x61, 0x72, 0x74, 0x79, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x12, 0x19, 0x0a, + 
0x08, 0x70, 0x61, 0x72, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x07, 0x70, 0x61, 0x72, 0x74, 0x79, 0x49, 0x64, 0x12, 0x39, 0x0a, 0x08, 0x70, 0x72, 0x65, 0x73, + 0x65, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x6e, 0x61, 0x6b, + 0x61, 0x6d, 0x61, 0x2e, 0x72, 0x65, 0x61, 0x6c, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x55, 0x73, 0x65, + 0x72, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x08, 0x70, 0x72, 0x65, 0x73, 0x65, + 0x6e, 0x63, 0x65, 0x22, 0x63, 0x0a, 0x0b, 0x50, 0x61, 0x72, 0x74, 0x79, 0x4c, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x12, 0x19, 0x0a, 0x08, 0x70, 0x61, 0x72, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x72, 0x74, 0x79, 0x49, 0x64, 0x12, 0x39, 0x0a, + 0x08, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1d, 0x2e, 0x6e, 0x61, 0x6b, 0x61, 0x6d, 0x61, 0x2e, 0x72, 0x65, 0x61, 0x6c, 0x74, 0x69, 0x6d, + 0x65, 0x2e, 0x55, 0x73, 0x65, 0x72, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x08, + 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x22, 0x63, 0x0a, 0x0b, 0x50, 0x61, 0x72, 0x74, + 0x79, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x70, 0x61, 0x72, 0x74, 0x79, + 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x72, 0x74, 0x79, + 0x49, 0x64, 0x12, 0x39, 0x0a, 0x08, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x6e, 0x61, 0x6b, 0x61, 0x6d, 0x61, 0x2e, 0x72, 0x65, + 0x61, 0x6c, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x55, 0x73, 0x65, 0x72, 0x50, 0x72, 0x65, 0x73, 0x65, + 0x6e, 0x63, 0x65, 0x52, 0x08, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x22, 0x63, 0x0a, + 0x0b, 0x50, 0x61, 0x72, 0x74, 0x79, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x12, 0x19, 0x0a, 0x08, + 0x70, 0x61, 0x72, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, + 0x70, 0x61, 0x72, 0x74, 0x79, 0x49, 
0x64, 0x12, 0x39, 0x0a, 0x08, 0x70, 0x72, 0x65, 0x73, 0x65, + 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x6e, 0x61, 0x6b, 0x61, + 0x6d, 0x61, 0x2e, 0x72, 0x65, 0x61, 0x6c, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x55, 0x73, 0x65, 0x72, + 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x08, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, + 0x63, 0x65, 0x22, 0x27, 0x0a, 0x0a, 0x50, 0x61, 0x72, 0x74, 0x79, 0x43, 0x6c, 0x6f, 0x73, 0x65, + 0x12, 0x19, 0x0a, 0x08, 0x70, 0x61, 0x72, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x72, 0x74, 0x79, 0x49, 0x64, 0x22, 0x31, 0x0a, 0x14, 0x50, + 0x61, 0x72, 0x74, 0x79, 0x4a, 0x6f, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4c, + 0x69, 0x73, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x70, 0x61, 0x72, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x72, 0x74, 0x79, 0x49, 0x64, 0x22, 0x6a, + 0x0a, 0x10, 0x50, 0x61, 0x72, 0x74, 0x79, 0x4a, 0x6f, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x70, 0x61, 0x72, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x72, 0x74, 0x79, 0x49, 0x64, 0x12, 0x3b, 0x0a, + 0x09, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x1d, 0x2e, 0x6e, 0x61, 0x6b, 0x61, 0x6d, 0x61, 0x2e, 0x72, 0x65, 0x61, 0x6c, 0x74, 0x69, + 0x6d, 0x65, 0x2e, 0x55, 0x73, 0x65, 0x72, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x52, + 0x09, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x73, 0x22, 0xdd, 0x03, 0x0a, 0x12, 0x50, + 0x61, 0x72, 0x74, 0x79, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x6d, 0x61, 0x6b, 0x65, 0x72, 0x41, 0x64, + 0x64, 0x12, 0x19, 0x0a, 0x08, 0x70, 0x61, 0x72, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x72, 0x74, 0x79, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, + 0x6d, 0x69, 0x6e, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 
0x01, 0x28, 0x05, 0x52, + 0x08, 0x6d, 0x69, 0x6e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x78, + 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x6d, 0x61, + 0x78, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x66, 0x0a, 0x11, + 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, + 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x6e, 0x61, 0x6b, 0x61, 0x6d, 0x61, + 0x2e, 0x72, 0x65, 0x61, 0x6c, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x50, 0x61, 0x72, 0x74, 0x79, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x6d, 0x61, 0x6b, 0x65, 0x72, 0x41, 0x64, 0x64, 0x2e, 0x53, 0x74, 0x72, + 0x69, 0x6e, 0x67, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x52, 0x10, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, + 0x74, 0x69, 0x65, 0x73, 0x12, 0x69, 0x0a, 0x12, 0x6e, 0x75, 0x6d, 0x65, 0x72, 0x69, 0x63, 0x5f, + 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x3a, 0x2e, 0x6e, 0x61, 0x6b, 0x61, 0x6d, 0x61, 0x2e, 0x72, 0x65, 0x61, 0x6c, 0x74, 0x69, + 0x6d, 0x65, 0x2e, 0x50, 0x61, 0x72, 0x74, 0x79, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x6d, 0x61, 0x6b, 0x65, 0x72, 0x41, 0x64, 0x64, 0x2e, 0x4e, 0x75, 0x6d, 0x65, 0x72, 0x69, 0x63, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x11, 0x6e, 0x75, 0x6d, 0x65, 0x72, 0x69, 0x63, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x1a, @@ -4771,263 +4926,114 @@ var file_realtime_proto_rawDesc = []byte{ 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 
0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xd9, 0x05, 0x0a, 0x11, 0x4d, - 0x61, 0x74, 0x63, 0x68, 0x6d, 0x61, 0x6b, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, - 0x12, 0x16, 0x0a, 0x06, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x06, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x1b, 0x0a, 0x08, 0x6d, 0x61, 0x74, 0x63, - 0x68, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x07, 0x6d, 0x61, - 0x74, 0x63, 0x68, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x47, 0x0a, - 0x05, 0x75, 0x73, 0x65, 0x72, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x6e, - 0x61, 0x6b, 0x61, 0x6d, 0x61, 0x2e, 0x72, 0x65, 0x61, 0x6c, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x4d, - 0x61, 0x74, 0x63, 0x68, 0x6d, 0x61, 0x6b, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, - 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x6d, 0x61, 0x6b, 0x65, 0x72, 0x55, 0x73, 0x65, 0x72, 0x52, - 0x05, 0x75, 0x73, 0x65, 0x72, 0x73, 0x12, 0x45, 0x0a, 0x04, 0x73, 0x65, 0x6c, 0x66, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x6e, 0x61, 0x6b, 0x61, 0x6d, 0x61, 0x2e, 0x72, 0x65, - 0x61, 0x6c, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x6d, 0x61, 0x6b, 0x65, - 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x6d, 0x61, - 0x6b, 0x65, 0x72, 0x55, 0x73, 0x65, 0x72, 0x52, 0x04, 0x73, 0x65, 0x6c, 0x66, 0x1a, 0xe0, 0x03, - 0x0a, 0x0e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x6d, 0x61, 0x6b, 0x65, 0x72, 0x55, 0x73, 0x65, 0x72, - 0x12, 0x39, 0x0a, 0x08, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x6e, 0x61, 0x6b, 0x61, 0x6d, 0x61, 0x2e, 0x72, 0x65, 0x61, 0x6c, - 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x55, 0x73, 0x65, 0x72, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 
0x63, - 0x65, 0x52, 0x08, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x70, - 0x61, 0x72, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, - 0x61, 0x72, 0x74, 0x79, 0x49, 0x64, 0x12, 0x74, 0x0a, 0x11, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, - 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x47, 0x2e, 0x6e, 0x61, 0x6b, 0x61, 0x6d, 0x61, 0x2e, 0x72, 0x65, 0x61, 0x6c, 0x74, - 0x69, 0x6d, 0x65, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x6d, 0x61, 0x6b, 0x65, 0x72, 0x4d, 0x61, - 0x74, 0x63, 0x68, 0x65, 0x64, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x6d, 0x61, 0x6b, 0x65, 0x72, - 0x55, 0x73, 0x65, 0x72, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x50, 0x72, 0x6f, 0x70, 0x65, - 0x72, 0x74, 0x69, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x10, 0x73, 0x74, 0x72, 0x69, - 0x6e, 0x67, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x12, 0x77, 0x0a, 0x12, - 0x6e, 0x75, 0x6d, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, - 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x48, 0x2e, 0x6e, 0x61, 0x6b, 0x61, 0x6d, - 0x61, 0x2e, 0x72, 0x65, 0x61, 0x6c, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, - 0x6d, 0x61, 0x6b, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, 0x2e, 0x4d, 0x61, 0x74, - 0x63, 0x68, 0x6d, 0x61, 0x6b, 0x65, 0x72, 0x55, 0x73, 0x65, 0x72, 0x2e, 0x4e, 0x75, 0x6d, 0x65, - 0x72, 0x69, 0x63, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x52, 0x11, 0x6e, 0x75, 0x6d, 0x65, 0x72, 0x69, 0x63, 0x50, 0x72, 0x6f, 0x70, 0x65, - 0x72, 0x74, 0x69, 0x65, 0x73, 0x1a, 0x43, 0x0a, 0x15, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x50, - 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, - 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, - 0x12, 0x14, 0x0a, 0x05, 0x76, 
0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x44, 0x0a, 0x16, 0x4e, 0x75, - 0x6d, 0x65, 0x72, 0x69, 0x63, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, - 0x42, 0x04, 0x0a, 0x02, 0x69, 0x64, 0x22, 0x2a, 0x0a, 0x10, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x6d, - 0x61, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x74, 0x69, - 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x69, 0x63, 0x6b, - 0x65, 0x74, 0x22, 0x2a, 0x0a, 0x10, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x6d, 0x61, 0x6b, 0x65, 0x72, - 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x4f, - 0x0a, 0x0d, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, - 0x3e, 0x0a, 0x0d, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6e, 0x61, 0x6b, 0x61, 0x6d, 0x61, 0x2e, - 0x61, 0x70, 0x69, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x0d, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, - 0xf8, 0x01, 0x0a, 0x05, 0x50, 0x61, 0x72, 0x74, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x70, 0x61, 0x72, - 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x72, - 0x74, 0x79, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6f, 0x70, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x04, 0x6f, 0x70, 0x65, 0x6e, 0x12, 0x19, 0x0a, 
0x08, 0x6d, 0x61, 0x78, 0x5f, - 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x07, 0x6d, 0x61, 0x78, 0x53, - 0x69, 0x7a, 0x65, 0x12, 0x31, 0x0a, 0x04, 0x73, 0x65, 0x6c, 0x66, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1d, 0x2e, 0x6e, 0x61, 0x6b, 0x61, 0x6d, 0x61, 0x2e, 0x72, 0x65, 0x61, 0x6c, 0x74, - 0x69, 0x6d, 0x65, 0x2e, 0x55, 0x73, 0x65, 0x72, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, - 0x52, 0x04, 0x73, 0x65, 0x6c, 0x66, 0x12, 0x35, 0x0a, 0x06, 0x6c, 0x65, 0x61, 0x64, 0x65, 0x72, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x6e, 0x61, 0x6b, 0x61, 0x6d, 0x61, 0x2e, - 0x72, 0x65, 0x61, 0x6c, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x55, 0x73, 0x65, 0x72, 0x50, 0x72, 0x65, - 0x73, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x06, 0x6c, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x3b, 0x0a, - 0x09, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x1d, 0x2e, 0x6e, 0x61, 0x6b, 0x61, 0x6d, 0x61, 0x2e, 0x72, 0x65, 0x61, 0x6c, 0x74, 0x69, - 0x6d, 0x65, 0x2e, 0x55, 0x73, 0x65, 0x72, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x52, - 0x09, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x73, 0x22, 0x3c, 0x0a, 0x0b, 0x50, 0x61, - 0x72, 0x74, 0x79, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6f, 0x70, 0x65, - 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x04, 0x6f, 0x70, 0x65, 0x6e, 0x12, 0x19, 0x0a, - 0x08, 0x6d, 0x61, 0x78, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, - 0x07, 0x6d, 0x61, 0x78, 0x53, 0x69, 0x7a, 0x65, 0x22, 0x26, 0x0a, 0x09, 0x50, 0x61, 0x72, 0x74, - 0x79, 0x4a, 0x6f, 0x69, 0x6e, 0x12, 0x19, 0x0a, 0x08, 0x70, 0x61, 0x72, 0x74, 0x79, 0x5f, 0x69, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x72, 0x74, 0x79, 0x49, 0x64, - 0x22, 0x27, 0x0a, 0x0a, 0x50, 0x61, 0x72, 0x74, 0x79, 0x4c, 0x65, 0x61, 0x76, 0x65, 0x12, 0x19, - 0x0a, 0x08, 0x70, 0x61, 0x72, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 
0x07, 0x70, 0x61, 0x72, 0x74, 0x79, 0x49, 0x64, 0x22, 0x64, 0x0a, 0x0c, 0x50, 0x61, 0x72, - 0x74, 0x79, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x70, 0x61, 0x72, - 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x72, - 0x74, 0x79, 0x49, 0x64, 0x12, 0x39, 0x0a, 0x08, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x6e, 0x61, 0x6b, 0x61, 0x6d, 0x61, 0x2e, - 0x72, 0x65, 0x61, 0x6c, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x55, 0x73, 0x65, 0x72, 0x50, 0x72, 0x65, - 0x73, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x08, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x22, - 0x63, 0x0a, 0x0b, 0x50, 0x61, 0x72, 0x74, 0x79, 0x4c, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x19, - 0x0a, 0x08, 0x70, 0x61, 0x72, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x07, 0x70, 0x61, 0x72, 0x74, 0x79, 0x49, 0x64, 0x12, 0x39, 0x0a, 0x08, 0x70, 0x72, 0x65, - 0x73, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x6e, 0x61, - 0x6b, 0x61, 0x6d, 0x61, 0x2e, 0x72, 0x65, 0x61, 0x6c, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x55, 0x73, - 0x65, 0x72, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x08, 0x70, 0x72, 0x65, 0x73, - 0x65, 0x6e, 0x63, 0x65, 0x22, 0x63, 0x0a, 0x0b, 0x50, 0x61, 0x72, 0x74, 0x79, 0x41, 0x63, 0x63, - 0x65, 0x70, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x70, 0x61, 0x72, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x72, 0x74, 0x79, 0x49, 0x64, 0x12, 0x39, - 0x0a, 0x08, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1d, 0x2e, 0x6e, 0x61, 0x6b, 0x61, 0x6d, 0x61, 0x2e, 0x72, 0x65, 0x61, 0x6c, 0x74, 0x69, - 0x6d, 0x65, 0x2e, 0x55, 0x73, 0x65, 0x72, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x52, - 0x08, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x22, 0x63, 0x0a, 0x0b, 0x50, 0x61, 0x72, - 0x74, 0x79, 0x52, 0x65, 0x6d, 0x6f, 0x76, 
0x65, 0x12, 0x19, 0x0a, 0x08, 0x70, 0x61, 0x72, 0x74, - 0x79, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x72, 0x74, - 0x79, 0x49, 0x64, 0x12, 0x39, 0x0a, 0x08, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x6e, 0x61, 0x6b, 0x61, 0x6d, 0x61, 0x2e, 0x72, - 0x65, 0x61, 0x6c, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x55, 0x73, 0x65, 0x72, 0x50, 0x72, 0x65, 0x73, - 0x65, 0x6e, 0x63, 0x65, 0x52, 0x08, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x22, 0x27, - 0x0a, 0x0a, 0x50, 0x61, 0x72, 0x74, 0x79, 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x12, 0x19, 0x0a, 0x08, - 0x70, 0x61, 0x72, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, - 0x70, 0x61, 0x72, 0x74, 0x79, 0x49, 0x64, 0x22, 0x31, 0x0a, 0x14, 0x50, 0x61, 0x72, 0x74, 0x79, - 0x4a, 0x6f, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4c, 0x69, 0x73, 0x74, 0x12, - 0x19, 0x0a, 0x08, 0x70, 0x61, 0x72, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x07, 0x70, 0x61, 0x72, 0x74, 0x79, 0x49, 0x64, 0x22, 0x6a, 0x0a, 0x10, 0x50, 0x61, - 0x72, 0x74, 0x79, 0x4a, 0x6f, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x19, - 0x0a, 0x08, 0x70, 0x61, 0x72, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x07, 0x70, 0x61, 0x72, 0x74, 0x79, 0x49, 0x64, 0x12, 0x3b, 0x0a, 0x09, 0x70, 0x72, 0x65, - 0x73, 0x65, 0x6e, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x6e, - 0x61, 0x6b, 0x61, 0x6d, 0x61, 0x2e, 0x72, 0x65, 0x61, 0x6c, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x55, - 0x73, 0x65, 0x72, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x09, 0x70, 0x72, 0x65, - 0x73, 0x65, 0x6e, 0x63, 0x65, 0x73, 0x22, 0xdd, 0x03, 0x0a, 0x12, 0x50, 0x61, 0x72, 0x74, 0x79, - 0x4d, 0x61, 0x74, 0x63, 0x68, 0x6d, 0x61, 0x6b, 0x65, 0x72, 0x41, 0x64, 0x64, 0x12, 0x19, 0x0a, - 0x08, 0x70, 0x61, 0x72, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 
0x28, 0x09, 0x52, - 0x07, 0x70, 0x61, 0x72, 0x74, 0x79, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x69, 0x6e, 0x5f, - 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x6d, 0x69, 0x6e, - 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x63, 0x6f, 0x75, - 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x6d, 0x61, 0x78, 0x43, 0x6f, 0x75, - 0x6e, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x66, 0x0a, 0x11, 0x73, 0x74, 0x72, 0x69, - 0x6e, 0x67, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x05, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x6e, 0x61, 0x6b, 0x61, 0x6d, 0x61, 0x2e, 0x72, 0x65, 0x61, - 0x6c, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x50, 0x61, 0x72, 0x74, 0x79, 0x4d, 0x61, 0x74, 0x63, 0x68, - 0x6d, 0x61, 0x6b, 0x65, 0x72, 0x41, 0x64, 0x64, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x50, - 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x10, - 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, - 0x12, 0x69, 0x0a, 0x12, 0x6e, 0x75, 0x6d, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x70, 0x72, 0x6f, 0x70, - 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x6e, - 0x61, 0x6b, 0x61, 0x6d, 0x61, 0x2e, 0x72, 0x65, 0x61, 0x6c, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x50, - 0x61, 0x72, 0x74, 0x79, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x6d, 0x61, 0x6b, 0x65, 0x72, 0x41, 0x64, - 0x64, 0x2e, 0x4e, 0x75, 0x6d, 0x65, 0x72, 0x69, 0x63, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, - 0x69, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x11, 0x6e, 0x75, 0x6d, 0x65, 0x72, 0x69, - 0x63, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x1a, 0x43, 0x0a, 0x15, 0x53, - 0x74, 0x72, 0x69, 0x6e, 0x67, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x45, - 0x6e, 0x74, 0x72, 
0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, - 0x1a, 0x44, 0x0a, 0x16, 0x4e, 0x75, 0x6d, 0x65, 0x72, 0x69, 0x63, 0x50, 0x72, 0x6f, 0x70, 0x65, - 0x72, 0x74, 0x69, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, - 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4a, 0x0a, 0x15, 0x50, 0x61, 0x72, 0x74, 0x79, 0x4d, - 0x61, 0x74, 0x63, 0x68, 0x6d, 0x61, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x12, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4a, 0x0a, 0x15, 0x50, 0x61, + 0x72, 0x74, 0x79, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x6d, 0x61, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x6d, + 0x6f, 0x76, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x70, 0x61, 0x72, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x72, 0x74, 0x79, 0x49, 0x64, 0x12, 0x16, + 0x0a, 0x06, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, + 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x4a, 0x0a, 0x15, 0x50, 0x61, 0x72, 0x74, 0x79, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x6d, 0x61, 0x6b, 0x65, 0x72, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x70, 0x61, 0x72, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x72, 0x74, 0x79, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x69, 0x63, 0x6b, - 0x65, 0x74, 0x22, 0x4a, 0x0a, 0x15, 0x50, 0x61, 0x72, 0x74, 0x79, 0x4d, 0x61, 0x74, 0x63, 0x68, - 0x6d, 0x61, 0x6b, 0x65, 0x72, 0x54, 0x69, 0x63, 0x6b, 0x65, 
0x74, 0x12, 0x19, 0x0a, 0x08, 0x70, - 0x61, 0x72, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, - 0x61, 0x72, 0x74, 0x79, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x8e, - 0x01, 0x0a, 0x09, 0x50, 0x61, 0x72, 0x74, 0x79, 0x44, 0x61, 0x74, 0x61, 0x12, 0x19, 0x0a, 0x08, - 0x70, 0x61, 0x72, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, - 0x70, 0x61, 0x72, 0x74, 0x79, 0x49, 0x64, 0x12, 0x39, 0x0a, 0x08, 0x70, 0x72, 0x65, 0x73, 0x65, - 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x6e, 0x61, 0x6b, 0x61, - 0x6d, 0x61, 0x2e, 0x72, 0x65, 0x61, 0x6c, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x55, 0x73, 0x65, 0x72, - 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x08, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, - 0x63, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x6f, 0x70, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x03, 0x52, 0x06, 0x6f, 0x70, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x64, - 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, - 0x57, 0x0a, 0x0d, 0x50, 0x61, 0x72, 0x74, 0x79, 0x44, 0x61, 0x74, 0x61, 0x53, 0x65, 0x6e, 0x64, + 0x65, 0x74, 0x22, 0x8e, 0x01, 0x0a, 0x09, 0x50, 0x61, 0x72, 0x74, 0x79, 0x44, 0x61, 0x74, 0x61, 0x12, 0x19, 0x0a, 0x08, 0x70, 0x61, 0x72, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x72, 0x74, 0x79, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x6f, - 0x70, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x6f, 0x70, - 0x43, 0x6f, 0x64, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, 0x9b, 0x01, 0x0a, 0x12, 0x50, 0x61, 0x72, - 0x74, 0x79, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, - 
0x19, 0x0a, 0x08, 0x70, 0x61, 0x72, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x07, 0x70, 0x61, 0x72, 0x74, 0x79, 0x49, 0x64, 0x12, 0x33, 0x0a, 0x05, 0x6a, 0x6f, - 0x69, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x6e, 0x61, 0x6b, 0x61, - 0x6d, 0x61, 0x2e, 0x72, 0x65, 0x61, 0x6c, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x55, 0x73, 0x65, 0x72, - 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x05, 0x6a, 0x6f, 0x69, 0x6e, 0x73, 0x12, - 0x35, 0x0a, 0x06, 0x6c, 0x65, 0x61, 0x76, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x1d, 0x2e, 0x6e, 0x61, 0x6b, 0x61, 0x6d, 0x61, 0x2e, 0x72, 0x65, 0x61, 0x6c, 0x74, 0x69, 0x6d, - 0x65, 0x2e, 0x55, 0x73, 0x65, 0x72, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x06, - 0x6c, 0x65, 0x61, 0x76, 0x65, 0x73, 0x22, 0x06, 0x0a, 0x04, 0x50, 0x69, 0x6e, 0x67, 0x22, 0x06, - 0x0a, 0x04, 0x50, 0x6f, 0x6e, 0x67, 0x22, 0x45, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x12, 0x3b, 0x0a, 0x09, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x73, 0x18, 0x01, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x6e, 0x61, 0x6b, 0x61, 0x6d, 0x61, 0x2e, 0x72, 0x65, 0x61, - 0x6c, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x55, 0x73, 0x65, 0x72, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, - 0x63, 0x65, 0x52, 0x09, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x73, 0x22, 0x47, 0x0a, - 0x0c, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x46, 0x6f, 0x6c, 0x6c, 0x6f, 0x77, 0x12, 0x19, 0x0a, - 0x08, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, - 0x07, 0x75, 0x73, 0x65, 0x72, 0x49, 0x64, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x75, 0x73, 0x65, 0x72, - 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x09, 0x75, 0x73, 0x65, - 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x22, 0x81, 0x01, 0x0a, 0x13, 0x53, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x33, + 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 
0x72, 0x74, 0x79, 0x49, 0x64, 0x12, 0x39, 0x0a, 0x08, 0x70, + 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, + 0x6e, 0x61, 0x6b, 0x61, 0x6d, 0x61, 0x2e, 0x72, 0x65, 0x61, 0x6c, 0x74, 0x69, 0x6d, 0x65, 0x2e, + 0x55, 0x73, 0x65, 0x72, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x08, 0x70, 0x72, + 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x6f, 0x70, 0x5f, 0x63, 0x6f, 0x64, + 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x6f, 0x70, 0x43, 0x6f, 0x64, 0x65, 0x12, + 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, + 0x61, 0x74, 0x61, 0x22, 0x57, 0x0a, 0x0d, 0x50, 0x61, 0x72, 0x74, 0x79, 0x44, 0x61, 0x74, 0x61, + 0x53, 0x65, 0x6e, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x70, 0x61, 0x72, 0x74, 0x79, 0x5f, 0x69, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x72, 0x74, 0x79, 0x49, 0x64, 0x12, + 0x17, 0x0a, 0x07, 0x6f, 0x70, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x06, 0x6f, 0x70, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, 0x9b, 0x01, 0x0a, + 0x12, 0x50, 0x61, 0x72, 0x74, 0x79, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x45, 0x76, + 0x65, 0x6e, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x70, 0x61, 0x72, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x72, 0x74, 0x79, 0x49, 0x64, 0x12, 0x33, 0x0a, 0x05, 0x6a, 0x6f, 0x69, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x6e, 0x61, 0x6b, 0x61, 0x6d, 0x61, 0x2e, 0x72, 0x65, 0x61, 0x6c, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x55, 0x73, 0x65, 0x72, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x05, 0x6a, 0x6f, 0x69, 0x6e, 0x73, 0x12, 0x35, 0x0a, 0x06, 0x6c, 0x65, 0x61, 0x76, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x6e, 0x61, 0x6b, 0x61, 0x6d, 0x61, 0x2e, 0x72, 
0x65, 0x61, 0x6c, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x55, 0x73, 0x65, 0x72, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, - 0x63, 0x65, 0x52, 0x06, 0x6c, 0x65, 0x61, 0x76, 0x65, 0x73, 0x22, 0x2b, 0x0a, 0x0e, 0x53, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x55, 0x6e, 0x66, 0x6f, 0x6c, 0x6c, 0x6f, 0x77, 0x12, 0x19, 0x0a, 0x08, - 0x75, 0x73, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, - 0x75, 0x73, 0x65, 0x72, 0x49, 0x64, 0x73, 0x22, 0x44, 0x0a, 0x0c, 0x53, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x34, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, - 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x6c, 0x0a, - 0x06, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x12, 0x0a, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x73, - 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x75, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x73, 0x75, 0x62, 0x63, 0x6f, 0x6e, 0x74, - 0x65, 0x78, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x75, 0x62, 0x63, 0x6f, - 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x22, 0xa4, 0x01, 0x0a, 0x0a, - 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x44, 0x61, 0x74, 0x61, 0x12, 0x2f, 0x0a, 0x06, 0x73, 0x74, - 0x72, 0x65, 0x61, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6e, 0x61, 0x6b, - 0x61, 0x6d, 0x61, 0x2e, 0x72, 0x65, 0x61, 0x6c, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x53, 0x74, 0x72, - 0x65, 0x61, 0x6d, 0x52, 0x06, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x35, 0x0a, 0x06, 0x73, - 0x65, 0x6e, 0x64, 0x65, 
0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x6e, 0x61, - 0x6b, 0x61, 0x6d, 0x61, 0x2e, 0x72, 0x65, 0x61, 0x6c, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x55, 0x73, - 0x65, 0x72, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x06, 0x73, 0x65, 0x6e, 0x64, - 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x6c, 0x69, 0x61, 0x62, - 0x6c, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x6c, 0x69, 0x61, 0x62, - 0x6c, 0x65, 0x22, 0xb2, 0x01, 0x0a, 0x13, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x50, 0x72, 0x65, - 0x73, 0x65, 0x6e, 0x63, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x2f, 0x0a, 0x06, 0x73, 0x74, - 0x72, 0x65, 0x61, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6e, 0x61, 0x6b, - 0x61, 0x6d, 0x61, 0x2e, 0x72, 0x65, 0x61, 0x6c, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x53, 0x74, 0x72, - 0x65, 0x61, 0x6d, 0x52, 0x06, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x33, 0x0a, 0x05, 0x6a, - 0x6f, 0x69, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x6e, 0x61, 0x6b, - 0x61, 0x6d, 0x61, 0x2e, 0x72, 0x65, 0x61, 0x6c, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x55, 0x73, 0x65, - 0x72, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x05, 0x6a, 0x6f, 0x69, 0x6e, 0x73, - 0x12, 0x35, 0x0a, 0x06, 0x6c, 0x65, 0x61, 0x76, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x1d, 0x2e, 0x6e, 0x61, 0x6b, 0x61, 0x6d, 0x61, 0x2e, 0x72, 0x65, 0x61, 0x6c, 0x74, 0x69, - 0x6d, 0x65, 0x2e, 0x55, 0x73, 0x65, 0x72, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x52, - 0x06, 0x6c, 0x65, 0x61, 0x76, 0x65, 0x73, 0x22, 0xba, 0x01, 0x0a, 0x0c, 0x55, 0x73, 0x65, 0x72, - 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x75, 0x73, 0x65, 0x72, - 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x75, 0x73, 0x65, 0x72, 0x49, - 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x65, 0x73, 0x73, 0x69, 
0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x64, - 0x12, 0x1a, 0x0a, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, - 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x0b, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x34, - 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x73, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x42, 0x6c, 0x0a, 0x1b, 0x63, 0x6f, 0x6d, 0x2e, 0x68, 0x65, 0x72, 0x6f, - 0x69, 0x63, 0x6c, 0x61, 0x62, 0x73, 0x2e, 0x6e, 0x61, 0x6b, 0x61, 0x6d, 0x61, 0x2e, 0x72, 0x74, - 0x61, 0x70, 0x69, 0x42, 0x0e, 0x4e, 0x61, 0x6b, 0x61, 0x6d, 0x61, 0x52, 0x65, 0x61, 0x6c, 0x74, - 0x69, 0x6d, 0x65, 0x50, 0x01, 0x5a, 0x29, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, - 0x6d, 0x2f, 0x68, 0x65, 0x72, 0x6f, 0x69, 0x63, 0x6c, 0x61, 0x62, 0x73, 0x2f, 0x6e, 0x61, 0x6b, - 0x61, 0x6d, 0x61, 0x2d, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x72, 0x74, 0x61, 0x70, 0x69, - 0xaa, 0x02, 0x0f, 0x4e, 0x61, 0x6b, 0x61, 0x6d, 0x61, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x63, 0x65, 0x52, 0x06, 0x6c, 0x65, 0x61, 0x76, 0x65, 0x73, 0x22, 0x06, 0x0a, 0x04, 0x50, 0x69, + 0x6e, 0x67, 0x22, 0x06, 0x0a, 0x04, 0x50, 0x6f, 0x6e, 0x67, 0x22, 0x45, 0x0a, 0x06, 0x53, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x12, 0x3b, 0x0a, 0x09, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, + 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x6e, 0x61, 0x6b, 0x61, 0x6d, 0x61, + 0x2e, 0x72, 0x65, 0x61, 0x6c, 0x74, 
0x69, 0x6d, 0x65, 0x2e, 0x55, 0x73, 0x65, 0x72, 0x50, 0x72, + 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x09, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, + 0x73, 0x22, 0x47, 0x0a, 0x0c, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x46, 0x6f, 0x6c, 0x6c, 0x6f, + 0x77, 0x12, 0x19, 0x0a, 0x08, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x07, 0x75, 0x73, 0x65, 0x72, 0x49, 0x64, 0x73, 0x12, 0x1c, 0x0a, 0x09, + 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x09, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x22, 0x81, 0x01, 0x0a, 0x13, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x45, 0x76, 0x65, + 0x6e, 0x74, 0x12, 0x33, 0x0a, 0x05, 0x6a, 0x6f, 0x69, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x1d, 0x2e, 0x6e, 0x61, 0x6b, 0x61, 0x6d, 0x61, 0x2e, 0x72, 0x65, 0x61, 0x6c, 0x74, + 0x69, 0x6d, 0x65, 0x2e, 0x55, 0x73, 0x65, 0x72, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, + 0x52, 0x05, 0x6a, 0x6f, 0x69, 0x6e, 0x73, 0x12, 0x35, 0x0a, 0x06, 0x6c, 0x65, 0x61, 0x76, 0x65, + 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x6e, 0x61, 0x6b, 0x61, 0x6d, 0x61, + 0x2e, 0x72, 0x65, 0x61, 0x6c, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x55, 0x73, 0x65, 0x72, 0x50, 0x72, + 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x06, 0x6c, 0x65, 0x61, 0x76, 0x65, 0x73, 0x22, 0x2b, + 0x0a, 0x0e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x55, 0x6e, 0x66, 0x6f, 0x6c, 0x6c, 0x6f, 0x77, + 0x12, 0x19, 0x0a, 0x08, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x07, 0x75, 0x73, 0x65, 0x72, 0x49, 0x64, 0x73, 0x22, 0x44, 0x0a, 0x0c, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x34, 0x0a, 0x06, 0x73, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 
0x66, 0x2e, 0x53, 0x74, + 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x22, 0x6c, 0x0a, 0x06, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x12, 0x0a, 0x04, 0x6d, + 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x12, + 0x18, 0x0a, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x73, 0x75, 0x62, + 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, + 0x75, 0x62, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x61, 0x62, + 0x65, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x22, + 0xa4, 0x01, 0x0a, 0x0a, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x44, 0x61, 0x74, 0x61, 0x12, 0x2f, + 0x0a, 0x06, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, + 0x2e, 0x6e, 0x61, 0x6b, 0x61, 0x6d, 0x61, 0x2e, 0x72, 0x65, 0x61, 0x6c, 0x74, 0x69, 0x6d, 0x65, + 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x06, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, + 0x35, 0x0a, 0x06, 0x73, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1d, 0x2e, 0x6e, 0x61, 0x6b, 0x61, 0x6d, 0x61, 0x2e, 0x72, 0x65, 0x61, 0x6c, 0x74, 0x69, 0x6d, + 0x65, 0x2e, 0x55, 0x73, 0x65, 0x72, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x06, + 0x73, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, + 0x6c, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, + 0x6c, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x22, 0xb2, 0x01, 0x0a, 0x13, 0x53, 0x74, 0x72, 0x65, 0x61, + 0x6d, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x2f, + 0x0a, 0x06, 
0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, + 0x2e, 0x6e, 0x61, 0x6b, 0x61, 0x6d, 0x61, 0x2e, 0x72, 0x65, 0x61, 0x6c, 0x74, 0x69, 0x6d, 0x65, + 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x06, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, + 0x33, 0x0a, 0x05, 0x6a, 0x6f, 0x69, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, + 0x2e, 0x6e, 0x61, 0x6b, 0x61, 0x6d, 0x61, 0x2e, 0x72, 0x65, 0x61, 0x6c, 0x74, 0x69, 0x6d, 0x65, + 0x2e, 0x55, 0x73, 0x65, 0x72, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x05, 0x6a, + 0x6f, 0x69, 0x6e, 0x73, 0x12, 0x35, 0x0a, 0x06, 0x6c, 0x65, 0x61, 0x76, 0x65, 0x73, 0x18, 0x03, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x6e, 0x61, 0x6b, 0x61, 0x6d, 0x61, 0x2e, 0x72, 0x65, + 0x61, 0x6c, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x55, 0x73, 0x65, 0x72, 0x50, 0x72, 0x65, 0x73, 0x65, + 0x6e, 0x63, 0x65, 0x52, 0x06, 0x6c, 0x65, 0x61, 0x76, 0x65, 0x73, 0x22, 0xba, 0x01, 0x0a, 0x0c, + 0x55, 0x73, 0x65, 0x72, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x17, 0x0a, 0x07, + 0x75, 0x73, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x75, + 0x73, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, + 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x65, 0x73, 0x73, 0x69, + 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x20, 0x0a, 0x0b, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, + 0x63, 0x65, 0x12, 0x34, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 
0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x42, 0x6c, 0x0a, 0x1b, 0x63, 0x6f, 0x6d, 0x2e, + 0x68, 0x65, 0x72, 0x6f, 0x69, 0x63, 0x6c, 0x61, 0x62, 0x73, 0x2e, 0x6e, 0x61, 0x6b, 0x61, 0x6d, + 0x61, 0x2e, 0x72, 0x74, 0x61, 0x70, 0x69, 0x42, 0x0e, 0x4e, 0x61, 0x6b, 0x61, 0x6d, 0x61, 0x52, + 0x65, 0x61, 0x6c, 0x74, 0x69, 0x6d, 0x65, 0x50, 0x01, 0x5a, 0x29, 0x67, 0x69, 0x74, 0x68, 0x75, + 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x65, 0x72, 0x6f, 0x69, 0x63, 0x6c, 0x61, 0x62, 0x73, + 0x2f, 0x6e, 0x61, 0x6b, 0x61, 0x6d, 0x61, 0x2d, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x72, + 0x74, 0x61, 0x70, 0x69, 0xaa, 0x02, 0x0f, 0x4e, 0x61, 0x6b, 0x61, 0x6d, 0x61, 0x2e, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/vendor/github.com/heroiclabs/nakama-common/rtapi/realtime.proto b/vendor/github.com/heroiclabs/nakama-common/rtapi/realtime.proto index e8b3c2e8e..5bb7c5b2c 100644 --- a/vendor/github.com/heroiclabs/nakama-common/rtapi/realtime.proto +++ b/vendor/github.com/heroiclabs/nakama-common/rtapi/realtime.proto @@ -301,7 +301,10 @@ message Match { } // Create a new realtime match. -message MatchCreate {} +message MatchCreate { + // Optional name to use when creating the match. + string name = 1; +} // Realtime match data received from the server. 
message MatchData { diff --git a/vendor/github.com/jackc/chunkreader/v2/go.mod b/vendor/github.com/jackc/chunkreader/v2/go.mod deleted file mode 100644 index a1384b407..000000000 --- a/vendor/github.com/jackc/chunkreader/v2/go.mod +++ /dev/null @@ -1,3 +0,0 @@ -module github.com/jackc/chunkreader/v2 - -go 1.12 diff --git a/vendor/github.com/jackc/pgconn/CHANGELOG.md b/vendor/github.com/jackc/pgconn/CHANGELOG.md index c377b3ed4..45c02f1e9 100644 --- a/vendor/github.com/jackc/pgconn/CHANGELOG.md +++ b/vendor/github.com/jackc/pgconn/CHANGELOG.md @@ -1,3 +1,16 @@ +# 1.10.0 (July 24, 2021) + +* net.Timeout errors are no longer returned when a query is canceled via context. A wrapped context error is returned. + +# 1.9.0 (July 10, 2021) + +* pgconn.Timeout only is true for errors originating in pgconn (Michael Darr) +* Add defaults for sslcert, sslkey, and sslrootcert (Joshua Brindle) +* Solve issue with 'sslmode=verify-full' when there are multiple hosts (mgoddard) +* Fix default host when parsing URL without host but with port +* Allow dbname query parameter in URL conn string +* Update underlying dependencies + # 1.8.1 (March 25, 2021) * Better connection string sanitization (ip.novikov) diff --git a/vendor/github.com/jackc/pgconn/config.go b/vendor/github.com/jackc/pgconn/config.go index c162d3c35..172e7478b 100644 --- a/vendor/github.com/jackc/pgconn/config.go +++ b/vendor/github.com/jackc/pgconn/config.go @@ -297,7 +297,7 @@ func ParseConfig(connString string) (*Config, error) { tlsConfigs = append(tlsConfigs, nil) } else { var err error - tlsConfigs, err = configTLS(settings) + tlsConfigs, err = configTLS(settings, host) if err != nil { return nil, &parseConfigError{connString: connString, msg: "failed to configure TLS", err: err} } @@ -411,8 +411,12 @@ func parseURLSettings(connString string) (map[string]string, error) { if err != nil { return nil, fmt.Errorf("failed to split host:port in '%s', err: %w", host, err) } - hosts = append(hosts, h) - ports = 
append(ports, p) + if h != "" { + hosts = append(hosts, h) + } + if p != "" { + ports = append(ports, p) + } } if len(hosts) > 0 { settings["host"] = strings.Join(hosts, ",") @@ -426,7 +430,15 @@ func parseURLSettings(connString string) (map[string]string, error) { settings["database"] = database } + nameMap := map[string]string{ + "dbname": "database", + } + for k, v := range url.Query() { + if k2, present := nameMap[k]; present { + k = k2 + } + settings[k] = v[0] } @@ -540,8 +552,8 @@ func parseServiceSettings(servicefilePath, serviceName string) (map[string]strin // configTLS uses libpq's TLS parameters to construct []*tls.Config. It is // necessary to allow returning multiple TLS configs as sslmode "allow" and // "prefer" allow fallback. -func configTLS(settings map[string]string) ([]*tls.Config, error) { - host := settings["host"] +func configTLS(settings map[string]string, thisHost string) ([]*tls.Config, error) { + host := thisHost sslmode := settings["sslmode"] sslrootcert := settings["sslrootcert"] sslcert := settings["sslcert"] diff --git a/vendor/github.com/jackc/pgconn/defaults.go b/vendor/github.com/jackc/pgconn/defaults.go index d3313481f..f69cad317 100644 --- a/vendor/github.com/jackc/pgconn/defaults.go +++ b/vendor/github.com/jackc/pgconn/defaults.go @@ -22,6 +22,19 @@ func defaultSettings() map[string]string { settings["user"] = user.Username settings["passfile"] = filepath.Join(user.HomeDir, ".pgpass") settings["servicefile"] = filepath.Join(user.HomeDir, ".pg_service.conf") + sslcert := filepath.Join(user.HomeDir, ".postgresql", "postgresql.crt") + sslkey := filepath.Join(user.HomeDir, ".postgresql", "postgresql.key") + if _, err := os.Stat(sslcert); err == nil { + if _, err := os.Stat(sslkey); err == nil { + // Both the cert and key must be present to use them, or do not use either + settings["sslcert"] = sslcert + settings["sslkey"] = sslkey + } + } + sslrootcert := filepath.Join(user.HomeDir, ".postgresql", "root.crt") + if _, err := 
os.Stat(sslrootcert); err == nil { + settings["sslrootcert"] = sslrootcert + } } settings["target_session_attrs"] = "any" diff --git a/vendor/github.com/jackc/pgconn/defaults_windows.go b/vendor/github.com/jackc/pgconn/defaults_windows.go index 55243700c..71eb77dba 100644 --- a/vendor/github.com/jackc/pgconn/defaults_windows.go +++ b/vendor/github.com/jackc/pgconn/defaults_windows.go @@ -29,6 +29,19 @@ func defaultSettings() map[string]string { settings["user"] = username settings["passfile"] = filepath.Join(appData, "postgresql", "pgpass.conf") settings["servicefile"] = filepath.Join(user.HomeDir, ".pg_service.conf") + sslcert := filepath.Join(appData, "postgresql", "postgresql.crt") + sslkey := filepath.Join(appData, "postgresql", "postgresql.key") + if _, err := os.Stat(sslcert); err == nil { + if _, err := os.Stat(sslkey); err == nil { + // Both the cert and key must be present to use them, or do not use either + settings["sslcert"] = sslcert + settings["sslkey"] = sslkey + } + } + sslrootcert := filepath.Join(appData, "postgresql", "root.crt") + if _, err := os.Stat(sslrootcert); err == nil { + settings["sslrootcert"] = sslrootcert + } } settings["target_session_attrs"] = "any" diff --git a/vendor/github.com/jackc/pgconn/errors.go b/vendor/github.com/jackc/pgconn/errors.go index 77adfcf0b..a32b29c92 100644 --- a/vendor/github.com/jackc/pgconn/errors.go +++ b/vendor/github.com/jackc/pgconn/errors.go @@ -18,15 +18,11 @@ func SafeToRetry(err error) bool { return false } -// Timeout checks if err was was caused by a timeout. To be specific, it is true if err is or was caused by a +// Timeout checks if err was was caused by a timeout. To be specific, it is true if err was caused within pgconn by a // context.Canceled, context.DeadlineExceeded or an implementer of net.Error where Timeout() is true. 
func Timeout(err error) bool { - if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { - return true - } - - var netErr net.Error - return errors.As(err, &netErr) && netErr.Timeout() + var timeoutErr *errTimeout + return errors.As(err, &timeoutErr) } // PgError represents an error reported by the PostgreSQL server. See @@ -110,6 +106,15 @@ func (e *parseConfigError) Unwrap() error { return e.err } +// preferContextOverNetTimeoutError returns ctx.Err() if ctx.Err() is present and err is a net.Error with Timeout() == +// true. Otherwise returns err. +func preferContextOverNetTimeoutError(ctx context.Context, err error) error { + if err, ok := err.(net.Error); ok && err.Timeout() && ctx.Err() != nil { + return &errTimeout{err: ctx.Err()} + } + return err +} + type pgconnError struct { msg string err error @@ -134,6 +139,24 @@ func (e *pgconnError) Unwrap() error { return e.err } +// errTimeout occurs when an error was caused by a timeout. Specifically, it wraps an error which is +// context.Canceled, context.DeadlineExceeded, or an implementer of net.Error where Timeout() is true. +type errTimeout struct { + err error +} + +func (e *errTimeout) Error() string { + return fmt.Sprintf("timeout: %s", e.err.Error()) +} + +func (e *errTimeout) SafeToRetry() bool { + return SafeToRetry(e.err) +} + +func (e *errTimeout) Unwrap() error { + return e.err +} + type contextAlreadyDoneError struct { err error } @@ -150,6 +173,11 @@ func (e *contextAlreadyDoneError) Unwrap() error { return e.err } +// newContextAlreadyDoneError double-wraps a context error in `contextAlreadyDoneError` and `errTimeout`. 
+func newContextAlreadyDoneError(ctx context.Context) (err error) { + return &errTimeout{&contextAlreadyDoneError{err: ctx.Err()}} +} + type writeError struct { err error safeToRetry bool diff --git a/vendor/github.com/jackc/pgconn/go.mod b/vendor/github.com/jackc/pgconn/go.mod deleted file mode 100644 index e9003cb73..000000000 --- a/vendor/github.com/jackc/pgconn/go.mod +++ /dev/null @@ -1,15 +0,0 @@ -module github.com/jackc/pgconn - -go 1.12 - -require ( - github.com/jackc/chunkreader/v2 v2.0.1 - github.com/jackc/pgio v1.0.0 - github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2 - github.com/jackc/pgpassfile v1.0.0 - github.com/jackc/pgproto3/v2 v2.0.6 - github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b - github.com/stretchr/testify v1.5.1 - golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2 - golang.org/x/text v0.3.3 -) diff --git a/vendor/github.com/jackc/pgconn/go.sum b/vendor/github.com/jackc/pgconn/go.sum deleted file mode 100644 index 58bb1286a..000000000 --- a/vendor/github.com/jackc/pgconn/go.sum +++ /dev/null @@ -1,136 +0,0 @@ -github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= -github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/jackc/chunkreader v1.0.0 h1:4s39bBR8ByfqH+DKm8rQA3E1LHZWB9XWcrz8fqaZbe0= -github.com/jackc/chunkreader v1.0.0/go.mod 
h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo= -github.com/jackc/chunkreader/v2 v2.0.0 h1:DUwgMQuuPnS0rhMXenUtZpqZqrR/30NWY+qQvTpSvEs= -github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= -github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8= -github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= -github.com/jackc/pgconn v0.0.0-20190420214824-7e0022ef6ba3/go.mod h1:jkELnwuX+w9qN5YIfX0fl88Ehu4XC3keFuOJJk9pcnA= -github.com/jackc/pgconn v0.0.0-20190824142844-760dd75542eb/go.mod h1:lLjNuW/+OfW9/pnVKPazfWOgNfH2aPem8YQ7ilXGvJE= -github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsUgOEh9hBm+xYTstcNHg7UPMVJqRfQxq4s= -github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE= -github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8= -github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2 h1:JVX6jT/XfzNqIjye4717ITLaNwV9mWbJx0dLCpcRzdA= -github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE= -github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= -github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= -github.com/jackc/pgproto3 v1.1.0 h1:FYYE4yRw+AgI8wXIinMlNjBbp/UitDJwfj5LqqewP1A= -github.com/jackc/pgproto3 v1.1.0/go.mod h1:eR5FA3leWg7p9aeAqi37XOTgTIbkABlvcPB3E5rlc78= -github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190420180111-c116219b62db/go.mod h1:bhq50y+xrl9n5mRYyCBFKkpRVTLYJVWeCc+mEAI3yXA= -github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190609003834-432c2951c711/go.mod h1:uH0AWtUmuShn0bcesswc4aBTWGvw0cAxIJp+6OB//Wg= -github.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= -github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29 h1:f2HwOeI1NIJyNFVVeh1gUISyt57iw/fmI/IXJfH3ATE= -github.com/jackc/pgproto3/v2 
v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= -github.com/jackc/pgproto3/v2 v2.0.1 h1:Rdjp4NFjwHnEslx2b66FfCI2S0LhO4itac3hXz6WX9M= -github.com/jackc/pgproto3/v2 v2.0.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= -github.com/jackc/pgproto3/v2 v2.0.2 h1:q1Hsy66zh4vuNsajBUF2PNqfAMMfxU5mk594lPE9vjY= -github.com/jackc/pgproto3/v2 v2.0.2/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= -github.com/jackc/pgproto3/v2 v2.0.3 h1:2S4PhE00mvdvaSiCYR1ZCmR1NAxeYfTSsqqSKxE1vzo= -github.com/jackc/pgproto3/v2 v2.0.3/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= -github.com/jackc/pgproto3/v2 v2.0.4 h1:RHkX5ZUD9bl/kn0f9dYUWs1N7Nwvo1wwUYvKiR26Zco= -github.com/jackc/pgproto3/v2 v2.0.4/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= -github.com/jackc/pgproto3/v2 v2.0.5 h1:NUbEWPmCQZbMmYlTjVoNPhc0CfnYyz2bfUAh6A5ZVJM= -github.com/jackc/pgproto3/v2 v2.0.5/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= -github.com/jackc/pgproto3/v2 v2.0.6 h1:b1105ZGEMFe7aCvrT1Cca3VoVb4ZFMaFJLJcg/3zD+8= -github.com/jackc/pgproto3/v2 v2.0.6/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= -github.com/jackc/pgservicefile v0.0.0-20200307190119-3430c5407db8 h1:Q3tB+ExeflWUW7AFcAhXqk40s9mnNYLk1nOkKNZ5GnU= -github.com/jackc/pgservicefile v0.0.0-20200307190119-3430c5407db8/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E= -github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b h1:C8S2+VttkHFdOOCXJe+YGfa4vHYwlt4Zx+IVXQ97jYg= -github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E= -github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg= -github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc= -github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw= 
-github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y= -github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM= -github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc= -github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= -github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= -github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 
-github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= -github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= -github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= -github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= -github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= -github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= -github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= -go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.10.0/go.mod 
h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= -golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586 h1:7KByu05hhLed2MO29w7p1XfZvZ13m8mub3shuVftRs0= -golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59 h1:3zb4D3T4G8jdExgVU/95+vQXfpEPiMdCaZgmGVxjNHM= -golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2 h1:It14KIkyBFYkHkwZ7k45minvA9aorojkyjGk9KJ5B/w= -golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 
-golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 h1:9zdDQZ7Thm29KFXgAX/+yaf3eVbP7djjWp/dXAppNCc= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod 
h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/jackc/pgconn/pgconn.go b/vendor/github.com/jackc/pgconn/pgconn.go index 197aad4ad..43b13e43a 100644 --- a/vendor/github.com/jackc/pgconn/pgconn.go +++ b/vendor/github.com/jackc/pgconn/pgconn.go @@ -217,6 +217,10 @@ func connect(ctx context.Context, config *Config, fallbackConfig *FallbackConfig network, address := NetworkAddress(fallbackConfig.Host, fallbackConfig.Port) pgConn.conn, err = config.DialFunc(ctx, network, address) if err != nil { + var netErr net.Error + if errors.As(err, &netErr) && netErr.Timeout() { + err = &errTimeout{err: err} + } return nil, &connectError{config: config, msg: "dial error", err: err} } @@ -267,7 +271,7 @@ func connect(ctx context.Context, config *Config, fallbackConfig *FallbackConfig if err, ok := err.(*PgError); ok { return nil, err } - return nil, &connectError{config: config, msg: "failed to receive message", err: err} + return nil, &connectError{config: config, msg: "failed to receive message", err: preferContextOverNetTimeoutError(ctx, err)} } switch msg := msg.(type) { @@ -389,7 +393,7 @@ func (pgConn *PgConn) SendBytes(ctx context.Context, buf []byte) error { if ctx != context.Background() { select { case <-ctx.Done(): - return &contextAlreadyDoneError{err: ctx.Err()} + return newContextAlreadyDoneError(ctx) default: } pgConn.contextWatcher.Watch(ctx) @@ -421,7 +425,7 @@ func (pgConn *PgConn) ReceiveMessage(ctx context.Context) (pgproto3.BackendMessa if ctx != 
context.Background() { select { case <-ctx.Done(): - return nil, &contextAlreadyDoneError{err: ctx.Err()} + return nil, newContextAlreadyDoneError(ctx) default: } pgConn.contextWatcher.Watch(ctx) @@ -430,7 +434,10 @@ func (pgConn *PgConn) ReceiveMessage(ctx context.Context) (pgproto3.BackendMessa msg, err := pgConn.receiveMessage() if err != nil { - err = &pgconnError{msg: "receive message failed", err: err, safeToRetry: true} + err = &pgconnError{ + msg: "receive message failed", + err: preferContextOverNetTimeoutError(ctx, err), + safeToRetry: true} } return msg, err } @@ -451,7 +458,8 @@ func (pgConn *PgConn) peekMessage() (pgproto3.BackendMessage, error) { pgConn.bufferingReceive = false // If a timeout error happened in the background try the read again. - if netErr, ok := err.(net.Error); ok && netErr.Timeout() { + var netErr net.Error + if errors.As(err, &netErr) && netErr.Timeout() { msg, err = pgConn.frontend.Receive() } } else { @@ -460,7 +468,9 @@ func (pgConn *PgConn) peekMessage() (pgproto3.BackendMessage, error) { if err != nil { // Close on anything other than timeout error - everything else is fatal - if err, ok := err.(net.Error); !(ok && err.Timeout()) { + var netErr net.Error + isNetErr := errors.As(err, &netErr) + if !(isNetErr && netErr.Timeout()) { pgConn.asyncClose() } @@ -476,7 +486,9 @@ func (pgConn *PgConn) receiveMessage() (pgproto3.BackendMessage, error) { msg, err := pgConn.peekMessage() if err != nil { // Close on anything other than timeout error - everything else is fatal - if err, ok := err.(net.Error); !(ok && err.Timeout()) { + var netErr net.Error + isNetErr := errors.As(err, &netErr) + if !(isNetErr && netErr.Timeout()) { pgConn.asyncClose() } @@ -745,7 +757,7 @@ func (pgConn *PgConn) Prepare(ctx context.Context, name, sql string, paramOIDs [ if ctx != context.Background() { select { case <-ctx.Done(): - return nil, &contextAlreadyDoneError{err: ctx.Err()} + return nil, newContextAlreadyDoneError(ctx) default: } 
pgConn.contextWatcher.Watch(ctx) @@ -772,7 +784,7 @@ readloop: msg, err := pgConn.receiveMessage() if err != nil { pgConn.asyncClose() - return nil, err + return nil, preferContextOverNetTimeoutError(ctx, err) } switch msg := msg.(type) { @@ -875,7 +887,7 @@ func (pgConn *PgConn) WaitForNotification(ctx context.Context) error { if ctx != context.Background() { select { case <-ctx.Done(): - return ctx.Err() + return newContextAlreadyDoneError(ctx) default: } @@ -886,7 +898,7 @@ func (pgConn *PgConn) WaitForNotification(ctx context.Context) error { for { msg, err := pgConn.receiveMessage() if err != nil { - return err + return preferContextOverNetTimeoutError(ctx, err) } switch msg.(type) { @@ -918,7 +930,7 @@ func (pgConn *PgConn) Exec(ctx context.Context, sql string) *MultiResultReader { select { case <-ctx.Done(): multiResult.closed = true - multiResult.err = &contextAlreadyDoneError{err: ctx.Err()} + multiResult.err = newContextAlreadyDoneError(ctx) pgConn.unlock() return multiResult default: @@ -964,7 +976,7 @@ func (pgConn *PgConn) ReceiveResults(ctx context.Context) *MultiResultReader { select { case <-ctx.Done(): multiResult.closed = true - multiResult.err = &contextAlreadyDoneError{err: ctx.Err()} + multiResult.err = newContextAlreadyDoneError(ctx) pgConn.unlock() return multiResult default: @@ -1058,7 +1070,7 @@ func (pgConn *PgConn) execExtendedPrefix(ctx context.Context, paramValues [][]by if ctx != context.Background() { select { case <-ctx.Done(): - result.concludeCommand(nil, &contextAlreadyDoneError{err: ctx.Err()}) + result.concludeCommand(nil, newContextAlreadyDoneError(ctx)) result.closed = true pgConn.unlock() return result @@ -1098,7 +1110,7 @@ func (pgConn *PgConn) CopyTo(ctx context.Context, w io.Writer, sql string) (Comm select { case <-ctx.Done(): pgConn.unlock() - return nil, &contextAlreadyDoneError{err: ctx.Err()} + return nil, newContextAlreadyDoneError(ctx) default: } pgConn.contextWatcher.Watch(ctx) @@ -1123,7 +1135,7 @@ func (pgConn 
*PgConn) CopyTo(ctx context.Context, w io.Writer, sql string) (Comm msg, err := pgConn.receiveMessage() if err != nil { pgConn.asyncClose() - return nil, err + return nil, preferContextOverNetTimeoutError(ctx, err) } switch msg := msg.(type) { @@ -1158,7 +1170,7 @@ func (pgConn *PgConn) CopyFrom(ctx context.Context, r io.Reader, sql string) (Co if ctx != context.Background() { select { case <-ctx.Done(): - return nil, &contextAlreadyDoneError{err: ctx.Err()} + return nil, newContextAlreadyDoneError(ctx) default: } pgConn.contextWatcher.Watch(ctx) @@ -1183,7 +1195,7 @@ func (pgConn *PgConn) CopyFrom(ctx context.Context, r io.Reader, sql string) (Co msg, err := pgConn.receiveMessage() if err != nil { pgConn.asyncClose() - return nil, err + return nil, preferContextOverNetTimeoutError(ctx, err) } switch msg := msg.(type) { @@ -1242,7 +1254,7 @@ func (pgConn *PgConn) CopyFrom(ctx context.Context, r io.Reader, sql string) (Co msg, err := pgConn.receiveMessage() if err != nil { pgConn.asyncClose() - return nil, err + return nil, preferContextOverNetTimeoutError(ctx, err) } switch msg := msg.(type) { @@ -1274,7 +1286,7 @@ func (pgConn *PgConn) CopyFrom(ctx context.Context, r io.Reader, sql string) (Co msg, err := pgConn.receiveMessage() if err != nil { pgConn.asyncClose() - return nil, err + return nil, preferContextOverNetTimeoutError(ctx, err) } switch msg := msg.(type) { @@ -1316,7 +1328,7 @@ func (mrr *MultiResultReader) receiveMessage() (pgproto3.BackendMessage, error) if err != nil { mrr.pgConn.contextWatcher.Unwatch() - mrr.err = err + mrr.err = preferContextOverNetTimeoutError(mrr.ctx, err) mrr.closed = true mrr.pgConn.asyncClose() return nil, mrr.err @@ -1523,6 +1535,7 @@ func (rr *ResultReader) receiveMessage() (msg pgproto3.BackendMessage, err error } if err != nil { + err = preferContextOverNetTimeoutError(rr.ctx, err) rr.concludeCommand(nil, err) rr.pgConn.contextWatcher.Unwatch() rr.closed = true @@ -1601,7 +1614,7 @@ func (pgConn *PgConn) ExecBatch(ctx 
context.Context, batch *Batch) *MultiResultR select { case <-ctx.Done(): multiResult.closed = true - multiResult.err = &contextAlreadyDoneError{err: ctx.Err()} + multiResult.err = newContextAlreadyDoneError(ctx) pgConn.unlock() return multiResult default: diff --git a/vendor/github.com/jackc/pgerrcode/go.mod b/vendor/github.com/jackc/pgerrcode/go.mod deleted file mode 100644 index c5400979a..000000000 --- a/vendor/github.com/jackc/pgerrcode/go.mod +++ /dev/null @@ -1,3 +0,0 @@ -module github.com/jackc/pgerrcode - -go 1.12 diff --git a/vendor/github.com/jackc/pgio/go.mod b/vendor/github.com/jackc/pgio/go.mod deleted file mode 100644 index c1efdddb6..000000000 --- a/vendor/github.com/jackc/pgio/go.mod +++ /dev/null @@ -1,3 +0,0 @@ -module github.com/jackc/pgio - -go 1.12 diff --git a/vendor/github.com/jackc/pgpassfile/go.mod b/vendor/github.com/jackc/pgpassfile/go.mod deleted file mode 100644 index 48d90e313..000000000 --- a/vendor/github.com/jackc/pgpassfile/go.mod +++ /dev/null @@ -1,5 +0,0 @@ -module github.com/jackc/pgpassfile - -go 1.12 - -require github.com/stretchr/testify v1.3.0 diff --git a/vendor/github.com/jackc/pgpassfile/go.sum b/vendor/github.com/jackc/pgpassfile/go.sum deleted file mode 100644 index 4347755af..000000000 --- a/vendor/github.com/jackc/pgpassfile/go.sum +++ /dev/null @@ -1,7 +0,0 @@ -github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= diff --git 
a/vendor/github.com/jackc/pgproto3/v2/authentication_cleartext_password.go b/vendor/github.com/jackc/pgproto3/v2/authentication_cleartext_password.go index dd82c7a77..241fa6005 100644 --- a/vendor/github.com/jackc/pgproto3/v2/authentication_cleartext_password.go +++ b/vendor/github.com/jackc/pgproto3/v2/authentication_cleartext_password.go @@ -2,6 +2,7 @@ package pgproto3 import ( "encoding/binary" + "encoding/json" "errors" "github.com/jackc/pgio" @@ -14,6 +15,9 @@ type AuthenticationCleartextPassword struct { // Backend identifies this message as sendable by the PostgreSQL backend. func (*AuthenticationCleartextPassword) Backend() {} +// Backend identifies this message as an authentication response. +func (*AuthenticationCleartextPassword) AuthenticationResponse() {} + // Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message // type identifier and 4 byte message length. func (dst *AuthenticationCleartextPassword) Decode(src []byte) error { @@ -37,3 +41,12 @@ func (src *AuthenticationCleartextPassword) Encode(dst []byte) []byte { dst = pgio.AppendUint32(dst, AuthTypeCleartextPassword) return dst } + +// MarshalJSON implements encoding/json.Marshaler. +func (src AuthenticationCleartextPassword) MarshalJSON() ([]byte, error) { + return json.Marshal(struct { + Type string + }{ + Type: "AuthenticationCleartextPassword", + }) +} diff --git a/vendor/github.com/jackc/pgproto3/v2/authentication_md5_password.go b/vendor/github.com/jackc/pgproto3/v2/authentication_md5_password.go index d505d2649..32ec0390e 100644 --- a/vendor/github.com/jackc/pgproto3/v2/authentication_md5_password.go +++ b/vendor/github.com/jackc/pgproto3/v2/authentication_md5_password.go @@ -2,6 +2,7 @@ package pgproto3 import ( "encoding/binary" + "encoding/json" "errors" "github.com/jackc/pgio" @@ -15,6 +16,9 @@ type AuthenticationMD5Password struct { // Backend identifies this message as sendable by the PostgreSQL backend. 
func (*AuthenticationMD5Password) Backend() {} +// Backend identifies this message as an authentication response. +func (*AuthenticationMD5Password) AuthenticationResponse() {} + // Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message // type identifier and 4 byte message length. func (dst *AuthenticationMD5Password) Decode(src []byte) error { @@ -41,3 +45,33 @@ func (src *AuthenticationMD5Password) Encode(dst []byte) []byte { dst = append(dst, src.Salt[:]...) return dst } + +// MarshalJSON implements encoding/json.Marshaler. +func (src AuthenticationMD5Password) MarshalJSON() ([]byte, error) { + return json.Marshal(struct { + Type string + Salt [4]byte + }{ + Type: "AuthenticationMD5Password", + Salt: src.Salt, + }) +} + +// UnmarshalJSON implements encoding/json.Unmarshaler. +func (dst *AuthenticationMD5Password) UnmarshalJSON(data []byte) error { + // Ignore null, like in the main JSON package. + if string(data) == "null" { + return nil + } + + var msg struct { + Type string + Salt [4]byte + } + if err := json.Unmarshal(data, &msg); err != nil { + return err + } + + dst.Salt = msg.Salt + return nil +} diff --git a/vendor/github.com/jackc/pgproto3/v2/authentication_ok.go b/vendor/github.com/jackc/pgproto3/v2/authentication_ok.go index 7b13c6e01..2b476fe51 100644 --- a/vendor/github.com/jackc/pgproto3/v2/authentication_ok.go +++ b/vendor/github.com/jackc/pgproto3/v2/authentication_ok.go @@ -2,6 +2,7 @@ package pgproto3 import ( "encoding/binary" + "encoding/json" "errors" "github.com/jackc/pgio" @@ -14,6 +15,9 @@ type AuthenticationOk struct { // Backend identifies this message as sendable by the PostgreSQL backend. func (*AuthenticationOk) Backend() {} +// Backend identifies this message as an authentication response. +func (*AuthenticationOk) AuthenticationResponse() {} + // Decode decodes src into dst. 
src must contain the complete message with the exception of the initial 1 byte message // type identifier and 4 byte message length. func (dst *AuthenticationOk) Decode(src []byte) error { @@ -37,3 +41,12 @@ func (src *AuthenticationOk) Encode(dst []byte) []byte { dst = pgio.AppendUint32(dst, AuthTypeOk) return dst } + +// MarshalJSON implements encoding/json.Marshaler. +func (src AuthenticationOk) MarshalJSON() ([]byte, error) { + return json.Marshal(struct { + Type string + }{ + Type: "AuthenticationOK", + }) +} diff --git a/vendor/github.com/jackc/pgproto3/v2/authentication_sasl.go b/vendor/github.com/jackc/pgproto3/v2/authentication_sasl.go index c57ae32de..bdcb2c367 100644 --- a/vendor/github.com/jackc/pgproto3/v2/authentication_sasl.go +++ b/vendor/github.com/jackc/pgproto3/v2/authentication_sasl.go @@ -3,6 +3,7 @@ package pgproto3 import ( "bytes" "encoding/binary" + "encoding/json" "errors" "github.com/jackc/pgio" @@ -16,6 +17,9 @@ type AuthenticationSASL struct { // Backend identifies this message as sendable by the PostgreSQL backend. func (*AuthenticationSASL) Backend() {} +// Backend identifies this message as an authentication response. +func (*AuthenticationSASL) AuthenticationResponse() {} + // Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message // type identifier and 4 byte message length. func (dst *AuthenticationSASL) Decode(src []byte) error { @@ -58,3 +62,14 @@ func (src *AuthenticationSASL) Encode(dst []byte) []byte { return dst } + +// MarshalJSON implements encoding/json.Marshaler. 
+func (src AuthenticationSASL) MarshalJSON() ([]byte, error) { + return json.Marshal(struct { + Type string + AuthMechanisms []string + }{ + Type: "AuthenticationSASL", + AuthMechanisms: src.AuthMechanisms, + }) +} diff --git a/vendor/github.com/jackc/pgproto3/v2/authentication_sasl_continue.go b/vendor/github.com/jackc/pgproto3/v2/authentication_sasl_continue.go index 1b918a6ef..7f4a9c235 100644 --- a/vendor/github.com/jackc/pgproto3/v2/authentication_sasl_continue.go +++ b/vendor/github.com/jackc/pgproto3/v2/authentication_sasl_continue.go @@ -2,6 +2,7 @@ package pgproto3 import ( "encoding/binary" + "encoding/json" "errors" "github.com/jackc/pgio" @@ -15,6 +16,9 @@ type AuthenticationSASLContinue struct { // Backend identifies this message as sendable by the PostgreSQL backend. func (*AuthenticationSASLContinue) Backend() {} +// Backend identifies this message as an authentication response. +func (*AuthenticationSASLContinue) AuthenticationResponse() {} + // Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message // type identifier and 4 byte message length. func (dst *AuthenticationSASLContinue) Decode(src []byte) error { @@ -46,3 +50,32 @@ func (src *AuthenticationSASLContinue) Encode(dst []byte) []byte { return dst } + +// MarshalJSON implements encoding/json.Marshaler. +func (src AuthenticationSASLContinue) MarshalJSON() ([]byte, error) { + return json.Marshal(struct { + Type string + Data string + }{ + Type: "AuthenticationSASLContinue", + Data: string(src.Data), + }) +} + +// UnmarshalJSON implements encoding/json.Unmarshaler. +func (dst *AuthenticationSASLContinue) UnmarshalJSON(data []byte) error { + // Ignore null, like in the main JSON package. 
+ if string(data) == "null" { + return nil + } + + var msg struct { + Data string + } + if err := json.Unmarshal(data, &msg); err != nil { + return err + } + + dst.Data = []byte(msg.Data) + return nil +} diff --git a/vendor/github.com/jackc/pgproto3/v2/authentication_sasl_final.go b/vendor/github.com/jackc/pgproto3/v2/authentication_sasl_final.go index 11d356600..d82b9ee4d 100644 --- a/vendor/github.com/jackc/pgproto3/v2/authentication_sasl_final.go +++ b/vendor/github.com/jackc/pgproto3/v2/authentication_sasl_final.go @@ -2,6 +2,7 @@ package pgproto3 import ( "encoding/binary" + "encoding/json" "errors" "github.com/jackc/pgio" @@ -15,6 +16,9 @@ type AuthenticationSASLFinal struct { // Backend identifies this message as sendable by the PostgreSQL backend. func (*AuthenticationSASLFinal) Backend() {} +// Backend identifies this message as an authentication response. +func (*AuthenticationSASLFinal) AuthenticationResponse() {} + // Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message // type identifier and 4 byte message length. func (dst *AuthenticationSASLFinal) Decode(src []byte) error { @@ -46,3 +50,32 @@ func (src *AuthenticationSASLFinal) Encode(dst []byte) []byte { return dst } + +// MarshalJSON implements encoding/json.Unmarshaler. +func (src AuthenticationSASLFinal) MarshalJSON() ([]byte, error) { + return json.Marshal(struct { + Type string + Data string + }{ + Type: "AuthenticationSASLFinal", + Data: string(src.Data), + }) +} + +// UnmarshalJSON implements encoding/json.Unmarshaler. +func (dst *AuthenticationSASLFinal) UnmarshalJSON(data []byte) error { + // Ignore null, like in the main JSON package. 
+ if string(data) == "null" { + return nil + } + + var msg struct { + Data string + } + if err := json.Unmarshal(data, &msg); err != nil { + return err + } + + dst.Data = []byte(msg.Data) + return nil +} diff --git a/vendor/github.com/jackc/pgproto3/v2/backend.go b/vendor/github.com/jackc/pgproto3/v2/backend.go index 1f854c693..e9ba38fc3 100644 --- a/vendor/github.com/jackc/pgproto3/v2/backend.go +++ b/vendor/github.com/jackc/pgproto3/v2/backend.go @@ -12,27 +12,34 @@ type Backend struct { w io.Writer // Frontend message flyweights - bind Bind - cancelRequest CancelRequest - _close Close - copyFail CopyFail - describe Describe - execute Execute - flush Flush - gssEncRequest GSSEncRequest - parse Parse - passwordMessage PasswordMessage - query Query - sslRequest SSLRequest - startupMessage StartupMessage - sync Sync - terminate Terminate + bind Bind + cancelRequest CancelRequest + _close Close + copyFail CopyFail + copyData CopyData + copyDone CopyDone + describe Describe + execute Execute + flush Flush + gssEncRequest GSSEncRequest + parse Parse + query Query + sslRequest SSLRequest + startupMessage StartupMessage + sync Sync + terminate Terminate bodyLen int msgType byte partialMsg bool + authType uint32 } +const ( + minStartupPacketLen = 4 // minStartupPacketLen is a single 32-bit int version or code. + maxStartupPacketLen = 10000 // maxStartupPacketLen is MAX_STARTUP_PACKET_LENGTH from PG source. +) + // NewBackend creates a new Backend. 
func NewBackend(cr ChunkReader, w io.Writer) *Backend { return &Backend{cr: cr, w: w} @@ -54,9 +61,13 @@ func (b *Backend) ReceiveStartupMessage() (FrontendMessage, error) { } msgSize := int(binary.BigEndian.Uint32(buf) - 4) + if msgSize < minStartupPacketLen || msgSize > maxStartupPacketLen { + return nil, fmt.Errorf("invalid length of startup packet: %d", msgSize) + } + buf, err = b.cr.Next(msgSize) if err != nil { - return nil, err + return nil, translateEOFtoErrUnexpectedEOF(err) } code := binary.BigEndian.Uint32(buf) @@ -96,7 +107,7 @@ func (b *Backend) Receive() (FrontendMessage, error) { if !b.partialMsg { header, err := b.cr.Next(5) if err != nil { - return nil, err + return nil, translateEOFtoErrUnexpectedEOF(err) } b.msgType = header[0] @@ -116,12 +127,28 @@ func (b *Backend) Receive() (FrontendMessage, error) { msg = &b.execute case 'f': msg = &b.copyFail + case 'd': + msg = &b.copyData + case 'c': + msg = &b.copyDone case 'H': msg = &b.flush case 'P': msg = &b.parse case 'p': - msg = &b.passwordMessage + switch b.authType { + case AuthTypeSASL: + msg = &SASLInitialResponse{} + case AuthTypeSASLContinue: + msg = &SASLResponse{} + case AuthTypeSASLFinal: + msg = &SASLResponse{} + case AuthTypeCleartextPassword, AuthTypeMD5Password: + fallthrough + default: + // to maintain backwards compatability + msg = &PasswordMessage{} + } case 'Q': msg = &b.query case 'S': @@ -134,7 +161,7 @@ func (b *Backend) Receive() (FrontendMessage, error) { msgBody, err := b.cr.Next(b.bodyLen) if err != nil { - return nil, err + return nil, translateEOFtoErrUnexpectedEOF(err) } b.partialMsg = false @@ -142,3 +169,36 @@ func (b *Backend) Receive() (FrontendMessage, error) { err = msg.Decode(msgBody) return msg, err } + +// SetAuthType sets the authentication type in the backend. +// Since multiple message types can start with 'p', SetAuthType allows +// contextual identification of FrontendMessages. 
For example, in the +// PG message flow documentation for PasswordMessage: +// +// Byte1('p') +// +// Identifies the message as a password response. Note that this is also used for +// GSSAPI, SSPI and SASL response messages. The exact message type can be deduced from +// the context. +// +// Since the Frontend does not know about the state of a backend, it is important +// to call SetAuthType() after an authentication request is received by the Frontend. +func (b *Backend) SetAuthType(authType uint32) error { + switch authType { + case AuthTypeOk, + AuthTypeCleartextPassword, + AuthTypeMD5Password, + AuthTypeSCMCreds, + AuthTypeGSS, + AuthTypeGSSCont, + AuthTypeSSPI, + AuthTypeSASL, + AuthTypeSASLContinue, + AuthTypeSASLFinal: + b.authType = authType + default: + return fmt.Errorf("authType not recognized: %d", authType) + } + + return nil +} diff --git a/vendor/github.com/jackc/pgproto3/v2/bind.go b/vendor/github.com/jackc/pgproto3/v2/bind.go index 52372095d..e9664f59f 100644 --- a/vendor/github.com/jackc/pgproto3/v2/bind.go +++ b/vendor/github.com/jackc/pgproto3/v2/bind.go @@ -5,6 +5,7 @@ import ( "encoding/binary" "encoding/hex" "encoding/json" + "fmt" "github.com/jackc/pgio" ) @@ -181,3 +182,35 @@ func (src Bind) MarshalJSON() ([]byte, error) { ResultFormatCodes: src.ResultFormatCodes, }) } + +// UnmarshalJSON implements encoding/json.Unmarshaler. +func (dst *Bind) UnmarshalJSON(data []byte) error { + // Ignore null, like in the main JSON package. 
+ if string(data) == "null" { + return nil + } + + var msg struct { + DestinationPortal string + PreparedStatement string + ParameterFormatCodes []int16 + Parameters []map[string]string + ResultFormatCodes []int16 + } + err := json.Unmarshal(data, &msg) + if err != nil { + return err + } + dst.DestinationPortal = msg.DestinationPortal + dst.PreparedStatement = msg.PreparedStatement + dst.ParameterFormatCodes = msg.ParameterFormatCodes + dst.Parameters = make([][]byte, len(msg.Parameters)) + dst.ResultFormatCodes = msg.ResultFormatCodes + for n, parameter := range msg.Parameters { + dst.Parameters[n], err = getValueFromJSON(parameter) + if err != nil { + return fmt.Errorf("cannot get param %d: %w", n, err) + } + } + return nil +} diff --git a/vendor/github.com/jackc/pgproto3/v2/close.go b/vendor/github.com/jackc/pgproto3/v2/close.go index 382969093..a45f2b930 100644 --- a/vendor/github.com/jackc/pgproto3/v2/close.go +++ b/vendor/github.com/jackc/pgproto3/v2/close.go @@ -3,6 +3,7 @@ package pgproto3 import ( "bytes" "encoding/json" + "errors" "github.com/jackc/pgio" ) @@ -62,3 +63,27 @@ func (src Close) MarshalJSON() ([]byte, error) { Name: src.Name, }) } + +// UnmarshalJSON implements encoding/json.Unmarshaler. +func (dst *Close) UnmarshalJSON(data []byte) error { + // Ignore null, like in the main JSON package. 
+ if string(data) == "null" { + return nil + } + + var msg struct { + ObjectType string + Name string + } + if err := json.Unmarshal(data, &msg); err != nil { + return err + } + + if len(msg.ObjectType) != 1 { + return errors.New("invalid length for Close.ObjectType") + } + + dst.ObjectType = byte(msg.ObjectType[0]) + dst.Name = msg.Name + return nil +} diff --git a/vendor/github.com/jackc/pgproto3/v2/command_complete.go b/vendor/github.com/jackc/pgproto3/v2/command_complete.go index b5106fdaf..cdc49f39f 100644 --- a/vendor/github.com/jackc/pgproto3/v2/command_complete.go +++ b/vendor/github.com/jackc/pgproto3/v2/command_complete.go @@ -51,3 +51,21 @@ func (src CommandComplete) MarshalJSON() ([]byte, error) { CommandTag: string(src.CommandTag), }) } + +// UnmarshalJSON implements encoding/json.Unmarshaler. +func (dst *CommandComplete) UnmarshalJSON(data []byte) error { + // Ignore null, like in the main JSON package. + if string(data) == "null" { + return nil + } + + var msg struct { + CommandTag string + } + if err := json.Unmarshal(data, &msg); err != nil { + return err + } + + dst.CommandTag = []byte(msg.CommandTag) + return nil +} diff --git a/vendor/github.com/jackc/pgproto3/v2/copy_both_response.go b/vendor/github.com/jackc/pgproto3/v2/copy_both_response.go index 2d58f820e..fbd985d86 100644 --- a/vendor/github.com/jackc/pgproto3/v2/copy_both_response.go +++ b/vendor/github.com/jackc/pgproto3/v2/copy_both_response.go @@ -4,6 +4,7 @@ import ( "bytes" "encoding/binary" "encoding/json" + "errors" "github.com/jackc/pgio" ) @@ -68,3 +69,27 @@ func (src CopyBothResponse) MarshalJSON() ([]byte, error) { ColumnFormatCodes: src.ColumnFormatCodes, }) } + +// UnmarshalJSON implements encoding/json.Unmarshaler. +func (dst *CopyBothResponse) UnmarshalJSON(data []byte) error { + // Ignore null, like in the main JSON package. 
+ if string(data) == "null" { + return nil + } + + var msg struct { + OverallFormat string + ColumnFormatCodes []uint16 + } + if err := json.Unmarshal(data, &msg); err != nil { + return err + } + + if len(msg.OverallFormat) != 1 { + return errors.New("invalid length for CopyBothResponse.OverallFormat") + } + + dst.OverallFormat = msg.OverallFormat[0] + dst.ColumnFormatCodes = msg.ColumnFormatCodes + return nil +} diff --git a/vendor/github.com/jackc/pgproto3/v2/copy_data.go b/vendor/github.com/jackc/pgproto3/v2/copy_data.go index 7d6002fe0..128aa198c 100644 --- a/vendor/github.com/jackc/pgproto3/v2/copy_data.go +++ b/vendor/github.com/jackc/pgproto3/v2/copy_data.go @@ -42,3 +42,21 @@ func (src CopyData) MarshalJSON() ([]byte, error) { Data: hex.EncodeToString(src.Data), }) } + +// UnmarshalJSON implements encoding/json.Unmarshaler. +func (dst *CopyData) UnmarshalJSON(data []byte) error { + // Ignore null, like in the main JSON package. + if string(data) == "null" { + return nil + } + + var msg struct { + Data string + } + if err := json.Unmarshal(data, &msg); err != nil { + return err + } + + dst.Data = []byte(msg.Data) + return nil +} diff --git a/vendor/github.com/jackc/pgproto3/v2/copy_in_response.go b/vendor/github.com/jackc/pgproto3/v2/copy_in_response.go index 5f2595b87..80733adcf 100644 --- a/vendor/github.com/jackc/pgproto3/v2/copy_in_response.go +++ b/vendor/github.com/jackc/pgproto3/v2/copy_in_response.go @@ -4,6 +4,7 @@ import ( "bytes" "encoding/binary" "encoding/json" + "errors" "github.com/jackc/pgio" ) @@ -69,3 +70,27 @@ func (src CopyInResponse) MarshalJSON() ([]byte, error) { ColumnFormatCodes: src.ColumnFormatCodes, }) } + +// UnmarshalJSON implements encoding/json.Unmarshaler. +func (dst *CopyInResponse) UnmarshalJSON(data []byte) error { + // Ignore null, like in the main JSON package. 
+ if string(data) == "null" { + return nil + } + + var msg struct { + OverallFormat string + ColumnFormatCodes []uint16 + } + if err := json.Unmarshal(data, &msg); err != nil { + return err + } + + if len(msg.OverallFormat) != 1 { + return errors.New("invalid length for CopyInResponse.OverallFormat") + } + + dst.OverallFormat = msg.OverallFormat[0] + dst.ColumnFormatCodes = msg.ColumnFormatCodes + return nil +} diff --git a/vendor/github.com/jackc/pgproto3/v2/copy_out_response.go b/vendor/github.com/jackc/pgproto3/v2/copy_out_response.go index 8538dfc7d..5e607e3ac 100644 --- a/vendor/github.com/jackc/pgproto3/v2/copy_out_response.go +++ b/vendor/github.com/jackc/pgproto3/v2/copy_out_response.go @@ -4,6 +4,7 @@ import ( "bytes" "encoding/binary" "encoding/json" + "errors" "github.com/jackc/pgio" ) @@ -69,3 +70,27 @@ func (src CopyOutResponse) MarshalJSON() ([]byte, error) { ColumnFormatCodes: src.ColumnFormatCodes, }) } + +// UnmarshalJSON implements encoding/json.Unmarshaler. +func (dst *CopyOutResponse) UnmarshalJSON(data []byte) error { + // Ignore null, like in the main JSON package. + if string(data) == "null" { + return nil + } + + var msg struct { + OverallFormat string + ColumnFormatCodes []uint16 + } + if err := json.Unmarshal(data, &msg); err != nil { + return err + } + + if len(msg.OverallFormat) != 1 { + return errors.New("invalid length for CopyOutResponse.OverallFormat") + } + + dst.OverallFormat = msg.OverallFormat[0] + dst.ColumnFormatCodes = msg.ColumnFormatCodes + return nil +} diff --git a/vendor/github.com/jackc/pgproto3/v2/data_row.go b/vendor/github.com/jackc/pgproto3/v2/data_row.go index 5fa3c5d8c..637687616 100644 --- a/vendor/github.com/jackc/pgproto3/v2/data_row.go +++ b/vendor/github.com/jackc/pgproto3/v2/data_row.go @@ -115,3 +115,28 @@ func (src DataRow) MarshalJSON() ([]byte, error) { Values: formattedValues, }) } + +// UnmarshalJSON implements encoding/json.Unmarshaler. 
+func (dst *DataRow) UnmarshalJSON(data []byte) error { + // Ignore null, like in the main JSON package. + if string(data) == "null" { + return nil + } + + var msg struct { + Values []map[string]string + } + if err := json.Unmarshal(data, &msg); err != nil { + return err + } + + dst.Values = make([][]byte, len(msg.Values)) + for n, parameter := range msg.Values { + var err error + dst.Values[n], err = getValueFromJSON(parameter) + if err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/jackc/pgproto3/v2/describe.go b/vendor/github.com/jackc/pgproto3/v2/describe.go index 308f582e7..0d825db19 100644 --- a/vendor/github.com/jackc/pgproto3/v2/describe.go +++ b/vendor/github.com/jackc/pgproto3/v2/describe.go @@ -3,6 +3,7 @@ package pgproto3 import ( "bytes" "encoding/json" + "errors" "github.com/jackc/pgio" ) @@ -62,3 +63,26 @@ func (src Describe) MarshalJSON() ([]byte, error) { Name: src.Name, }) } + +// UnmarshalJSON implements encoding/json.Unmarshaler. +func (dst *Describe) UnmarshalJSON(data []byte) error { + // Ignore null, like in the main JSON package. 
+ if string(data) == "null" { + return nil + } + + var msg struct { + ObjectType string + Name string + } + if err := json.Unmarshal(data, &msg); err != nil { + return err + } + if len(msg.ObjectType) != 1 { + return errors.New("invalid length for Describe.ObjectType") + } + + dst.ObjectType = byte(msg.ObjectType[0]) + dst.Name = msg.Name + return nil +} diff --git a/vendor/github.com/jackc/pgproto3/v2/error_response.go b/vendor/github.com/jackc/pgproto3/v2/error_response.go index d444798bc..ec51e0192 100644 --- a/vendor/github.com/jackc/pgproto3/v2/error_response.go +++ b/vendor/github.com/jackc/pgproto3/v2/error_response.go @@ -3,27 +3,29 @@ package pgproto3 import ( "bytes" "encoding/binary" + "encoding/json" "strconv" ) type ErrorResponse struct { - Severity string - Code string - Message string - Detail string - Hint string - Position int32 - InternalPosition int32 - InternalQuery string - Where string - SchemaName string - TableName string - ColumnName string - DataTypeName string - ConstraintName string - File string - Line int32 - Routine string + Severity string + SeverityUnlocalized string // only in 9.6 and greater + Code string + Message string + Detail string + Hint string + Position int32 + InternalPosition int32 + InternalQuery string + Where string + SchemaName string + TableName string + ColumnName string + DataTypeName string + ConstraintName string + File string + Line int32 + Routine string UnknownFields map[byte]string } @@ -56,6 +58,8 @@ func (dst *ErrorResponse) Decode(src []byte) error { switch k { case 'S': dst.Severity = v + case 'V': + dst.SeverityUnlocalized = v case 'C': dst.Code = v case 'M': @@ -123,6 +127,11 @@ func (src *ErrorResponse) marshalBinary(typeByte byte) []byte { buf.WriteString(src.Severity) buf.WriteByte(0) } + if src.SeverityUnlocalized != "" { + buf.WriteByte('V') + buf.WriteString(src.SeverityUnlocalized) + buf.WriteByte(0) + } if src.Code != "" { buf.WriteByte('C') buf.WriteString(src.Code) @@ -210,9 +219,116 @@ func 
(src *ErrorResponse) marshalBinary(typeByte byte) []byte { buf.WriteString(v) buf.WriteByte(0) } + buf.WriteByte(0) binary.BigEndian.PutUint32(buf.Bytes()[1:5], uint32(buf.Len()-1)) return buf.Bytes() } + +// MarshalJSON implements encoding/json.Marshaler. +func (src ErrorResponse) MarshalJSON() ([]byte, error) { + return json.Marshal(struct { + Type string + Severity string + SeverityUnlocalized string // only in 9.6 and greater + Code string + Message string + Detail string + Hint string + Position int32 + InternalPosition int32 + InternalQuery string + Where string + SchemaName string + TableName string + ColumnName string + DataTypeName string + ConstraintName string + File string + Line int32 + Routine string + + UnknownFields map[byte]string + }{ + Type: "ErrorResponse", + Severity: src.Severity, + SeverityUnlocalized: src.SeverityUnlocalized, + Code: src.Code, + Message: src.Message, + Detail: src.Detail, + Hint: src.Hint, + Position: src.Position, + InternalPosition: src.InternalPosition, + InternalQuery: src.InternalQuery, + Where: src.Where, + SchemaName: src.SchemaName, + TableName: src.TableName, + ColumnName: src.ColumnName, + DataTypeName: src.DataTypeName, + ConstraintName: src.ConstraintName, + File: src.File, + Line: src.Line, + Routine: src.Routine, + UnknownFields: src.UnknownFields, + }) +} + +// UnmarshalJSON implements encoding/json.Unmarshaler. +func (dst *ErrorResponse) UnmarshalJSON(data []byte) error { + // Ignore null, like in the main JSON package. 
+ if string(data) == "null" { + return nil + } + + var msg struct { + Type string + Severity string + SeverityUnlocalized string // only in 9.6 and greater + Code string + Message string + Detail string + Hint string + Position int32 + InternalPosition int32 + InternalQuery string + Where string + SchemaName string + TableName string + ColumnName string + DataTypeName string + ConstraintName string + File string + Line int32 + Routine string + + UnknownFields map[byte]string + } + if err := json.Unmarshal(data, &msg); err != nil { + return err + } + + dst.Severity = msg.Severity + dst.SeverityUnlocalized = msg.SeverityUnlocalized + dst.Code = msg.Code + dst.Message = msg.Message + dst.Detail = msg.Detail + dst.Hint = msg.Hint + dst.Position = msg.Position + dst.InternalPosition = msg.InternalPosition + dst.InternalQuery = msg.InternalQuery + dst.Where = msg.Where + dst.SchemaName = msg.SchemaName + dst.TableName = msg.TableName + dst.ColumnName = msg.ColumnName + dst.DataTypeName = msg.DataTypeName + dst.ConstraintName = msg.ConstraintName + dst.File = msg.File + dst.Line = msg.Line + dst.Routine = msg.Routine + + dst.UnknownFields = msg.UnknownFields + + return nil +} diff --git a/vendor/github.com/jackc/pgproto3/v2/frontend.go b/vendor/github.com/jackc/pgproto3/v2/frontend.go index b8f545ca8..c33dfb084 100644 --- a/vendor/github.com/jackc/pgproto3/v2/frontend.go +++ b/vendor/github.com/jackc/pgproto3/v2/frontend.go @@ -45,6 +45,7 @@ type Frontend struct { bodyLen int msgType byte partialMsg bool + authType uint32 } // NewFrontend creates a new Frontend. @@ -146,10 +147,16 @@ func (f *Frontend) Receive() (BackendMessage, error) { } // Authentication message type constants. +// See src/include/libpq/pqcomm.h for all +// constants. 
const ( AuthTypeOk = 0 AuthTypeCleartextPassword = 3 AuthTypeMD5Password = 5 + AuthTypeSCMCreds = 6 + AuthTypeGSS = 7 + AuthTypeGSSCont = 8 + AuthTypeSSPI = 9 AuthTypeSASL = 10 AuthTypeSASLContinue = 11 AuthTypeSASLFinal = 12 @@ -159,15 +166,23 @@ func (f *Frontend) findAuthenticationMessageType(src []byte) (BackendMessage, er if len(src) < 4 { return nil, errors.New("authentication message too short") } - authType := binary.BigEndian.Uint32(src[:4]) + f.authType = binary.BigEndian.Uint32(src[:4]) - switch authType { + switch f.authType { case AuthTypeOk: return &f.authenticationOk, nil case AuthTypeCleartextPassword: return &f.authenticationCleartextPassword, nil case AuthTypeMD5Password: return &f.authenticationMD5Password, nil + case AuthTypeSCMCreds: + return nil, errors.New("AuthTypeSCMCreds is unimplemented") + case AuthTypeGSS: + return nil, errors.New("AuthTypeGSS is unimplemented") + case AuthTypeGSSCont: + return nil, errors.New("AuthTypeGSSCont is unimplemented") + case AuthTypeSSPI: + return nil, errors.New("AuthTypeSSPI is unimplemented") case AuthTypeSASL: return &f.authenticationSASL, nil case AuthTypeSASLContinue: @@ -175,6 +190,12 @@ func (f *Frontend) findAuthenticationMessageType(src []byte) (BackendMessage, er case AuthTypeSASLFinal: return &f.authenticationSASLFinal, nil default: - return nil, fmt.Errorf("unknown authentication type: %d", authType) + return nil, fmt.Errorf("unknown authentication type: %d", f.authType) } } + +// GetAuthType returns the authType used in the current state of the frontend. +// See SetAuthType for more information. 
+func (f *Frontend) GetAuthType() uint32 { + return f.authType +} diff --git a/vendor/github.com/jackc/pgproto3/v2/function_call_response.go b/vendor/github.com/jackc/pgproto3/v2/function_call_response.go index 5cc2d4d29..53d642221 100644 --- a/vendor/github.com/jackc/pgproto3/v2/function_call_response.go +++ b/vendor/github.com/jackc/pgproto3/v2/function_call_response.go @@ -81,3 +81,21 @@ func (src FunctionCallResponse) MarshalJSON() ([]byte, error) { Result: formattedValue, }) } + +// UnmarshalJSON implements encoding/json.Unmarshaler. +func (dst *FunctionCallResponse) UnmarshalJSON(data []byte) error { + // Ignore null, like in the main JSON package. + if string(data) == "null" { + return nil + } + + var msg struct { + Result map[string]string + } + err := json.Unmarshal(data, &msg) + if err != nil { + return err + } + dst.Result, err = getValueFromJSON(msg.Result) + return err +} diff --git a/vendor/github.com/jackc/pgproto3/v2/go.mod b/vendor/github.com/jackc/pgproto3/v2/go.mod deleted file mode 100644 index 36041a94a..000000000 --- a/vendor/github.com/jackc/pgproto3/v2/go.mod +++ /dev/null @@ -1,9 +0,0 @@ -module github.com/jackc/pgproto3/v2 - -go 1.12 - -require ( - github.com/jackc/chunkreader/v2 v2.0.0 - github.com/jackc/pgio v1.0.0 - github.com/stretchr/testify v1.4.0 -) diff --git a/vendor/github.com/jackc/pgproto3/v2/go.sum b/vendor/github.com/jackc/pgproto3/v2/go.sum deleted file mode 100644 index dd9cd044f..000000000 --- a/vendor/github.com/jackc/pgproto3/v2/go.sum +++ /dev/null @@ -1,14 +0,0 @@ -github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/jackc/chunkreader/v2 v2.0.0 h1:DUwgMQuuPnS0rhMXenUtZpqZqrR/30NWY+qQvTpSvEs= -github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= -github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE= -github.com/jackc/pgio 
v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/jackc/pgproto3/v2/password_message.go b/vendor/github.com/jackc/pgproto3/v2/password_message.go index 4b68b31a8..cae76c50c 100644 --- a/vendor/github.com/jackc/pgproto3/v2/password_message.go +++ b/vendor/github.com/jackc/pgproto3/v2/password_message.go @@ -14,6 +14,9 @@ type PasswordMessage struct { // Frontend identifies this message as sendable by a PostgreSQL frontend. func (*PasswordMessage) Frontend() {} +// Frontend identifies this message as an authentication response. +func (*PasswordMessage) InitialResponse() {} + // Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message // type identifier and 4 byte message length. 
func (dst *PasswordMessage) Decode(src []byte) error { diff --git a/vendor/github.com/jackc/pgproto3/v2/pgproto3.go b/vendor/github.com/jackc/pgproto3/v2/pgproto3.go index fe7b085bc..70c825e3c 100644 --- a/vendor/github.com/jackc/pgproto3/v2/pgproto3.go +++ b/vendor/github.com/jackc/pgproto3/v2/pgproto3.go @@ -1,6 +1,10 @@ package pgproto3 -import "fmt" +import ( + "encoding/hex" + "errors" + "fmt" +) // Message is the interface implemented by an object that can decode and encode // a particular PostgreSQL message. @@ -23,6 +27,11 @@ type BackendMessage interface { Backend() // no-op method to distinguish frontend from backend methods } +type AuthenticationResponseMessage interface { + BackendMessage + AuthenticationResponse() // no-op method to distinguish authentication responses +} + type invalidMessageLenErr struct { messageType string expectedLen int @@ -40,3 +49,17 @@ type invalidMessageFormatErr struct { func (e *invalidMessageFormatErr) Error() string { return fmt.Sprintf("%s body is invalid", e.messageType) } + +// getValueFromJSON gets the value from a protocol message representation in JSON. +func getValueFromJSON(v map[string]string) ([]byte, error) { + if v == nil { + return nil, nil + } + if text, ok := v["text"]; ok { + return []byte(text), nil + } + if binary, ok := v["binary"]; ok { + return hex.DecodeString(binary) + } + return nil, errors.New("unknown protocol representation") +} diff --git a/vendor/github.com/jackc/pgproto3/v2/ready_for_query.go b/vendor/github.com/jackc/pgproto3/v2/ready_for_query.go index 879afe390..67a39be39 100644 --- a/vendor/github.com/jackc/pgproto3/v2/ready_for_query.go +++ b/vendor/github.com/jackc/pgproto3/v2/ready_for_query.go @@ -2,6 +2,7 @@ package pgproto3 import ( "encoding/json" + "errors" ) type ReadyForQuery struct { @@ -38,3 +39,23 @@ func (src ReadyForQuery) MarshalJSON() ([]byte, error) { TxStatus: string(src.TxStatus), }) } + +// UnmarshalJSON implements encoding/json.Unmarshaler. 
+func (dst *ReadyForQuery) UnmarshalJSON(data []byte) error { + // Ignore null, like in the main JSON package. + if string(data) == "null" { + return nil + } + + var msg struct { + TxStatus string + } + if err := json.Unmarshal(data, &msg); err != nil { + return err + } + if len(msg.TxStatus) != 1 { + return errors.New("invalid length for ReadyForQuery.TxStatus") + } + dst.TxStatus = msg.TxStatus[0] + return nil +} diff --git a/vendor/github.com/jackc/pgproto3/v2/row_description.go b/vendor/github.com/jackc/pgproto3/v2/row_description.go index d9b8c7c98..a2e0d28e2 100644 --- a/vendor/github.com/jackc/pgproto3/v2/row_description.go +++ b/vendor/github.com/jackc/pgproto3/v2/row_description.go @@ -132,3 +132,34 @@ func (src RowDescription) MarshalJSON() ([]byte, error) { Fields: src.Fields, }) } + +// UnmarshalJSON implements encoding/json.Unmarshaler. +func (dst *RowDescription) UnmarshalJSON(data []byte) error { + var msg struct { + Fields []struct { + Name string + TableOID uint32 + TableAttributeNumber uint16 + DataTypeOID uint32 + DataTypeSize int16 + TypeModifier int32 + Format int16 + } + } + if err := json.Unmarshal(data, &msg); err != nil { + return err + } + dst.Fields = make([]FieldDescription, len(msg.Fields)) + for n, field := range msg.Fields { + dst.Fields[n] = FieldDescription{ + Name: []byte(field.Name), + TableOID: field.TableOID, + TableAttributeNumber: field.TableAttributeNumber, + DataTypeOID: field.DataTypeOID, + DataTypeSize: field.DataTypeSize, + TypeModifier: field.TypeModifier, + Format: field.Format, + } + } + return nil +} diff --git a/vendor/github.com/jackc/pgproto3/v2/sasl_initial_response.go b/vendor/github.com/jackc/pgproto3/v2/sasl_initial_response.go index 0bf8a9e56..f7e5f36a9 100644 --- a/vendor/github.com/jackc/pgproto3/v2/sasl_initial_response.go +++ b/vendor/github.com/jackc/pgproto3/v2/sasl_initial_response.go @@ -67,3 +67,28 @@ func (src SASLInitialResponse) MarshalJSON() ([]byte, error) { Data: hex.EncodeToString(src.Data), }) 
} + +// UnmarshalJSON implements encoding/json.Unmarshaler. +func (dst *SASLInitialResponse) UnmarshalJSON(data []byte) error { + // Ignore null, like in the main JSON package. + if string(data) == "null" { + return nil + } + + var msg struct { + AuthMechanism string + Data string + } + if err := json.Unmarshal(data, &msg); err != nil { + return err + } + dst.AuthMechanism = msg.AuthMechanism + if msg.Data != "" { + decoded, err := hex.DecodeString(msg.Data) + if err != nil { + return err + } + dst.Data = decoded + } + return nil +} diff --git a/vendor/github.com/jackc/pgproto3/v2/sasl_response.go b/vendor/github.com/jackc/pgproto3/v2/sasl_response.go index 21be6d755..41fb4c397 100644 --- a/vendor/github.com/jackc/pgproto3/v2/sasl_response.go +++ b/vendor/github.com/jackc/pgproto3/v2/sasl_response.go @@ -41,3 +41,21 @@ func (src SASLResponse) MarshalJSON() ([]byte, error) { Data: hex.EncodeToString(src.Data), }) } + +// UnmarshalJSON implements encoding/json.Unmarshaler. +func (dst *SASLResponse) UnmarshalJSON(data []byte) error { + var msg struct { + Data string + } + if err := json.Unmarshal(data, &msg); err != nil { + return err + } + if msg.Data != "" { + decoded, err := hex.DecodeString(msg.Data) + if err != nil { + return err + } + dst.Data = decoded + } + return nil +} diff --git a/vendor/github.com/jackc/pgservicefile/go.mod b/vendor/github.com/jackc/pgservicefile/go.mod deleted file mode 100644 index 051e9e0f4..000000000 --- a/vendor/github.com/jackc/pgservicefile/go.mod +++ /dev/null @@ -1,5 +0,0 @@ -module github.com/jackc/pgservicefile - -go 1.14 - -require github.com/stretchr/testify v1.5.1 diff --git a/vendor/github.com/jackc/pgservicefile/go.sum b/vendor/github.com/jackc/pgservicefile/go.sum deleted file mode 100644 index a80206ab1..000000000 --- a/vendor/github.com/jackc/pgservicefile/go.sum +++ /dev/null @@ -1,10 +0,0 @@ -github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= -github.com/davecgh/go-spew v1.1.0/go.mod 
h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/jackc/pgtype/CHANGELOG.md b/vendor/github.com/jackc/pgtype/CHANGELOG.md index d89f6ddcb..64d96fa00 100644 --- a/vendor/github.com/jackc/pgtype/CHANGELOG.md +++ b/vendor/github.com/jackc/pgtype/CHANGELOG.md @@ -1,3 +1,14 @@ +# 1.8.1 (July 24, 2021) + +* Cleaned up Go module dependency chain + +# 1.8.0 (July 10, 2021) + +* Maintain host bits for inet types (Cameron Daniel) +* Support pointers of wrapping structs (Ivan Daunis) +* Register JSONBArray at NewConnInfo() (Rueian) +* CompositeTextScanner handles backslash escapes + # 1.7.0 (March 25, 2021) * Fix scanning int into **sql.Scanner implementor diff --git a/vendor/github.com/jackc/pgtype/composite_type.go b/vendor/github.com/jackc/pgtype/composite_type.go index 7c8dbcd5e..32e0aa26b 100644 --- a/vendor/github.com/jackc/pgtype/composite_type.go +++ b/vendor/github.com/jackc/pgtype/composite_type.go @@ -491,6 +491,10 @@ func (cfs *CompositeTextScanner) Next() bool { } else { break } + } else if ch == '\\' { + cfs.rp++ + cfs.fieldBytes = append(cfs.fieldBytes, cfs.src[cfs.rp]) + cfs.rp++ } else { cfs.fieldBytes = append(cfs.fieldBytes, ch) cfs.rp++ diff --git a/vendor/github.com/jackc/pgtype/convert.go b/vendor/github.com/jackc/pgtype/convert.go 
index 8ae599b9b..de9ba9ba3 100644 --- a/vendor/github.com/jackc/pgtype/convert.go +++ b/vendor/github.com/jackc/pgtype/convert.go @@ -8,9 +8,11 @@ import ( "time" ) -const maxUint = ^uint(0) -const maxInt = int(maxUint >> 1) -const minInt = -maxInt - 1 +const ( + maxUint = ^uint(0) + maxInt = int(maxUint >> 1) + minInt = -maxInt - 1 +) // underlyingNumberType gets the underlying type that can be converted to Int2, Int4, Int8, Float4, or Float8 func underlyingNumberType(val interface{}) (interface{}, bool) { @@ -387,6 +389,11 @@ func NullAssignTo(dst interface{}) error { var kindTypes map[reflect.Kind]reflect.Type +func toInterface(dst reflect.Value, t reflect.Type) (interface{}, bool) { + nextDst := dst.Convert(t) + return nextDst.Interface(), dst.Type() != nextDst.Type() +} + // GetAssignToDstType attempts to convert dst to something AssignTo can assign // to. If dst is a pointer to pointer it allocates a value and returns the // dereferences pointer. If dst is a named type such as *Foo where Foo is type @@ -412,23 +419,33 @@ func GetAssignToDstType(dst interface{}) (interface{}, bool) { // if dst is pointer to a base type that has been renamed if baseValType, ok := kindTypes[dstVal.Kind()]; ok { - nextDst := dstPtr.Convert(reflect.PtrTo(baseValType)) - return nextDst.Interface(), dstPtr.Type() != nextDst.Type() + return toInterface(dstPtr, reflect.PtrTo(baseValType)) } if dstVal.Kind() == reflect.Slice { if baseElemType, ok := kindTypes[dstVal.Type().Elem().Kind()]; ok { - baseSliceType := reflect.PtrTo(reflect.SliceOf(baseElemType)) - nextDst := dstPtr.Convert(baseSliceType) - return nextDst.Interface(), dstPtr.Type() != nextDst.Type() + return toInterface(dstPtr, reflect.PtrTo(reflect.SliceOf(baseElemType))) } } if dstVal.Kind() == reflect.Array { if baseElemType, ok := kindTypes[dstVal.Type().Elem().Kind()]; ok { - baseArrayType := reflect.PtrTo(reflect.ArrayOf(dstVal.Len(), baseElemType)) - nextDst := dstPtr.Convert(baseArrayType) - return 
nextDst.Interface(), dstPtr.Type() != nextDst.Type() + return toInterface(dstPtr, reflect.PtrTo(reflect.ArrayOf(dstVal.Len(), baseElemType))) + } + } + + if dstVal.Kind() == reflect.Struct { + if dstVal.Type().NumField() == 1 && dstVal.Type().Field(0).Anonymous { + dstPtr = dstVal.Field(0).Addr() + nested := dstVal.Type().Field(0).Type + if nested.Kind() == reflect.Array { + if baseElemType, ok := kindTypes[nested.Elem().Kind()]; ok { + return toInterface(dstPtr, reflect.PtrTo(reflect.ArrayOf(nested.Len(), baseElemType))) + } + } + if _, ok := kindTypes[nested.Kind()]; ok && dstPtr.CanInterface() { + return dstPtr.Interface(), true + } } } diff --git a/vendor/github.com/jackc/pgtype/go.mod b/vendor/github.com/jackc/pgtype/go.mod deleted file mode 100644 index f213388a5..000000000 --- a/vendor/github.com/jackc/pgtype/go.mod +++ /dev/null @@ -1,13 +0,0 @@ -module github.com/jackc/pgtype - -go 1.13 - -require ( - github.com/gofrs/uuid v3.2.0+incompatible - github.com/jackc/pgconn v1.5.1-0.20200601181101-fa742c524853 - github.com/jackc/pgio v1.0.0 - github.com/jackc/pgx/v4 v4.6.1-0.20200606145419-4e5062306904 - github.com/lib/pq v1.3.0 - github.com/shopspring/decimal v0.0.0-20200227202807-02e2044944cc - github.com/stretchr/testify v1.5.1 -) diff --git a/vendor/github.com/jackc/pgtype/go.sum b/vendor/github.com/jackc/pgtype/go.sum deleted file mode 100644 index 464f00911..000000000 --- a/vendor/github.com/jackc/pgtype/go.sum +++ /dev/null @@ -1,183 +0,0 @@ -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I= -github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= -github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= 
-github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= -github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/gofrs/uuid v3.2.0+incompatible h1:y12jRkkFxsd7GpqdSZ+/KCs/fJbqpEXSGd4+jfEaewE= -github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= -github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/jackc/chunkreader v1.0.0 h1:4s39bBR8ByfqH+DKm8rQA3E1LHZWB9XWcrz8fqaZbe0= -github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo= -github.com/jackc/chunkreader/v2 v2.0.0 h1:DUwgMQuuPnS0rhMXenUtZpqZqrR/30NWY+qQvTpSvEs= -github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= -github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8= -github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= -github.com/jackc/pgconn v0.0.0-20190420214824-7e0022ef6ba3 h1:ZFYpB74Kq8xE9gmfxCmXD6QxZ27ja+j3HwGFc+YurhQ= -github.com/jackc/pgconn v0.0.0-20190420214824-7e0022ef6ba3/go.mod h1:jkELnwuX+w9qN5YIfX0fl88Ehu4XC3keFuOJJk9pcnA= -github.com/jackc/pgconn v0.0.0-20190824142844-760dd75542eb h1:d6GP9szHvXVopAOAnZ7WhRnF3Xdxrylmm/9jnfmW4Ag= -github.com/jackc/pgconn v0.0.0-20190824142844-760dd75542eb/go.mod h1:lLjNuW/+OfW9/pnVKPazfWOgNfH2aPem8YQ7ilXGvJE= -github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsUgOEh9hBm+xYTstcNHg7UPMVJqRfQxq4s= -github.com/jackc/pgconn v1.4.0 h1:E82UBzFyD752mvI+4RIl1WSxfO2ug64T+sLjvDBWTpA= 
-github.com/jackc/pgconn v1.4.0/go.mod h1:Y2O3ZDF0q4mMacyWV3AstPJpeHXWGEetiFttmq5lahk= -github.com/jackc/pgconn v1.5.0 h1:oFSOilzIZkyg787M1fEmyMfOUUvwj0daqYMfaWwNL4o= -github.com/jackc/pgconn v1.5.0/go.mod h1:QeD3lBfpTFe8WUnPZWN5KY/mB8FGMIYRdd8P8Jr0fAI= -github.com/jackc/pgconn v1.5.1-0.20200601181101-fa742c524853 h1:LRlrfJW9S99uiOCY8F/qLvX1yEY1TVAaCBHFb79yHBQ= -github.com/jackc/pgconn v1.5.1-0.20200601181101-fa742c524853/go.mod h1:QeD3lBfpTFe8WUnPZWN5KY/mB8FGMIYRdd8P8Jr0fAI= -github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE= -github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8= -github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2 h1:JVX6jT/XfzNqIjye4717ITLaNwV9mWbJx0dLCpcRzdA= -github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE= -github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= -github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= -github.com/jackc/pgproto3 v1.1.0 h1:FYYE4yRw+AgI8wXIinMlNjBbp/UitDJwfj5LqqewP1A= -github.com/jackc/pgproto3 v1.1.0/go.mod h1:eR5FA3leWg7p9aeAqi37XOTgTIbkABlvcPB3E5rlc78= -github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190420180111-c116219b62db h1:UpaKn/gYxzH6/zWyRQH1S260zvKqwJJ4h8+Kf09ooh0= -github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190420180111-c116219b62db/go.mod h1:bhq50y+xrl9n5mRYyCBFKkpRVTLYJVWeCc+mEAI3yXA= -github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190609003834-432c2951c711 h1:vZp4bYotXUkFx7JUSm7U8KV/7Q0AOdrQxxBBj0ZmZsg= -github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190609003834-432c2951c711/go.mod h1:uH0AWtUmuShn0bcesswc4aBTWGvw0cAxIJp+6OB//Wg= -github.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= -github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= -github.com/jackc/pgproto3/v2 v2.0.1 
h1:Rdjp4NFjwHnEslx2b66FfCI2S0LhO4itac3hXz6WX9M= -github.com/jackc/pgproto3/v2 v2.0.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= -github.com/jackc/pgservicefile v0.0.0-20200307190119-3430c5407db8 h1:Q3tB+ExeflWUW7AFcAhXqk40s9mnNYLk1nOkKNZ5GnU= -github.com/jackc/pgservicefile v0.0.0-20200307190119-3430c5407db8/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E= -github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg= -github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc= -github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw= -github.com/jackc/pgtype v1.2.0/go.mod h1:5m2OfMh1wTK7x+Fk952IDmI4nw3nPrvtQdM0ZT4WpC0= -github.com/jackc/pgtype v1.3.1-0.20200510190516-8cd94a14c75a/go.mod h1:vaogEUkALtxZMCH411K+tKzNpwzCKU+AnPzBKZ+I+Po= -github.com/jackc/pgtype v1.3.1-0.20200606141011-f6355165a91c/go.mod h1:cvk9Bgu/VzJ9/lxTO5R5sf80p0DiucVtN7ZxvaC4GmQ= -github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96 h1:ylEAOd688Duev/fxTmGdupsbyZfxNMdngIG14DoBKTM= -github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y= -github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912 h1:YuOWGsSK5L4Fz81Olx5TNlZftmDuNrfv4ip0Yos77Tw= -github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM= -github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186 h1:ZQM8qLT/E/CGD6XX0E6q9FAwxJYmWpJufzmLMaFuzgQ= -github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc= -github.com/jackc/pgx/v4 v4.5.0 h1:mN7Z3n0uqPe29+tA4yLWyZNceYKgRvUWNk8qW+D066E= -github.com/jackc/pgx/v4 v4.5.0/go.mod h1:EpAKPLdnTorwmPUUsqrPxy5fphV18j9q3wrfRXgo+kA= -github.com/jackc/pgx/v4 v4.6.1-0.20200510190926-94ba730bb1e9 
h1:rche9LTjh3HEvkE6eb8ITYxRsgEKgBkODHrhdvDVX74= -github.com/jackc/pgx/v4 v4.6.1-0.20200510190926-94ba730bb1e9/go.mod h1:t3/cdRQl6fOLDxqtlyhe9UWgfIi9R8+8v8GKV5TRA/o= -github.com/jackc/pgx/v4 v4.6.1-0.20200606145419-4e5062306904 h1:SdGWuGg+Cpxq6Z+ArXt0nafaKeTvtKGEoW+yvycspUU= -github.com/jackc/pgx/v4 v4.6.1-0.20200606145419-4e5062306904/go.mod h1:ZDaNWkt9sW1JMiNn0kdYBaLelIhw7Pg4qd+Vk6tw7Hg= -github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= -github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= -github.com/jackc/puddle v1.1.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= -github.com/jackc/puddle v1.1.1/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lib/pq v1.1.0 h1:/5u4a+KGJptBRqGzPvYQL9p0d/tPR4S31+Tnzj9lEO4= -github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lib/pq v1.2.0 h1:LXpIM/LZ5xGFhOpXAQUIMM1HdyqzVYM13zNdjCEEcA0= -github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= 
-github.com/lib/pq v1.3.0 h1:/qkRGz8zljWiDcFvgpwUpwIAPu3r07TDvs3Rws+o/pU= -github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= -github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= -github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= -github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= -github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= -github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww= -github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= -github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24 h1:pntxY8Ary0t43dCZ5dqY4YTJCObLY1kIXl0uzMv+7DE= -github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= 
-github.com/shopspring/decimal v0.0.0-20200227202807-02e2044944cc h1:jUIKcSPO9MoMJBbEoyE/RJoE8vz7Mb8AjvifMMwSyvY= -github.com/shopspring/decimal v0.0.0-20200227202807-02e2044944cc/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= -github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= -github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= -go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= -go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= -go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= -go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap 
v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a h1:Igim7XhdOpBnWPuYJ70XcNpq8q3BCACtVgNfoJxOV7g= -golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586 h1:7KByu05hhLed2MO29w7p1XfZvZ13m8mub3shuVftRs0= -golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7 h1:0hQKqeLdqlt5iIwVOBErRisrHJAN57yOiPRQItI20fU= -golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59 h1:3zb4D3T4G8jdExgVU/95+vQXfpEPiMdCaZgmGVxjNHM= -golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= 
-golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373 h1:PPwnA7z1Pjf7XYaBP9GL1VAMZmcIWyFz7QCMSIIa3Bg= -golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 h1:9zdDQZ7Thm29KFXgAX/+yaf3eVbP7djjWp/dXAppNCc= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= diff --git a/vendor/github.com/jackc/pgtype/inet.go b/vendor/github.com/jackc/pgtype/inet.go index 101b9ab43..1645334e3 100644 --- a/vendor/github.com/jackc/pgtype/inet.go +++ b/vendor/github.com/jackc/pgtype/inet.go @@ -45,10 +45,11 @@ func (dst *Inet) Set(src interface{}) error { *dst = Inet{IPNet: &net.IPNet{Mask: mask, IP: value}, Status: Present} } 
case string: - _, ipnet, err := net.ParseCIDR(value) + ip, ipnet, err := net.ParseCIDR(value) if err != nil { return err } + ipnet.IP = ip *dst = Inet{IPNet: ipnet, Status: Present} case *net.IPNet: if value == nil { @@ -131,18 +132,22 @@ func (dst *Inet) DecodeText(ci *ConnInfo, src []byte) error { var err error if ip := net.ParseIP(string(src)); ip != nil { - ipv4 := ip.To4() - if ipv4 != nil { + if ipv4 := ip.To4(); ipv4 != nil { ip = ipv4 } bitCount := len(ip) * 8 mask := net.CIDRMask(bitCount, bitCount) ipnet = &net.IPNet{Mask: mask, IP: ip} } else { - _, ipnet, err = net.ParseCIDR(string(src)) + ip, ipnet, err = net.ParseCIDR(string(src)) if err != nil { return err } + if ipv4 := ip.To4(); ipv4 != nil { + ip = ipv4 + } + ones, _ := ipnet.Mask.Size() + *ipnet = net.IPNet{IP: ip, Mask: net.CIDRMask(ones, len(ip)*8)} } *dst = Inet{IPNet: ipnet, Status: Present} @@ -167,7 +172,10 @@ func (dst *Inet) DecodeBinary(ci *ConnInfo, src []byte) error { var ipnet net.IPNet ipnet.IP = make(net.IP, int(addressLength)) copy(ipnet.IP, src[4:]) - ipnet.Mask = net.CIDRMask(int(bits), int(addressLength)*8) + if ipv4 := ipnet.IP.To4(); ipv4 != nil { + ipnet.IP = ipv4 + } + ipnet.Mask = net.CIDRMask(int(bits), len(ipnet.IP)*8) *dst = Inet{IPNet: &ipnet, Status: Present} diff --git a/vendor/github.com/jackc/pgtype/pgtype.go b/vendor/github.com/jackc/pgtype/pgtype.go index f1d40146f..4a6808449 100644 --- a/vendor/github.com/jackc/pgtype/pgtype.go +++ b/vendor/github.com/jackc/pgtype/pgtype.go @@ -293,6 +293,7 @@ func NewConnInfo() *ConnInfo { ci.RegisterDataType(DataType{Value: &Interval{}, Name: "interval", OID: IntervalOID}) ci.RegisterDataType(DataType{Value: &JSON{}, Name: "json", OID: JSONOID}) ci.RegisterDataType(DataType{Value: &JSONB{}, Name: "jsonb", OID: JSONBOID}) + ci.RegisterDataType(DataType{Value: &JSONBArray{}, Name: "_jsonb", OID: JSONBArrayOID}) ci.RegisterDataType(DataType{Value: &Line{}, Name: "line", OID: LineOID}) ci.RegisterDataType(DataType{Value: &Lseg{}, 
Name: "lseg", OID: LsegOID}) ci.RegisterDataType(DataType{Value: &Macaddr{}, Name: "macaddr", OID: MacaddrOID}) diff --git a/vendor/github.com/jackc/pgx/v4/CHANGELOG.md b/vendor/github.com/jackc/pgx/v4/CHANGELOG.md index 1d8615139..ef4a2029a 100644 --- a/vendor/github.com/jackc/pgx/v4/CHANGELOG.md +++ b/vendor/github.com/jackc/pgx/v4/CHANGELOG.md @@ -1,3 +1,19 @@ +# 4.13.0 (July 24, 2021) + +* Trimmed pseudo-dependencies in Go modules from other packages tests +* Upgrade pgconn -- context cancellation no longer will return a net.Error +* Support time durations for simple protocol (Michael Darr) + +# 4.12.0 (July 10, 2021) + +* ResetSession hook is called before a connection is reused from pool for another query (Dmytro Haranzha) +* stdlib: Add RandomizeHostOrderFunc (dkinder) +* stdlib: add OptionBeforeConnect (dkinder) +* stdlib: Do not reuse ConnConfig strings (Andrew Kimball) +* stdlib: implement Conn.ResetSession (Jonathan Amsterdam) +* Upgrade pgconn to v1.9.0 +* Upgrade pgtype to v1.8.0 + # 4.11.0 (March 25, 2021) * Add BeforeConnect callback to pgxpool.Config (Robert Froehlich) diff --git a/vendor/github.com/jackc/pgx/v4/README.md b/vendor/github.com/jackc/pgx/v4/README.md index 00b11f97e..732320447 100644 --- a/vendor/github.com/jackc/pgx/v4/README.md +++ b/vendor/github.com/jackc/pgx/v4/README.md @@ -29,6 +29,7 @@ import ( ) func main() { + // urlExample := "postgres://username:password@localhost:5432/database_name" conn, err := pgx.Connect(context.Background(), os.Getenv("DATABASE_URL")) if err != nil { fmt.Fprintf(os.Stderr, "Unable to connect to database: %v\n", err) diff --git a/vendor/github.com/jackc/pgx/v4/go.mod b/vendor/github.com/jackc/pgx/v4/go.mod deleted file mode 100644 index 0e77d65c2..000000000 --- a/vendor/github.com/jackc/pgx/v4/go.mod +++ /dev/null @@ -1,21 +0,0 @@ -module github.com/jackc/pgx/v4 - -go 1.13 - -require ( - github.com/Masterminds/semver/v3 v3.1.1 - github.com/cockroachdb/apd v1.1.0 - github.com/go-kit/kit v0.10.0 - 
github.com/gofrs/uuid v3.2.0+incompatible - github.com/jackc/pgconn v1.8.1 - github.com/jackc/pgio v1.0.0 - github.com/jackc/pgproto3/v2 v2.0.6 - github.com/jackc/pgtype v1.7.0 - github.com/jackc/puddle v1.1.3 - github.com/rs/zerolog v1.15.0 - github.com/shopspring/decimal v0.0.0-20200227202807-02e2044944cc - github.com/sirupsen/logrus v1.4.2 - github.com/stretchr/testify v1.5.1 - go.uber.org/zap v1.13.0 - gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec -) diff --git a/vendor/github.com/jackc/pgx/v4/go.sum b/vendor/github.com/jackc/pgx/v4/go.sum deleted file mode 100644 index c96869518..000000000 --- a/vendor/github.com/jackc/pgx/v4/go.sum +++ /dev/null @@ -1,484 +0,0 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= -github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc= -github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= -github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= -github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= -github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= -github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod 
h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= -github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= -github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= -github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= -github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= -github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= -github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= -github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= -github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= 
-github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I= -github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= -github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= -github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= -github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= 
-github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= -github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= -github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= -github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= -github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.10.0 h1:dXFJfIHVvUcpSgDOV+Ne6t7jXri8Tfv2uOLHUZ2XNuo= -github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logfmt/logfmt v0.5.0 h1:TrB8swr/68K7m9CcGut2g3UOihhbcbiMAYiuTXdEih4= -github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= -github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= -github.com/go-stack/stack v1.8.0 
h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/gofrs/uuid v3.2.0+incompatible h1:y12jRkkFxsd7GpqdSZ+/KCs/fJbqpEXSGd4+jfEaewE= -github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= -github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/gofuzz 
v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= -github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= -github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= -github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= -github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= -github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= -github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= -github.com/hashicorp/go-syslog 
v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= -github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= -github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= -github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= -github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= -github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= -github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= -github.com/jackc/chunkreader v1.0.0 h1:4s39bBR8ByfqH+DKm8rQA3E1LHZWB9XWcrz8fqaZbe0= -github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo= -github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= -github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8= -github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= -github.com/jackc/pgconn v0.0.0-20190420214824-7e0022ef6ba3/go.mod h1:jkELnwuX+w9qN5YIfX0fl88Ehu4XC3keFuOJJk9pcnA= -github.com/jackc/pgconn 
v0.0.0-20190824142844-760dd75542eb/go.mod h1:lLjNuW/+OfW9/pnVKPazfWOgNfH2aPem8YQ7ilXGvJE= -github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsUgOEh9hBm+xYTstcNHg7UPMVJqRfQxq4s= -github.com/jackc/pgconn v1.4.0/go.mod h1:Y2O3ZDF0q4mMacyWV3AstPJpeHXWGEetiFttmq5lahk= -github.com/jackc/pgconn v1.5.0/go.mod h1:QeD3lBfpTFe8WUnPZWN5KY/mB8FGMIYRdd8P8Jr0fAI= -github.com/jackc/pgconn v1.5.1-0.20200601181101-fa742c524853/go.mod h1:QeD3lBfpTFe8WUnPZWN5KY/mB8FGMIYRdd8P8Jr0fAI= -github.com/jackc/pgconn v1.8.1 h1:ySBX7Q87vOMqKU2bbmKbUvtYhauDFclYbNDYIE1/h6s= -github.com/jackc/pgconn v1.8.1/go.mod h1:JV6m6b6jhjdmzchES0drzCcYcAHS1OPD5xu3OZ/lE2g= -github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE= -github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8= -github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2 h1:JVX6jT/XfzNqIjye4717ITLaNwV9mWbJx0dLCpcRzdA= -github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE= -github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= -github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= -github.com/jackc/pgproto3 v1.1.0 h1:FYYE4yRw+AgI8wXIinMlNjBbp/UitDJwfj5LqqewP1A= -github.com/jackc/pgproto3 v1.1.0/go.mod h1:eR5FA3leWg7p9aeAqi37XOTgTIbkABlvcPB3E5rlc78= -github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190420180111-c116219b62db/go.mod h1:bhq50y+xrl9n5mRYyCBFKkpRVTLYJVWeCc+mEAI3yXA= -github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190609003834-432c2951c711/go.mod h1:uH0AWtUmuShn0bcesswc4aBTWGvw0cAxIJp+6OB//Wg= -github.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= -github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= -github.com/jackc/pgproto3/v2 v2.0.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= -github.com/jackc/pgproto3/v2 
v2.0.6 h1:b1105ZGEMFe7aCvrT1Cca3VoVb4ZFMaFJLJcg/3zD+8= -github.com/jackc/pgproto3/v2 v2.0.6/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= -github.com/jackc/pgservicefile v0.0.0-20200307190119-3430c5407db8/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E= -github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b h1:C8S2+VttkHFdOOCXJe+YGfa4vHYwlt4Zx+IVXQ97jYg= -github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E= -github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg= -github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc= -github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw= -github.com/jackc/pgtype v1.2.0/go.mod h1:5m2OfMh1wTK7x+Fk952IDmI4nw3nPrvtQdM0ZT4WpC0= -github.com/jackc/pgtype v1.3.1-0.20200510190516-8cd94a14c75a/go.mod h1:vaogEUkALtxZMCH411K+tKzNpwzCKU+AnPzBKZ+I+Po= -github.com/jackc/pgtype v1.3.1-0.20200606141011-f6355165a91c/go.mod h1:cvk9Bgu/VzJ9/lxTO5R5sf80p0DiucVtN7ZxvaC4GmQ= -github.com/jackc/pgtype v1.7.0 h1:6f4kVsW01QftE38ufBYxKciO6gyioXSC0ABIRLcZrGs= -github.com/jackc/pgtype v1.7.0/go.mod h1:ZnHF+rMePVqDKaOfJVI4Q8IVvAQMryDlDkZnKOI75BE= -github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y= -github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM= -github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc= -github.com/jackc/pgx/v4 v4.5.0/go.mod h1:EpAKPLdnTorwmPUUsqrPxy5fphV18j9q3wrfRXgo+kA= -github.com/jackc/pgx/v4 v4.6.1-0.20200510190926-94ba730bb1e9/go.mod h1:t3/cdRQl6fOLDxqtlyhe9UWgfIi9R8+8v8GKV5TRA/o= -github.com/jackc/pgx/v4 v4.6.1-0.20200606145419-4e5062306904/go.mod 
h1:ZDaNWkt9sW1JMiNn0kdYBaLelIhw7Pg4qd+Vk6tw7Hg= -github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= -github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= -github.com/jackc/puddle v1.1.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= -github.com/jackc/puddle v1.1.1/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= -github.com/jackc/puddle v1.1.3 h1:JnPg/5Q9xVJGfjsO5CPUOjnJps1JaRUm8I9FXVCFK94= -github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= -github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= -github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= -github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s= -github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/kr/pretty v0.1.0 
h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lib/pq v1.3.0 h1:/qkRGz8zljWiDcFvgpwUpwIAPu3r07TDvs3Rws+o/pU= -github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= -github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= -github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= -github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= -github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE= -github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.7/go.mod 
h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= -github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= -github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= -github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= -github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= -github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/nats-io/jwt 
v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= -github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= -github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k= -github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= -github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= -github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= -github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= -github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= -github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= -github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= -github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= -github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= -github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA= -github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= -github.com/openzipkin/zipkin-go v0.2.1/go.mod 
h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= -github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= -github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= -github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= -github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= -github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= -github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= -github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model 
v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= -github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= -github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= -github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= -github.com/rs/zerolog v1.15.0 h1:uPRuwkWF4J6fGsJ2R0Gn2jB1EQiav9k3S6CSdygQJXY= -github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= -github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod 
h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= -github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= -github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= -github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= -github.com/shopspring/decimal v0.0.0-20200227202807-02e2044944cc h1:jUIKcSPO9MoMJBbEoyE/RJoE8vz7Mb8AjvifMMwSyvY= -github.com/shopspring/decimal v0.0.0-20200227202807-02e2044944cc/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= -github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= -github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= -github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= -github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= -github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= 
-github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= -github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= -go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= -go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= -go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= -go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.5.0/go.mod 
h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= -go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk= -go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= -go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= -go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A= -go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= -go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4= -go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= -go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.13.0 h1:nR6NoDBgAf67s68NhaXbsojM+2gxp3S1hWkHDl27pVU= -go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto 
v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2 h1:It14KIkyBFYkHkwZ7k45minvA9aorojkyjGk9KJ5B/w= -golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
-golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68 h1:nxC68pudNYkKU6jWhgrqdreuFiOQWj1Fs7T3VrH4Pjw= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools 
v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20200103221440-774c71fcf114 h1:DnSr2mCsxyCE6ZgIkmcWUQY2R5cH/6wL7eIxEmQOMSE= -golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= 
-google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/gcfg.v1 v1.2.3/go.mod 
h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= -gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec h1:RlWgLqCMMIYYEVcAR5MDsuHlVkaIPDAF+5Dehzg8L5A= -gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s= -gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= -gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM= -honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= -sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= diff --git a/vendor/github.com/jackc/pgx/v4/stdlib/sql.go b/vendor/github.com/jackc/pgx/v4/stdlib/sql.go index 067c114fc..fa81e73d5 100644 --- a/vendor/github.com/jackc/pgx/v4/stdlib/sql.go +++ b/vendor/github.com/jackc/pgx/v4/stdlib/sql.go @@ -56,6 +56,7 @@ import ( "fmt" "io" "math" + "math/rand" "reflect" "strconv" "strings" @@ -110,18 +111,65 @@ var ( // OptionOpenDB options 
for configuring the driver when opening a new db pool. type OptionOpenDB func(*connector) -// OptionAfterConnect provide a callback for after connect. +// OptionBeforeConnect provides a callback for before connect. It is passed a shallow copy of the ConnConfig that will +// be used to connect, so only its immediate members should be modified. +func OptionBeforeConnect(bc func(context.Context, *pgx.ConnConfig) error) OptionOpenDB { + return func(dc *connector) { + dc.BeforeConnect = bc + } +} + +// OptionAfterConnect provides a callback for after connect. func OptionAfterConnect(ac func(context.Context, *pgx.Conn) error) OptionOpenDB { return func(dc *connector) { dc.AfterConnect = ac } } +// OptionResetSession provides a callback that can be used to add custom logic prior to executing a query on the +// connection if the connection has been used before. +// If ResetSessionFunc returns ErrBadConn error the connection will be discarded. +func OptionResetSession(rs func(context.Context, *pgx.Conn) error) OptionOpenDB { + return func(dc *connector) { + dc.ResetSession = rs + } +} + +// RandomizeHostOrderFunc is a BeforeConnect hook that randomizes the host order in the provided connConfig, so that a +// new host becomes primary each time. This is useful to distribute connections for multi-master databases like +// CockroachDB. If you use this you likely should set https://golang.org/pkg/database/sql/#DB.SetConnMaxLifetime as well +// to ensure that connections are periodically rebalanced across your nodes. +func RandomizeHostOrderFunc(ctx context.Context, connConfig *pgx.ConnConfig) error { + if len(connConfig.Fallbacks) == 0 { + return nil + } + + newFallbacks := append([]*pgconn.FallbackConfig{&pgconn.FallbackConfig{ + Host: connConfig.Host, + Port: connConfig.Port, + TLSConfig: connConfig.TLSConfig, + }}, connConfig.Fallbacks...) 
+ + rand.Shuffle(len(newFallbacks), func(i, j int) { + newFallbacks[i], newFallbacks[j] = newFallbacks[j], newFallbacks[i] + }) + + // Use the one that sorted last as the primary and keep the rest as the fallbacks + newPrimary := newFallbacks[len(newFallbacks)-1] + connConfig.Host = newPrimary.Host + connConfig.Port = newPrimary.Port + connConfig.TLSConfig = newPrimary.TLSConfig + connConfig.Fallbacks = newFallbacks[:len(newFallbacks)-1] + return nil +} + func OpenDB(config pgx.ConnConfig, opts ...OptionOpenDB) *sql.DB { c := connector{ - ConnConfig: config, - AfterConnect: func(context.Context, *pgx.Conn) error { return nil }, // noop after connect by default - driver: pgxDriver, + ConnConfig: config, + BeforeConnect: func(context.Context, *pgx.ConnConfig) error { return nil }, // noop before connect by default + AfterConnect: func(context.Context, *pgx.Conn) error { return nil }, // noop after connect by default + ResetSession: func(context.Context, *pgx.Conn) error { return nil }, // noop reset session by default + driver: pgxDriver, } for _, opt := range opts { @@ -133,8 +181,10 @@ func OpenDB(config pgx.ConnConfig, opts ...OptionOpenDB) *sql.DB { type connector struct { pgx.ConnConfig - AfterConnect func(context.Context, *pgx.Conn) error // function to call on every new connection - driver *Driver + BeforeConnect func(context.Context, *pgx.ConnConfig) error // function to call before creation of every new connection + AfterConnect func(context.Context, *pgx.Conn) error // function to call after creation of every new connection + ResetSession func(context.Context, *pgx.Conn) error // function is called before a connection is reused + driver *Driver } // Connect implement driver.Connector interface @@ -144,7 +194,13 @@ func (c connector) Connect(ctx context.Context) (driver.Conn, error) { conn *pgx.Conn ) - if conn, err = pgx.ConnectConfig(ctx, &c.ConnConfig); err != nil { + // Create a shallow copy of the config, so that BeforeConnect can safely modify it + 
connConfig := c.ConnConfig + if err = c.BeforeConnect(ctx, &connConfig); err != nil { + return nil, err + } + + if conn, err = pgx.ConnectConfig(ctx, &connConfig); err != nil { return nil, err } @@ -152,7 +208,7 @@ func (c connector) Connect(ctx context.Context) (driver.Conn, error) { return nil, err } - return &Conn{conn: conn, driver: c.driver, connConfig: c.ConnConfig}, nil + return &Conn{conn: conn, driver: c.driver, connConfig: connConfig, resetSessionFunc: c.ResetSession}, nil } // Driver implement driver.Connector interface @@ -169,6 +225,7 @@ func GetDefaultDriver() driver.Driver { type Driver struct { configMutex sync.Mutex configs map[string]*pgx.ConnConfig + sequence int } func (d *Driver) Open(name string) (driver.Conn, error) { @@ -188,7 +245,8 @@ func (d *Driver) OpenConnector(name string) (driver.Connector, error) { func (d *Driver) registerConnConfig(c *pgx.ConnConfig) string { d.configMutex.Lock() - connStr := fmt.Sprintf("registeredConnConfig%d", len(d.configs)) + connStr := fmt.Sprintf("registeredConnConfig%d", d.sequence) + d.sequence++ d.configs[connStr] = c d.configMutex.Unlock() return connStr @@ -225,7 +283,13 @@ func (dc *driverConnector) Connect(ctx context.Context) (driver.Conn, error) { return nil, err } - c := &Conn{conn: conn, driver: dc.driver, connConfig: *connConfig} + c := &Conn{ + conn: conn, + driver: dc.driver, + connConfig: *connConfig, + resetSessionFunc: func(context.Context, *pgx.Conn) error { return nil }, + } + return c, nil } @@ -244,10 +308,11 @@ func UnregisterConnConfig(connStr string) { } type Conn struct { - conn *pgx.Conn - psCount int64 // Counter used for creating unique prepared statement names - driver *Driver - connConfig pgx.ConnConfig + conn *pgx.Conn + psCount int64 // Counter used for creating unique prepared statement names + driver *Driver + connConfig pgx.ConnConfig + resetSessionFunc func(context.Context, *pgx.Conn) error // Function is called before a connection is reused } // Conn returns the 
underlying *pgx.Conn @@ -385,6 +450,14 @@ func (c *Conn) CheckNamedValue(*driver.NamedValue) error { return nil } +func (c *Conn) ResetSession(ctx context.Context) error { + if c.conn.IsClosed() { + return driver.ErrBadConn + } + + return c.resetSessionFunc(ctx, c.conn) +} + type Stmt struct { sd *pgconn.StatementDescription conn *Conn diff --git a/vendor/github.com/jackc/pgx/v4/tx.go b/vendor/github.com/jackc/pgx/v4/tx.go index 8f3178cb0..7a296f4fe 100644 --- a/vendor/github.com/jackc/pgx/v4/tx.go +++ b/vendor/github.com/jackc/pgx/v4/tx.go @@ -42,7 +42,12 @@ type TxOptions struct { DeferrableMode TxDeferrableMode } +var emptyTxOptions TxOptions + func (txOptions TxOptions) beginSQL() string { + if txOptions == emptyTxOptions { + return "begin" + } buf := &bytes.Buffer{} buf.WriteString("begin") if txOptions.IsoLevel != "" { diff --git a/vendor/github.com/jackc/pgx/v4/values.go b/vendor/github.com/jackc/pgx/v4/values.go index 45d8ff839..1a9454753 100644 --- a/vendor/github.com/jackc/pgx/v4/values.go +++ b/vendor/github.com/jackc/pgx/v4/values.go @@ -78,6 +78,8 @@ func convertSimpleArgument(ci *pgtype.ConnInfo, arg interface{}) (interface{}, e return arg, nil case bool: return arg, nil + case time.Duration: + return fmt.Sprintf("%d microsecond", int64(arg)/1000), nil case time.Time: return arg, nil case string: diff --git a/vendor/github.com/m3db/prometheus_client_golang/AUTHORS.md b/vendor/github.com/m3db/prometheus_client_golang/AUTHORS.md deleted file mode 100644 index c5275d5ab..000000000 --- a/vendor/github.com/m3db/prometheus_client_golang/AUTHORS.md +++ /dev/null @@ -1,18 +0,0 @@ -The Prometheus project was started by Matt T. Proud (emeritus) and -Julius Volz in 2012. - -Maintainers of this repository: - -* Björn Rabenstein - -The following individuals have contributed code to this repository -(listed in alphabetical order): - -* Bernerd Schaefer -* Björn Rabenstein -* Daniel Bornkessel -* Jeff Younker -* Julius Volz -* Matt T. 
Proud -* Tobias Schmidt - diff --git a/vendor/github.com/m3db/prometheus_client_golang/prometheus/collector.go b/vendor/github.com/m3db/prometheus_client_golang/prometheus/collector.go deleted file mode 100644 index 623d3d83f..000000000 --- a/vendor/github.com/m3db/prometheus_client_golang/prometheus/collector.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -// Collector is the interface implemented by anything that can be used by -// Prometheus to collect metrics. A Collector has to be registered for -// collection. See Registerer.Register. -// -// The stock metrics provided by this package (Gauge, Counter, Summary, -// Histogram, Untyped) are also Collectors (which only ever collect one metric, -// namely itself). An implementer of Collector may, however, collect multiple -// metrics in a coordinated fashion and/or create metrics on the fly. Examples -// for collectors already implemented in this library are the metric vectors -// (i.e. collection of multiple instances of the same Metric but with different -// label values) like GaugeVec or SummaryVec, and the ExpvarCollector. -type Collector interface { - // Describe sends the super-set of all possible descriptors of metrics - // collected by this Collector to the provided channel and returns once - // the last descriptor has been sent. 
The sent descriptors fulfill the - // consistency and uniqueness requirements described in the Desc - // documentation. (It is valid if one and the same Collector sends - // duplicate descriptors. Those duplicates are simply ignored. However, - // two different Collectors must not send duplicate descriptors.) This - // method idempotently sends the same descriptors throughout the - // lifetime of the Collector. If a Collector encounters an error while - // executing this method, it must send an invalid descriptor (created - // with NewInvalidDesc) to signal the error to the registry. - Describe(chan<- *Desc) - // Collect is called by the Prometheus registry when collecting - // metrics. The implementation sends each collected metric via the - // provided channel and returns once the last metric has been sent. The - // descriptor of each sent metric is one of those returned by - // Describe. Returned metrics that share the same descriptor must differ - // in their variable label values. This method may be called - // concurrently and must therefore be implemented in a concurrency safe - // way. Blocking occurs at the expense of total performance of rendering - // all registered metrics. Ideally, Collector implementations support - // concurrent readers. - Collect(chan<- Metric) -} - -// selfCollector implements Collector for a single Metric so that the Metric -// collects itself. Add it as an anonymous field to a struct that implements -// Metric, and call init with the Metric itself as an argument. -type selfCollector struct { - self Metric -} - -// init provides the selfCollector with a reference to the metric it is supposed -// to collect. It is usually called within the factory function to create a -// metric. See example. -func (c *selfCollector) init(self Metric) { - c.self = self -} - -// Describe implements Collector. -func (c *selfCollector) Describe(ch chan<- *Desc) { - ch <- c.self.Desc() -} - -// Collect implements Collector. 
-func (c *selfCollector) Collect(ch chan<- Metric) { - ch <- c.self -} diff --git a/vendor/github.com/m3db/prometheus_client_golang/prometheus/counter.go b/vendor/github.com/m3db/prometheus_client_golang/prometheus/counter.go deleted file mode 100644 index ee37949ad..000000000 --- a/vendor/github.com/m3db/prometheus_client_golang/prometheus/counter.go +++ /dev/null @@ -1,172 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "errors" -) - -// Counter is a Metric that represents a single numerical value that only ever -// goes up. That implies that it cannot be used to count items whose number can -// also go down, e.g. the number of currently running goroutines. Those -// "counters" are represented by Gauges. -// -// A Counter is typically used to count requests served, tasks completed, errors -// occurred, etc. -// -// To create Counter instances, use NewCounter. -type Counter interface { - Metric - Collector - - // Set is used to set the Counter to an arbitrary value. It is only used - // if you have to transfer a value from an external counter into this - // Prometheus metric. Do not use it for regular handling of a - // Prometheus counter (as it can be used to break the contract of - // monotonically increasing values). - // - // Deprecated: Use NewConstMetric to create a counter for an external - // value. A Counter should never be set. 
- Set(float64) - // Inc increments the counter by 1. - Inc() - // Add adds the given value to the counter. It panics if the value is < - // 0. - Add(float64) -} - -// CounterOpts is an alias for Opts. See there for doc comments. -type CounterOpts Opts - -// NewCounter creates a new Counter based on the provided CounterOpts. -func NewCounter(opts CounterOpts) Counter { - desc := NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - nil, - opts.ConstLabels, - ) - result := &counter{value: value{desc: desc, valType: CounterValue, labelPairs: desc.constLabelPairs}} - result.init(result) // Init self-collection. - return result -} - -type counter struct { - value -} - -func (c *counter) Add(v float64) { - if v < 0 { - panic(errors.New("counter cannot decrease in value")) - } - c.value.Add(v) -} - -// CounterVec is a Collector that bundles a set of Counters that all share the -// same Desc, but have different values for their variable labels. This is used -// if you want to count the same thing partitioned by various dimensions -// (e.g. number of HTTP requests, partitioned by response code and -// method). Create instances with NewCounterVec. -// -// CounterVec embeds MetricVec. See there for a full list of methods with -// detailed documentation. -type CounterVec struct { - *MetricVec -} - -// NewCounterVec creates a new CounterVec based on the provided CounterOpts and -// partitioned by the given label names. At least one label name must be -// provided. -func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec { - desc := NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - labelNames, - opts.ConstLabels, - ) - return &CounterVec{ - MetricVec: newMetricVec(desc, func(lvs ...string) Metric { - result := &counter{value: value{ - desc: desc, - valType: CounterValue, - labelPairs: makeLabelPairs(desc, lvs), - }} - result.init(result) // Init self-collection. 
- return result - }), - } -} - -// GetMetricWithLabelValues replaces the method of the same name in -// MetricVec. The difference is that this method returns a Counter and not a -// Metric so that no type conversion is required. -func (m *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) { - metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...) - if metric != nil { - return metric.(Counter), err - } - return nil, err -} - -// GetMetricWith replaces the method of the same name in MetricVec. The -// difference is that this method returns a Counter and not a Metric so that no -// type conversion is required. -func (m *CounterVec) GetMetricWith(labels Labels) (Counter, error) { - metric, err := m.MetricVec.GetMetricWith(labels) - if metric != nil { - return metric.(Counter), err - } - return nil, err -} - -// WithLabelValues works as GetMetricWithLabelValues, but panics where -// GetMetricWithLabelValues would have returned an error. By not returning an -// error, WithLabelValues allows shortcuts like -// myVec.WithLabelValues("404", "GET").Add(42) -func (m *CounterVec) WithLabelValues(lvs ...string) Counter { - return m.MetricVec.WithLabelValues(lvs...).(Counter) -} - -// With works as GetMetricWith, but panics where GetMetricWithLabels would have -// returned an error. By not returning an error, With allows shortcuts like -// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) -func (m *CounterVec) With(labels Labels) Counter { - return m.MetricVec.With(labels).(Counter) -} - -// CounterFunc is a Counter whose value is determined at collect time by calling a -// provided function. -// -// To create CounterFunc instances, use NewCounterFunc. -type CounterFunc interface { - Metric - Collector -} - -// NewCounterFunc creates a new CounterFunc based on the provided -// CounterOpts. The value reported is determined by calling the given function -// from within the Write method. 
Take into account that metric collection may -// happen concurrently. If that results in concurrent calls to Write, like in -// the case where a CounterFunc is directly registered with Prometheus, the -// provided function must be concurrency-safe. The function should also honor -// the contract for a Counter (values only go up, not down), but compliance will -// not be checked. -func NewCounterFunc(opts CounterOpts, function func() float64) CounterFunc { - return newValueFunc(NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - nil, - opts.ConstLabels, - ), CounterValue, function) -} diff --git a/vendor/github.com/m3db/prometheus_client_golang/prometheus/gauge.go b/vendor/github.com/m3db/prometheus_client_golang/prometheus/gauge.go deleted file mode 100644 index 8b70e5141..000000000 --- a/vendor/github.com/m3db/prometheus_client_golang/prometheus/gauge.go +++ /dev/null @@ -1,140 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -// Gauge is a Metric that represents a single numerical value that can -// arbitrarily go up and down. -// -// A Gauge is typically used for measured values like temperatures or current -// memory usage, but also "counts" that can go up and down, like the number of -// running goroutines. -// -// To create Gauge instances, use NewGauge. -type Gauge interface { - Metric - Collector - - // Set sets the Gauge to an arbitrary value. 
- Set(float64) - // Inc increments the Gauge by 1. - Inc() - // Dec decrements the Gauge by 1. - Dec() - // Add adds the given value to the Gauge. (The value can be - // negative, resulting in a decrease of the Gauge.) - Add(float64) - // Sub subtracts the given value from the Gauge. (The value can be - // negative, resulting in an increase of the Gauge.) - Sub(float64) -} - -// GaugeOpts is an alias for Opts. See there for doc comments. -type GaugeOpts Opts - -// NewGauge creates a new Gauge based on the provided GaugeOpts. -func NewGauge(opts GaugeOpts) Gauge { - return newValue(NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - nil, - opts.ConstLabels, - ), GaugeValue, 0) -} - -// GaugeVec is a Collector that bundles a set of Gauges that all share the same -// Desc, but have different values for their variable labels. This is used if -// you want to count the same thing partitioned by various dimensions -// (e.g. number of operations queued, partitioned by user and operation -// type). Create instances with NewGaugeVec. -type GaugeVec struct { - *MetricVec -} - -// NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and -// partitioned by the given label names. At least one label name must be -// provided. -func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec { - desc := NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - labelNames, - opts.ConstLabels, - ) - return &GaugeVec{ - MetricVec: newMetricVec(desc, func(lvs ...string) Metric { - return newValue(desc, GaugeValue, 0, lvs...) - }), - } -} - -// GetMetricWithLabelValues replaces the method of the same name in -// MetricVec. The difference is that this method returns a Gauge and not a -// Metric so that no type conversion is required. -func (m *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) { - metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...) 
- if metric != nil { - return metric.(Gauge), err - } - return nil, err -} - -// GetMetricWith replaces the method of the same name in MetricVec. The -// difference is that this method returns a Gauge and not a Metric so that no -// type conversion is required. -func (m *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) { - metric, err := m.MetricVec.GetMetricWith(labels) - if metric != nil { - return metric.(Gauge), err - } - return nil, err -} - -// WithLabelValues works as GetMetricWithLabelValues, but panics where -// GetMetricWithLabelValues would have returned an error. By not returning an -// error, WithLabelValues allows shortcuts like -// myVec.WithLabelValues("404", "GET").Add(42) -func (m *GaugeVec) WithLabelValues(lvs ...string) Gauge { - return m.MetricVec.WithLabelValues(lvs...).(Gauge) -} - -// With works as GetMetricWith, but panics where GetMetricWithLabels would have -// returned an error. By not returning an error, With allows shortcuts like -// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) -func (m *GaugeVec) With(labels Labels) Gauge { - return m.MetricVec.With(labels).(Gauge) -} - -// GaugeFunc is a Gauge whose value is determined at collect time by calling a -// provided function. -// -// To create GaugeFunc instances, use NewGaugeFunc. -type GaugeFunc interface { - Metric - Collector -} - -// NewGaugeFunc creates a new GaugeFunc based on the provided GaugeOpts. The -// value reported is determined by calling the given function from within the -// Write method. Take into account that metric collection may happen -// concurrently. If that results in concurrent calls to Write, like in the case -// where a GaugeFunc is directly registered with Prometheus, the provided -// function must be concurrency-safe. 
-func NewGaugeFunc(opts GaugeOpts, function func() float64) GaugeFunc { - return newValueFunc(NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - nil, - opts.ConstLabels, - ), GaugeValue, function) -} diff --git a/vendor/github.com/m3db/prometheus_client_golang/prometheus/histogram.go b/vendor/github.com/m3db/prometheus_client_golang/prometheus/histogram.go deleted file mode 100644 index 31c3e707c..000000000 --- a/vendor/github.com/m3db/prometheus_client_golang/prometheus/histogram.go +++ /dev/null @@ -1,444 +0,0 @@ -// Copyright 2015 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "fmt" - "math" - "sort" - "sync/atomic" - - "github.com/golang/protobuf/proto" - - dto "github.com/m3db/prometheus_client_model/go" -) - -// A Histogram counts individual observations from an event or sample stream in -// configurable buckets. Similar to a summary, it also provides a sum of -// observations and an observation count. -// -// On the Prometheus server, quantiles can be calculated from a Histogram using -// the histogram_quantile function in the query language. -// -// Note that Histograms, in contrast to Summaries, can be aggregated with the -// Prometheus query language (see the documentation for detailed -// procedures). However, Histograms require the user to pre-define suitable -// buckets, and they are in general less accurate. 
The Observe method of a -// Histogram has a very low performance overhead in comparison with the Observe -// method of a Summary. -// -// To create Histogram instances, use NewHistogram. -type Histogram interface { - Metric - Collector - - // Observe adds a single observation to the histogram. - Observe(float64) -} - -// bucketLabel is used for the label that defines the upper bound of a -// bucket of a histogram ("le" -> "less or equal"). -const bucketLabel = "le" - -// DefBuckets are the default Histogram buckets. The default buckets are -// tailored to broadly measure the response time (in seconds) of a network -// service. Most likely, however, you will be required to define buckets -// customized to your use case. -var ( - DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10} - - errBucketLabelNotAllowed = fmt.Errorf( - "%q is not allowed as label name in histograms", bucketLabel, - ) -) - -// LinearBuckets creates 'count' buckets, each 'width' wide, where the lowest -// bucket has an upper bound of 'start'. The final +Inf bucket is not counted -// and not included in the returned slice. The returned slice is meant to be -// used for the Buckets field of HistogramOpts. -// -// The function panics if 'count' is zero or negative. -func LinearBuckets(start, width float64, count int) []float64 { - if count < 1 { - panic("LinearBuckets needs a positive count") - } - buckets := make([]float64, count) - for i := range buckets { - buckets[i] = start - start += width - } - return buckets -} - -// ExponentialBuckets creates 'count' buckets, where the lowest bucket has an -// upper bound of 'start' and each following bucket's upper bound is 'factor' -// times the previous bucket's upper bound. The final +Inf bucket is not counted -// and not included in the returned slice. The returned slice is meant to be -// used for the Buckets field of HistogramOpts. 
-// -// The function panics if 'count' is 0 or negative, if 'start' is 0 or negative, -// or if 'factor' is less than or equal 1. -func ExponentialBuckets(start, factor float64, count int) []float64 { - if count < 1 { - panic("ExponentialBuckets needs a positive count") - } - if start <= 0 { - panic("ExponentialBuckets needs a positive start value") - } - if factor <= 1 { - panic("ExponentialBuckets needs a factor greater than 1") - } - buckets := make([]float64, count) - for i := range buckets { - buckets[i] = start - start *= factor - } - return buckets -} - -// HistogramOpts bundles the options for creating a Histogram metric. It is -// mandatory to set Name and Help to a non-empty string. All other fields are -// optional and can safely be left at their zero value. -type HistogramOpts struct { - // Namespace, Subsystem, and Name are components of the fully-qualified - // name of the Histogram (created by joining these components with - // "_"). Only Name is mandatory, the others merely help structuring the - // name. Note that the fully-qualified name of the Histogram must be a - // valid Prometheus metric name. - Namespace string - Subsystem string - Name string - - // Help provides information about this Histogram. Mandatory! - // - // Metrics with the same fully-qualified name must have the same Help - // string. - Help string - - // ConstLabels are used to attach fixed labels to this - // Histogram. Histograms with the same fully-qualified name must have the - // same label names in their ConstLabels. - // - // Note that in most cases, labels have a value that varies during the - // lifetime of a process. Those labels are usually managed with a - // HistogramVec. ConstLabels serve only special purposes. One is for the - // special case where the value of a label does not change during the - // lifetime of a process, e.g. if the revision of the running binary is - // put into a label. 
Another, more advanced purpose is if more than one - // Collector needs to collect Histograms with the same fully-qualified - // name. In that case, those Summaries must differ in the values of - // their ConstLabels. See the Collector examples. - // - // If the value of a label never changes (not even between binaries), - // that label most likely should not be a label at all (but part of the - // metric name). - ConstLabels Labels - - // Buckets defines the buckets into which observations are counted. Each - // element in the slice is the upper inclusive bound of a bucket. The - // values must be sorted in strictly increasing order. There is no need - // to add a highest bucket with +Inf bound, it will be added - // implicitly. The default value is DefBuckets. - Buckets []float64 -} - -// NewHistogram creates a new Histogram based on the provided HistogramOpts. It -// panics if the buckets in HistogramOpts are not in strictly increasing order. -func NewHistogram(opts HistogramOpts) Histogram { - return newHistogram( - NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - nil, - opts.ConstLabels, - ), - opts, - ) -} - -func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram { - if len(desc.variableLabels) != len(labelValues) { - panic(errInconsistentCardinality) - } - - for _, n := range desc.variableLabels { - if n == bucketLabel { - panic(errBucketLabelNotAllowed) - } - } - for _, lp := range desc.constLabelPairs { - if lp.GetName() == bucketLabel { - panic(errBucketLabelNotAllowed) - } - } - - if len(opts.Buckets) == 0 { - opts.Buckets = DefBuckets - } - - h := &histogram{ - desc: desc, - upperBounds: opts.Buckets, - labelPairs: makeLabelPairs(desc, labelValues), - } - for i, upperBound := range h.upperBounds { - if i < len(h.upperBounds)-1 { - if upperBound >= h.upperBounds[i+1] { - panic(fmt.Errorf( - "histogram buckets must be in increasing order: %f >= %f", - upperBound, h.upperBounds[i+1], - )) - } - } 
else { - if math.IsInf(upperBound, +1) { - // The +Inf bucket is implicit. Remove it here. - h.upperBounds = h.upperBounds[:i] - } - } - } - // Finally we know the final length of h.upperBounds and can make counts. - h.counts = make([]uint64, len(h.upperBounds)) - - h.init(h) // Init self-collection. - return h -} - -type histogram struct { - // sumBits contains the bits of the float64 representing the sum of all - // observations. sumBits and count have to go first in the struct to - // guarantee alignment for atomic operations. - // http://golang.org/pkg/sync/atomic/#pkg-note-BUG - sumBits uint64 - count uint64 - - selfCollector - // Note that there is no mutex required. - - desc *Desc - - upperBounds []float64 - counts []uint64 - - labelPairs []*dto.LabelPair -} - -func (h *histogram) Desc() *Desc { - return h.desc -} - -func (h *histogram) Observe(v float64) { - // TODO(beorn7): For small numbers of buckets (<30), a linear search is - // slightly faster than the binary search. If we really care, we could - // switch from one search strategy to the other depending on the number - // of buckets. 
- // - // Microbenchmarks (BenchmarkHistogramNoLabels): - // 11 buckets: 38.3 ns/op linear - binary 48.7 ns/op - // 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op - // 300 buckets: 154 ns/op linear - binary 61.6 ns/op - i := sort.SearchFloat64s(h.upperBounds, v) - if i < len(h.counts) { - atomic.AddUint64(&h.counts[i], 1) - } - atomic.AddUint64(&h.count, 1) - for { - oldBits := atomic.LoadUint64(&h.sumBits) - newBits := math.Float64bits(math.Float64frombits(oldBits) + v) - if atomic.CompareAndSwapUint64(&h.sumBits, oldBits, newBits) { - break - } - } -} - -func (h *histogram) Write(out *dto.Metric) error { - his := &dto.Histogram{} - buckets := make([]*dto.Bucket, len(h.upperBounds)) - - his.SampleSum = proto.Float64(math.Float64frombits(atomic.LoadUint64(&h.sumBits))) - his.SampleCount = proto.Uint64(atomic.LoadUint64(&h.count)) - var count uint64 - for i, upperBound := range h.upperBounds { - count += atomic.LoadUint64(&h.counts[i]) - buckets[i] = &dto.Bucket{ - CumulativeCount: proto.Uint64(count), - UpperBound: proto.Float64(upperBound), - } - } - his.Bucket = buckets - out.Histogram = his - out.Label = h.labelPairs - return nil -} - -// HistogramVec is a Collector that bundles a set of Histograms that all share the -// same Desc, but have different values for their variable labels. This is used -// if you want to count the same thing partitioned by various dimensions -// (e.g. HTTP request latencies, partitioned by status code and method). Create -// instances with NewHistogramVec. -type HistogramVec struct { - *MetricVec -} - -// NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and -// partitioned by the given label names. At least one label name must be -// provided. 
-func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec { - desc := NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - labelNames, - opts.ConstLabels, - ) - return &HistogramVec{ - MetricVec: newMetricVec(desc, func(lvs ...string) Metric { - return newHistogram(desc, opts, lvs...) - }), - } -} - -// GetMetricWithLabelValues replaces the method of the same name in -// MetricVec. The difference is that this method returns a Histogram and not a -// Metric so that no type conversion is required. -func (m *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Histogram, error) { - metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...) - if metric != nil { - return metric.(Histogram), err - } - return nil, err -} - -// GetMetricWith replaces the method of the same name in MetricVec. The -// difference is that this method returns a Histogram and not a Metric so that no -// type conversion is required. -func (m *HistogramVec) GetMetricWith(labels Labels) (Histogram, error) { - metric, err := m.MetricVec.GetMetricWith(labels) - if metric != nil { - return metric.(Histogram), err - } - return nil, err -} - -// WithLabelValues works as GetMetricWithLabelValues, but panics where -// GetMetricWithLabelValues would have returned an error. By not returning an -// error, WithLabelValues allows shortcuts like -// myVec.WithLabelValues("404", "GET").Observe(42.21) -func (m *HistogramVec) WithLabelValues(lvs ...string) Histogram { - return m.MetricVec.WithLabelValues(lvs...).(Histogram) -} - -// With works as GetMetricWith, but panics where GetMetricWithLabels would have -// returned an error. 
By not returning an error, With allows shortcuts like -// myVec.With(Labels{"code": "404", "method": "GET"}).Observe(42.21) -func (m *HistogramVec) With(labels Labels) Histogram { - return m.MetricVec.With(labels).(Histogram) -} - -type constHistogram struct { - desc *Desc - count uint64 - sum float64 - buckets map[float64]uint64 - labelPairs []*dto.LabelPair -} - -func (h *constHistogram) Desc() *Desc { - return h.desc -} - -func (h *constHistogram) Write(out *dto.Metric) error { - his := &dto.Histogram{} - buckets := make([]*dto.Bucket, 0, len(h.buckets)) - - his.SampleCount = proto.Uint64(h.count) - his.SampleSum = proto.Float64(h.sum) - - for upperBound, count := range h.buckets { - buckets = append(buckets, &dto.Bucket{ - CumulativeCount: proto.Uint64(count), - UpperBound: proto.Float64(upperBound), - }) - } - - if len(buckets) > 0 { - sort.Sort(buckSort(buckets)) - } - his.Bucket = buckets - - out.Histogram = his - out.Label = h.labelPairs - - return nil -} - -// NewConstHistogram returns a metric representing a Prometheus histogram with -// fixed values for the count, sum, and bucket counts. As those parameters -// cannot be changed, the returned value does not implement the Histogram -// interface (but only the Metric interface). Users of this package will not -// have much use for it in regular operations. However, when implementing custom -// Collectors, it is useful as a throw-away metric that is generated on the fly -// to send it to Prometheus in the Collect method. -// -// buckets is a map of upper bounds to cumulative counts, excluding the +Inf -// bucket. -// -// NewConstHistogram returns an error if the length of labelValues is not -// consistent with the variable labels in Desc. 
-func NewConstHistogram( - desc *Desc, - count uint64, - sum float64, - buckets map[float64]uint64, - labelValues ...string, -) (Metric, error) { - if len(desc.variableLabels) != len(labelValues) { - return nil, errInconsistentCardinality - } - return &constHistogram{ - desc: desc, - count: count, - sum: sum, - buckets: buckets, - labelPairs: makeLabelPairs(desc, labelValues), - }, nil -} - -// MustNewConstHistogram is a version of NewConstHistogram that panics where -// NewConstMetric would have returned an error. -func MustNewConstHistogram( - desc *Desc, - count uint64, - sum float64, - buckets map[float64]uint64, - labelValues ...string, -) Metric { - m, err := NewConstHistogram(desc, count, sum, buckets, labelValues...) - if err != nil { - panic(err) - } - return m -} - -type buckSort []*dto.Bucket - -func (s buckSort) Len() int { - return len(s) -} - -func (s buckSort) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -func (s buckSort) Less(i, j int) bool { - return s[i].GetUpperBound() < s[j].GetUpperBound() -} diff --git a/vendor/github.com/m3db/prometheus_client_golang/prometheus/http.go b/vendor/github.com/m3db/prometheus_client_golang/prometheus/http.go deleted file mode 100644 index a8e5c6356..000000000 --- a/vendor/github.com/m3db/prometheus_client_golang/prometheus/http.go +++ /dev/null @@ -1,490 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package prometheus - -import ( - "bufio" - "bytes" - "compress/gzip" - "fmt" - "io" - "net" - "net/http" - "strconv" - "strings" - "sync" - "time" - - "github.com/m3db/prometheus_common/expfmt" -) - -// TODO(beorn7): Remove this whole file. It is a partial mirror of -// promhttp/http.go (to avoid circular import chains) where everything HTTP -// related should live. The functions here are just for avoiding -// breakage. Everything is deprecated. - -const ( - contentTypeHeader = "Content-Type" - contentLengthHeader = "Content-Length" - contentEncodingHeader = "Content-Encoding" - acceptEncodingHeader = "Accept-Encoding" -) - -var bufPool sync.Pool - -func getBuf() *bytes.Buffer { - buf := bufPool.Get() - if buf == nil { - return &bytes.Buffer{} - } - return buf.(*bytes.Buffer) -} - -func giveBuf(buf *bytes.Buffer) { - buf.Reset() - bufPool.Put(buf) -} - -// Handler returns an HTTP handler for the DefaultGatherer. It is -// already instrumented with InstrumentHandler (using "prometheus" as handler -// name). -// -// Deprecated: Please note the issues described in the doc comment of -// InstrumentHandler. You might want to consider using promhttp.Handler instead -// (which is non instrumented). -func Handler() http.Handler { - return InstrumentHandler("prometheus", UninstrumentedHandler()) -} - -// UninstrumentedHandler returns an HTTP handler for the DefaultGatherer. -// -// Deprecated: Use promhttp.Handler instead. See there for further documentation. 
-func UninstrumentedHandler() http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { - mfs, err := DefaultGatherer.Gather() - if err != nil { - http.Error(w, "An error has occurred during metrics collection:\n\n"+err.Error(), http.StatusInternalServerError) - return - } - - contentType := expfmt.Negotiate(req.Header) - buf := getBuf() - defer giveBuf(buf) - writer, encoding := decorateWriter(req, buf) - enc := expfmt.NewEncoder(writer, contentType) - var lastErr error - for _, mf := range mfs { - if err := enc.Encode(mf); err != nil { - lastErr = err - http.Error(w, "An error has occurred during metrics encoding:\n\n"+err.Error(), http.StatusInternalServerError) - return - } - } - if closer, ok := writer.(io.Closer); ok { - closer.Close() - } - if lastErr != nil && buf.Len() == 0 { - http.Error(w, "No metrics encoded, last error:\n\n"+err.Error(), http.StatusInternalServerError) - return - } - header := w.Header() - header.Set(contentTypeHeader, string(contentType)) - header.Set(contentLengthHeader, fmt.Sprint(buf.Len())) - if encoding != "" { - header.Set(contentEncodingHeader, encoding) - } - w.Write(buf.Bytes()) - }) -} - -// decorateWriter wraps a writer to handle gzip compression if requested. It -// returns the decorated writer and the appropriate "Content-Encoding" header -// (which is empty if no compression is enabled). 
-func decorateWriter(request *http.Request, writer io.Writer) (io.Writer, string) { - header := request.Header.Get(acceptEncodingHeader) - parts := strings.Split(header, ",") - for _, part := range parts { - part := strings.TrimSpace(part) - if part == "gzip" || strings.HasPrefix(part, "gzip;") { - return gzip.NewWriter(writer), "gzip" - } - } - return writer, "" -} - -var instLabels = []string{"method", "code"} - -type nower interface { - Now() time.Time -} - -type nowFunc func() time.Time - -func (n nowFunc) Now() time.Time { - return n() -} - -var now nower = nowFunc(func() time.Time { - return time.Now() -}) - -func nowSeries(t ...time.Time) nower { - return nowFunc(func() time.Time { - defer func() { - t = t[1:] - }() - - return t[0] - }) -} - -// InstrumentHandler wraps the given HTTP handler for instrumentation. It -// registers four metric collectors (if not already done) and reports HTTP -// metrics to the (newly or already) registered collectors: http_requests_total -// (CounterVec), http_request_duration_microseconds (Summary), -// http_request_size_bytes (Summary), http_response_size_bytes (Summary). Each -// has a constant label named "handler" with the provided handlerName as -// value. http_requests_total is a metric vector partitioned by HTTP method -// (label name "method") and HTTP status code (label name "code"). -// -// Deprecated: InstrumentHandler has several issues: -// -// - It uses Summaries rather than Histograms. Summaries are not useful if -// aggregation across multiple instances is required. -// -// - It uses microseconds as unit, which is deprecated and should be replaced by -// seconds. -// -// - The size of the request is calculated in a separate goroutine. Since this -// calculator requires access to the request header, it creates a race with -// any writes to the header performed during request handling. -// httputil.ReverseProxy is a prominent example for a handler -// performing such writes. 
-// -// Upcoming versions of this package will provide ways of instrumenting HTTP -// handlers that are more flexible and have fewer issues. Please prefer direct -// instrumentation in the meantime. -func InstrumentHandler(handlerName string, handler http.Handler) http.HandlerFunc { - return InstrumentHandlerFunc(handlerName, handler.ServeHTTP) -} - -// InstrumentHandlerFunc wraps the given function for instrumentation. It -// otherwise works in the same way as InstrumentHandler (and shares the same -// issues). -// -// Deprecated: InstrumentHandlerFunc is deprecated for the same reasons as -// InstrumentHandler is. -func InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc { - return InstrumentHandlerFuncWithOpts( - SummaryOpts{ - Subsystem: "http", - ConstLabels: Labels{"handler": handlerName}, - }, - handlerFunc, - ) -} - -// InstrumentHandlerWithOpts works like InstrumentHandler (and shares the same -// issues) but provides more flexibility (at the cost of a more complex call -// syntax). As InstrumentHandler, this function registers four metric -// collectors, but it uses the provided SummaryOpts to create them. However, the -// fields "Name" and "Help" in the SummaryOpts are ignored. "Name" is replaced -// by "requests_total", "request_duration_microseconds", "request_size_bytes", -// and "response_size_bytes", respectively. "Help" is replaced by an appropriate -// help string. The names of the variable labels of the http_requests_total -// CounterVec are "method" (get, post, etc.), and "code" (HTTP status code). 
-// -// If InstrumentHandlerWithOpts is called as follows, it mimics exactly the -// behavior of InstrumentHandler: -// -// prometheus.InstrumentHandlerWithOpts( -// prometheus.SummaryOpts{ -// Subsystem: "http", -// ConstLabels: prometheus.Labels{"handler": handlerName}, -// }, -// handler, -// ) -// -// Technical detail: "requests_total" is a CounterVec, not a SummaryVec, so it -// cannot use SummaryOpts. Instead, a CounterOpts struct is created internally, -// and all its fields are set to the equally named fields in the provided -// SummaryOpts. -// -// Deprecated: InstrumentHandlerWithOpts is deprecated for the same reasons as -// InstrumentHandler is. -func InstrumentHandlerWithOpts(opts SummaryOpts, handler http.Handler) http.HandlerFunc { - return InstrumentHandlerFuncWithOpts(opts, handler.ServeHTTP) -} - -// InstrumentHandlerFuncWithOpts works like InstrumentHandlerFunc (and shares -// the same issues) but provides more flexibility (at the cost of a more complex -// call syntax). See InstrumentHandlerWithOpts for details how the provided -// SummaryOpts are used. -// -// Deprecated: InstrumentHandlerFuncWithOpts is deprecated for the same reasons -// as InstrumentHandler is. -func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc { - reqCnt := NewCounterVec( - CounterOpts{ - Namespace: opts.Namespace, - Subsystem: opts.Subsystem, - Name: "requests_total", - Help: "Total number of HTTP requests made.", - ConstLabels: opts.ConstLabels, - }, - instLabels, - ) - - opts.Name = "request_duration_microseconds" - opts.Help = "The HTTP request latencies in microseconds." - reqDur := NewSummary(opts) - - opts.Name = "request_size_bytes" - opts.Help = "The HTTP request sizes in bytes." - reqSz := NewSummary(opts) - - opts.Name = "response_size_bytes" - opts.Help = "The HTTP response sizes in bytes." 
- resSz := NewSummary(opts) - - regReqCnt := MustRegisterOrGet(reqCnt).(*CounterVec) - regReqDur := MustRegisterOrGet(reqDur).(Summary) - regReqSz := MustRegisterOrGet(reqSz).(Summary) - regResSz := MustRegisterOrGet(resSz).(Summary) - - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - now := time.Now() - - delegate := &responseWriterDelegator{ResponseWriter: w} - out := make(chan int) - urlLen := 0 - if r.URL != nil { - urlLen = len(r.URL.String()) - } - go computeApproximateRequestSize(r, out, urlLen) - - _, cn := w.(http.CloseNotifier) - _, fl := w.(http.Flusher) - _, hj := w.(http.Hijacker) - _, rf := w.(io.ReaderFrom) - var rw http.ResponseWriter - if cn && fl && hj && rf { - rw = &fancyResponseWriterDelegator{delegate} - } else { - rw = delegate - } - handlerFunc(rw, r) - - elapsed := float64(time.Since(now)) / float64(time.Microsecond) - - method := sanitizeMethod(r.Method) - code := sanitizeCode(delegate.status) - regReqCnt.WithLabelValues(method, code).Inc() - regReqDur.Observe(elapsed) - regResSz.Observe(float64(delegate.written)) - regReqSz.Observe(float64(<-out)) - }) -} - -func computeApproximateRequestSize(r *http.Request, out chan int, s int) { - s += len(r.Method) - s += len(r.Proto) - for name, values := range r.Header { - s += len(name) - for _, value := range values { - s += len(value) - } - } - s += len(r.Host) - - // N.B. r.Form and r.MultipartForm are assumed to be included in r.URL. 
- - if r.ContentLength != -1 { - s += int(r.ContentLength) - } - out <- s -} - -type responseWriterDelegator struct { - http.ResponseWriter - - handler, method string - status int - written int64 - wroteHeader bool -} - -func (r *responseWriterDelegator) WriteHeader(code int) { - r.status = code - r.wroteHeader = true - r.ResponseWriter.WriteHeader(code) -} - -func (r *responseWriterDelegator) Write(b []byte) (int, error) { - if !r.wroteHeader { - r.WriteHeader(http.StatusOK) - } - n, err := r.ResponseWriter.Write(b) - r.written += int64(n) - return n, err -} - -type fancyResponseWriterDelegator struct { - *responseWriterDelegator -} - -func (f *fancyResponseWriterDelegator) CloseNotify() <-chan bool { - return f.ResponseWriter.(http.CloseNotifier).CloseNotify() -} - -func (f *fancyResponseWriterDelegator) Flush() { - f.ResponseWriter.(http.Flusher).Flush() -} - -func (f *fancyResponseWriterDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) { - return f.ResponseWriter.(http.Hijacker).Hijack() -} - -func (f *fancyResponseWriterDelegator) ReadFrom(r io.Reader) (int64, error) { - if !f.wroteHeader { - f.WriteHeader(http.StatusOK) - } - n, err := f.ResponseWriter.(io.ReaderFrom).ReadFrom(r) - f.written += n - return n, err -} - -func sanitizeMethod(m string) string { - switch m { - case "GET", "get": - return "get" - case "PUT", "put": - return "put" - case "HEAD", "head": - return "head" - case "POST", "post": - return "post" - case "DELETE", "delete": - return "delete" - case "CONNECT", "connect": - return "connect" - case "OPTIONS", "options": - return "options" - case "NOTIFY", "notify": - return "notify" - default: - return strings.ToLower(m) - } -} - -func sanitizeCode(s int) string { - switch s { - case 100: - return "100" - case 101: - return "101" - - case 200: - return "200" - case 201: - return "201" - case 202: - return "202" - case 203: - return "203" - case 204: - return "204" - case 205: - return "205" - case 206: - return "206" - - case 300: - 
return "300" - case 301: - return "301" - case 302: - return "302" - case 304: - return "304" - case 305: - return "305" - case 307: - return "307" - - case 400: - return "400" - case 401: - return "401" - case 402: - return "402" - case 403: - return "403" - case 404: - return "404" - case 405: - return "405" - case 406: - return "406" - case 407: - return "407" - case 408: - return "408" - case 409: - return "409" - case 410: - return "410" - case 411: - return "411" - case 412: - return "412" - case 413: - return "413" - case 414: - return "414" - case 415: - return "415" - case 416: - return "416" - case 417: - return "417" - case 418: - return "418" - - case 500: - return "500" - case 501: - return "501" - case 502: - return "502" - case 503: - return "503" - case 504: - return "504" - case 505: - return "505" - - case 428: - return "428" - case 429: - return "429" - case 431: - return "431" - case 511: - return "511" - - default: - return strconv.Itoa(s) - } -} diff --git a/vendor/github.com/m3db/prometheus_client_golang/prometheus/process_collector.go b/vendor/github.com/m3db/prometheus_client_golang/prometheus/process_collector.go deleted file mode 100644 index 5c410e21f..000000000 --- a/vendor/github.com/m3db/prometheus_client_golang/prometheus/process_collector.go +++ /dev/null @@ -1,142 +0,0 @@ -// Copyright 2015 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package prometheus - -import "github.com/m3db/prometheus_procfs" - -type processCollector struct { - pid int - collectFn func(chan<- Metric) - pidFn func() (int, error) - cpuTotal Counter - openFDs, maxFDs Gauge - vsize, rss Gauge - startTime Gauge -} - -// NewProcessCollector returns a collector which exports the current state of -// process metrics including cpu, memory and file descriptor usage as well as -// the process start time for the given process id under the given namespace. -func NewProcessCollector(pid int, namespace string) Collector { - return NewProcessCollectorPIDFn( - func() (int, error) { return pid, nil }, - namespace, - ) -} - -// NewProcessCollectorPIDFn returns a collector which exports the current state -// of process metrics including cpu, memory and file descriptor usage as well -// as the process start time under the given namespace. The given pidFn is -// called on each collect and is used to determine the process to export -// metrics for. -func NewProcessCollectorPIDFn( - pidFn func() (int, error), - namespace string, -) Collector { - c := processCollector{ - pidFn: pidFn, - collectFn: func(chan<- Metric) {}, - - cpuTotal: NewCounter(CounterOpts{ - Namespace: namespace, - Name: "process_cpu_seconds_total", - Help: "Total user and system CPU time spent in seconds.", - }), - openFDs: NewGauge(GaugeOpts{ - Namespace: namespace, - Name: "process_open_fds", - Help: "Number of open file descriptors.", - }), - maxFDs: NewGauge(GaugeOpts{ - Namespace: namespace, - Name: "process_max_fds", - Help: "Maximum number of open file descriptors.", - }), - vsize: NewGauge(GaugeOpts{ - Namespace: namespace, - Name: "process_virtual_memory_bytes", - Help: "Virtual memory size in bytes.", - }), - rss: NewGauge(GaugeOpts{ - Namespace: namespace, - Name: "process_resident_memory_bytes", - Help: "Resident memory size in bytes.", - }), - startTime: NewGauge(GaugeOpts{ - Namespace: namespace, - Name: "process_start_time_seconds", - Help: "Start time of the 
process since unix epoch in seconds.", - }), - } - - // Set up process metric collection if supported by the runtime. - if _, err := procfs.NewStat(); err == nil { - c.collectFn = c.processCollect - } - - return &c -} - -// Describe returns all descriptions of the collector. -func (c *processCollector) Describe(ch chan<- *Desc) { - ch <- c.cpuTotal.Desc() - ch <- c.openFDs.Desc() - ch <- c.maxFDs.Desc() - ch <- c.vsize.Desc() - ch <- c.rss.Desc() - ch <- c.startTime.Desc() -} - -// Collect returns the current state of all metrics of the collector. -func (c *processCollector) Collect(ch chan<- Metric) { - c.collectFn(ch) -} - -// TODO(ts): Bring back error reporting by reverting 7faf9e7 as soon as the -// client allows users to configure the error behavior. -func (c *processCollector) processCollect(ch chan<- Metric) { - pid, err := c.pidFn() - if err != nil { - return - } - - p, err := procfs.NewProc(pid) - if err != nil { - return - } - - if stat, err := p.NewStat(); err == nil { - c.cpuTotal.Set(stat.CPUTime()) - ch <- c.cpuTotal - c.vsize.Set(float64(stat.VirtualMemory())) - ch <- c.vsize - c.rss.Set(float64(stat.ResidentMemory())) - ch <- c.rss - - if startTime, err := stat.StartTime(); err == nil { - c.startTime.Set(startTime) - ch <- c.startTime - } - } - - if fds, err := p.FileDescriptorsLen(); err == nil { - c.openFDs.Set(float64(fds)) - ch <- c.openFDs - } - - if limits, err := p.NewLimits(); err == nil { - c.maxFDs.Set(float64(limits.OpenFiles)) - ch <- c.maxFDs - } -} diff --git a/vendor/github.com/m3db/prometheus_client_golang/prometheus/promhttp/http.go b/vendor/github.com/m3db/prometheus_client_golang/prometheus/promhttp/http.go deleted file mode 100644 index 45ac38a38..000000000 --- a/vendor/github.com/m3db/prometheus_client_golang/prometheus/promhttp/http.go +++ /dev/null @@ -1,201 +0,0 @@ -// Copyright 2016 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance 
with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Copyright (c) 2013, The Prometheus Authors -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be found -// in the LICENSE file. - -// Package promhttp contains functions to create http.Handler instances to -// expose Prometheus metrics via HTTP. In later versions of this package, it -// will also contain tooling to instrument instances of http.Handler and -// http.RoundTripper. -// -// promhttp.Handler acts on the prometheus.DefaultGatherer. With HandlerFor, -// you can create a handler for a custom registry or anything that implements -// the Gatherer interface. It also allows to create handlers that act -// differently on errors or allow to log errors. -package promhttp - -import ( - "bytes" - "compress/gzip" - "fmt" - "io" - "net/http" - "strings" - "sync" - - "github.com/m3db/prometheus_common/expfmt" - - "github.com/m3db/prometheus_client_golang/prometheus" -) - -const ( - contentTypeHeader = "Content-Type" - contentLengthHeader = "Content-Length" - contentEncodingHeader = "Content-Encoding" - acceptEncodingHeader = "Accept-Encoding" -) - -var bufPool sync.Pool - -func getBuf() *bytes.Buffer { - buf := bufPool.Get() - if buf == nil { - return &bytes.Buffer{} - } - return buf.(*bytes.Buffer) -} - -func giveBuf(buf *bytes.Buffer) { - buf.Reset() - bufPool.Put(buf) -} - -// Handler returns an HTTP handler for the prometheus.DefaultGatherer. The -// Handler uses the default HandlerOpts, i.e. 
report the first error as an HTTP -// error, no error logging, and compression if requested by the client. -// -// If you want to create a Handler for the DefaultGatherer with different -// HandlerOpts, create it with HandlerFor with prometheus.DefaultGatherer and -// your desired HandlerOpts. -func Handler() http.Handler { - return HandlerFor(prometheus.DefaultGatherer, HandlerOpts{}) -} - -// HandlerFor returns an http.Handler for the provided Gatherer. The behavior -// of the Handler is defined by the provided HandlerOpts. -func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { - mfs, err := reg.Gather() - if err != nil { - if opts.ErrorLog != nil { - opts.ErrorLog.Println("error gathering metrics:", err) - } - switch opts.ErrorHandling { - case PanicOnError: - panic(err) - case ContinueOnError: - if len(mfs) == 0 { - http.Error(w, "No metrics gathered, last error:\n\n"+err.Error(), http.StatusInternalServerError) - return - } - case HTTPErrorOnError: - http.Error(w, "An error has occurred during metrics gathering:\n\n"+err.Error(), http.StatusInternalServerError) - return - } - } - - contentType := expfmt.Negotiate(req.Header) - buf := getBuf() - defer giveBuf(buf) - writer, encoding := decorateWriter(req, buf, opts.DisableCompression) - enc := expfmt.NewEncoder(writer, contentType) - var lastErr error - for _, mf := range mfs { - if err := enc.Encode(mf); err != nil { - lastErr = err - if opts.ErrorLog != nil { - opts.ErrorLog.Println("error encoding metric family:", err) - } - switch opts.ErrorHandling { - case PanicOnError: - panic(err) - case ContinueOnError: - // Handled later. 
- case HTTPErrorOnError: - http.Error(w, "An error has occurred during metrics encoding:\n\n"+err.Error(), http.StatusInternalServerError) - return - } - } - } - if closer, ok := writer.(io.Closer); ok { - closer.Close() - } - if lastErr != nil && buf.Len() == 0 { - http.Error(w, "No metrics encoded, last error:\n\n"+err.Error(), http.StatusInternalServerError) - return - } - header := w.Header() - header.Set(contentTypeHeader, string(contentType)) - header.Set(contentLengthHeader, fmt.Sprint(buf.Len())) - if encoding != "" { - header.Set(contentEncodingHeader, encoding) - } - w.Write(buf.Bytes()) - // TODO(beorn7): Consider streaming serving of metrics. - }) -} - -// HandlerErrorHandling defines how a Handler serving metrics will handle -// errors. -type HandlerErrorHandling int - -// These constants cause handlers serving metrics to behave as described if -// errors are encountered. -const ( - // Serve an HTTP status code 500 upon the first error - // encountered. Report the error message in the body. - HTTPErrorOnError HandlerErrorHandling = iota - // Ignore errors and try to serve as many metrics as possible. However, - // if no metrics can be served, serve an HTTP status code 500 and the - // last error message in the body. Only use this in deliberate "best - // effort" metrics collection scenarios. It is recommended to at least - // log errors (by providing an ErrorLog in HandlerOpts) to not mask - // errors completely. - ContinueOnError - // Panic upon the first error encountered (useful for "crash only" apps). - PanicOnError -) - -// Logger is the minimal interface HandlerOpts needs for logging. Note that -// log.Logger from the standard library implements this interface, and it is -// easy to implement by custom loggers, if they don't do so already anyway. -type Logger interface { - Println(v ...interface{}) -} - -// HandlerOpts specifies options how to serve metrics via an http.Handler. The -// zero value of HandlerOpts is a reasonable default. 
-type HandlerOpts struct { - // ErrorLog specifies an optional logger for errors collecting and - // serving metrics. If nil, errors are not logged at all. - ErrorLog Logger - // ErrorHandling defines how errors are handled. Note that errors are - // logged regardless of the configured ErrorHandling provided ErrorLog - // is not nil. - ErrorHandling HandlerErrorHandling - // If DisableCompression is true, the handler will never compress the - // response, even if requested by the client. - DisableCompression bool -} - -// decorateWriter wraps a writer to handle gzip compression if requested. It -// returns the decorated writer and the appropriate "Content-Encoding" header -// (which is empty if no compression is enabled). -func decorateWriter(request *http.Request, writer io.Writer, compressionDisabled bool) (io.Writer, string) { - if compressionDisabled { - return writer, "" - } - header := request.Header.Get(acceptEncodingHeader) - parts := strings.Split(header, ",") - for _, part := range parts { - part := strings.TrimSpace(part) - if part == "gzip" || strings.HasPrefix(part, "gzip;") { - return gzip.NewWriter(writer), "gzip" - } - } - return writer, "" -} diff --git a/vendor/github.com/m3db/prometheus_client_golang/prometheus/untyped.go b/vendor/github.com/m3db/prometheus_client_golang/prometheus/untyped.go deleted file mode 100644 index 5faf7e6e3..000000000 --- a/vendor/github.com/m3db/prometheus_client_golang/prometheus/untyped.go +++ /dev/null @@ -1,138 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -// Untyped is a Metric that represents a single numerical value that can -// arbitrarily go up and down. -// -// An Untyped metric works the same as a Gauge. The only difference is that to -// no type information is implied. -// -// To create Untyped instances, use NewUntyped. -type Untyped interface { - Metric - Collector - - // Set sets the Untyped metric to an arbitrary value. - Set(float64) - // Inc increments the Untyped metric by 1. - Inc() - // Dec decrements the Untyped metric by 1. - Dec() - // Add adds the given value to the Untyped metric. (The value can be - // negative, resulting in a decrease.) - Add(float64) - // Sub subtracts the given value from the Untyped metric. (The value can - // be negative, resulting in an increase.) - Sub(float64) -} - -// UntypedOpts is an alias for Opts. See there for doc comments. -type UntypedOpts Opts - -// NewUntyped creates a new Untyped metric from the provided UntypedOpts. -func NewUntyped(opts UntypedOpts) Untyped { - return newValue(NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - nil, - opts.ConstLabels, - ), UntypedValue, 0) -} - -// UntypedVec is a Collector that bundles a set of Untyped metrics that all -// share the same Desc, but have different values for their variable -// labels. This is used if you want to count the same thing partitioned by -// various dimensions. Create instances with NewUntypedVec. -type UntypedVec struct { - *MetricVec -} - -// NewUntypedVec creates a new UntypedVec based on the provided UntypedOpts and -// partitioned by the given label names. At least one label name must be -// provided. 
-func NewUntypedVec(opts UntypedOpts, labelNames []string) *UntypedVec { - desc := NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - labelNames, - opts.ConstLabels, - ) - return &UntypedVec{ - MetricVec: newMetricVec(desc, func(lvs ...string) Metric { - return newValue(desc, UntypedValue, 0, lvs...) - }), - } -} - -// GetMetricWithLabelValues replaces the method of the same name in -// MetricVec. The difference is that this method returns an Untyped and not a -// Metric so that no type conversion is required. -func (m *UntypedVec) GetMetricWithLabelValues(lvs ...string) (Untyped, error) { - metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...) - if metric != nil { - return metric.(Untyped), err - } - return nil, err -} - -// GetMetricWith replaces the method of the same name in MetricVec. The -// difference is that this method returns an Untyped and not a Metric so that no -// type conversion is required. -func (m *UntypedVec) GetMetricWith(labels Labels) (Untyped, error) { - metric, err := m.MetricVec.GetMetricWith(labels) - if metric != nil { - return metric.(Untyped), err - } - return nil, err -} - -// WithLabelValues works as GetMetricWithLabelValues, but panics where -// GetMetricWithLabelValues would have returned an error. By not returning an -// error, WithLabelValues allows shortcuts like -// myVec.WithLabelValues("404", "GET").Add(42) -func (m *UntypedVec) WithLabelValues(lvs ...string) Untyped { - return m.MetricVec.WithLabelValues(lvs...).(Untyped) -} - -// With works as GetMetricWith, but panics where GetMetricWithLabels would have -// returned an error. By not returning an error, With allows shortcuts like -// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) -func (m *UntypedVec) With(labels Labels) Untyped { - return m.MetricVec.With(labels).(Untyped) -} - -// UntypedFunc is an Untyped whose value is determined at collect time by -// calling a provided function. 
-// -// To create UntypedFunc instances, use NewUntypedFunc. -type UntypedFunc interface { - Metric - Collector -} - -// NewUntypedFunc creates a new UntypedFunc based on the provided -// UntypedOpts. The value reported is determined by calling the given function -// from within the Write method. Take into account that metric collection may -// happen concurrently. If that results in concurrent calls to Write, like in -// the case where an UntypedFunc is directly registered with Prometheus, the -// provided function must be concurrency-safe. -func NewUntypedFunc(opts UntypedOpts, function func() float64) UntypedFunc { - return newValueFunc(NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - nil, - opts.ConstLabels, - ), UntypedValue, function) -} diff --git a/vendor/github.com/m3db/prometheus_client_golang/prometheus/vec.go b/vendor/github.com/m3db/prometheus_client_golang/prometheus/vec.go deleted file mode 100644 index 1a4329938..000000000 --- a/vendor/github.com/m3db/prometheus_client_golang/prometheus/vec.go +++ /dev/null @@ -1,404 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "fmt" - "sync" - - "github.com/m3db/prometheus_common/model" -) - -// MetricVec is a Collector to bundle metrics of the same name that -// differ in their label values. 
MetricVec is usually not used directly but as a -// building block for implementations of vectors of a given metric -// type. GaugeVec, CounterVec, SummaryVec, and UntypedVec are examples already -// provided in this package. -type MetricVec struct { - mtx sync.RWMutex // Protects the children. - children map[uint64][]metricWithLabelValues - desc *Desc - - newMetric func(labelValues ...string) Metric - hashAdd func(h uint64, s string) uint64 // replace hash function for testing collision handling - hashAddByte func(h uint64, b byte) uint64 -} - -// newMetricVec returns an initialized MetricVec. The concrete value is -// returned for embedding into another struct. -func newMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *MetricVec { - return &MetricVec{ - children: map[uint64][]metricWithLabelValues{}, - desc: desc, - newMetric: newMetric, - hashAdd: hashAdd, - hashAddByte: hashAddByte, - } -} - -// metricWithLabelValues provides the metric and its label values for -// disambiguation on hash collision. -type metricWithLabelValues struct { - values []string - metric Metric -} - -// Describe implements Collector. The length of the returned slice -// is always one. -func (m *MetricVec) Describe(ch chan<- *Desc) { - ch <- m.desc -} - -// Collect implements Collector. -func (m *MetricVec) Collect(ch chan<- Metric) { - m.mtx.RLock() - defer m.mtx.RUnlock() - - for _, metrics := range m.children { - for _, metric := range metrics { - ch <- metric.metric - } - } -} - -// GetMetricWithLabelValues returns the Metric for the given slice of label -// values (same order as the VariableLabels in Desc). If that combination of -// label values is accessed for the first time, a new Metric is created. -// -// It is possible to call this method without using the returned Metric to only -// create the new Metric but leave it at its start value (e.g. a Summary or -// Histogram without any observations). See also the SummaryVec example. 
-// -// Keeping the Metric for later use is possible (and should be considered if -// performance is critical), but keep in mind that Reset, DeleteLabelValues and -// Delete can be used to delete the Metric from the MetricVec. In that case, the -// Metric will still exist, but it will not be exported anymore, even if a -// Metric with the same label values is created later. See also the CounterVec -// example. -// -// An error is returned if the number of label values is not the same as the -// number of VariableLabels in Desc. -// -// Note that for more than one label value, this method is prone to mistakes -// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as -// an alternative to avoid that type of mistake. For higher label numbers, the -// latter has a much more readable (albeit more verbose) syntax, but it comes -// with a performance overhead (for creating and processing the Labels map). -// See also the GaugeVec example. -func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) { - h, err := m.hashLabelValues(lvs) - if err != nil { - return nil, err - } - - return m.getOrCreateMetricWithLabelValues(h, lvs), nil -} - -// GetMetricWith returns the Metric for the given Labels map (the label names -// must match those of the VariableLabels in Desc). If that label map is -// accessed for the first time, a new Metric is created. Implications of -// creating a Metric without using it and keeping the Metric for later use are -// the same as for GetMetricWithLabelValues. -// -// An error is returned if the number and names of the Labels are inconsistent -// with those of the VariableLabels in Desc. -// -// This method is used for the same purpose as -// GetMetricWithLabelValues(...string). See there for pros and cons of the two -// methods. 
-func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) { - h, err := m.hashLabels(labels) - if err != nil { - return nil, err - } - - return m.getOrCreateMetricWithLabels(h, labels), nil -} - -// WithLabelValues works as GetMetricWithLabelValues, but panics if an error -// occurs. The method allows neat syntax like: -// httpReqs.WithLabelValues("404", "POST").Inc() -func (m *MetricVec) WithLabelValues(lvs ...string) Metric { - metric, err := m.GetMetricWithLabelValues(lvs...) - if err != nil { - panic(err) - } - return metric -} - -// With works as GetMetricWith, but panics if an error occurs. The method allows -// neat syntax like: -// httpReqs.With(Labels{"status":"404", "method":"POST"}).Inc() -func (m *MetricVec) With(labels Labels) Metric { - metric, err := m.GetMetricWith(labels) - if err != nil { - panic(err) - } - return metric -} - -// DeleteLabelValues removes the metric where the variable labels are the same -// as those passed in as labels (same order as the VariableLabels in Desc). It -// returns true if a metric was deleted. -// -// It is not an error if the number of label values is not the same as the -// number of VariableLabels in Desc. However, such inconsistent label count can -// never match an actual Metric, so the method will always return false in that -// case. -// -// Note that for more than one label value, this method is prone to mistakes -// caused by an incorrect order of arguments. Consider Delete(Labels) as an -// alternative to avoid that type of mistake. For higher label numbers, the -// latter has a much more readable (albeit more verbose) syntax, but it comes -// with a performance overhead (for creating and processing the Labels map). -// See also the CounterVec example. 
-func (m *MetricVec) DeleteLabelValues(lvs ...string) bool { - m.mtx.Lock() - defer m.mtx.Unlock() - - h, err := m.hashLabelValues(lvs) - if err != nil { - return false - } - return m.deleteByHashWithLabelValues(h, lvs) -} - -// Delete deletes the metric where the variable labels are the same as those -// passed in as labels. It returns true if a metric was deleted. -// -// It is not an error if the number and names of the Labels are inconsistent -// with those of the VariableLabels in the Desc of the MetricVec. However, such -// inconsistent Labels can never match an actual Metric, so the method will -// always return false in that case. -// -// This method is used for the same purpose as DeleteLabelValues(...string). See -// there for pros and cons of the two methods. -func (m *MetricVec) Delete(labels Labels) bool { - m.mtx.Lock() - defer m.mtx.Unlock() - - h, err := m.hashLabels(labels) - if err != nil { - return false - } - - return m.deleteByHashWithLabels(h, labels) -} - -// deleteByHashWithLabelValues removes the metric from the hash bucket h. If -// there are multiple matches in the bucket, use lvs to select a metric and -// remove only that metric. -func (m *MetricVec) deleteByHashWithLabelValues(h uint64, lvs []string) bool { - metrics, ok := m.children[h] - if !ok { - return false - } - - i := m.findMetricWithLabelValues(metrics, lvs) - if i >= len(metrics) { - return false - } - - if len(metrics) > 1 { - m.children[h] = append(metrics[:i], metrics[i+1:]...) - } else { - delete(m.children, h) - } - return true -} - -// deleteByHashWithLabels removes the metric from the hash bucket h. If there -// are multiple matches in the bucket, use lvs to select a metric and remove -// only that metric. 
-func (m *MetricVec) deleteByHashWithLabels(h uint64, labels Labels) bool { - metrics, ok := m.children[h] - if !ok { - return false - } - i := m.findMetricWithLabels(metrics, labels) - if i >= len(metrics) { - return false - } - - if len(metrics) > 1 { - m.children[h] = append(metrics[:i], metrics[i+1:]...) - } else { - delete(m.children, h) - } - return true -} - -// Reset deletes all metrics in this vector. -func (m *MetricVec) Reset() { - m.mtx.Lock() - defer m.mtx.Unlock() - - for h := range m.children { - delete(m.children, h) - } -} - -func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) { - if len(vals) != len(m.desc.variableLabels) { - return 0, errInconsistentCardinality - } - h := hashNew() - for _, val := range vals { - h = m.hashAdd(h, val) - h = m.hashAddByte(h, model.SeparatorByte) - } - return h, nil -} - -func (m *MetricVec) hashLabels(labels Labels) (uint64, error) { - if len(labels) != len(m.desc.variableLabels) { - return 0, errInconsistentCardinality - } - h := hashNew() - for _, label := range m.desc.variableLabels { - val, ok := labels[label] - if !ok { - return 0, fmt.Errorf("label name %q missing in label map", label) - } - h = m.hashAdd(h, val) - h = m.hashAddByte(h, model.SeparatorByte) - } - return h, nil -} - -// getOrCreateMetricWithLabelValues retrieves the metric by hash and label value -// or creates it and returns the new one. -// -// This function holds the mutex. -func (m *MetricVec) getOrCreateMetricWithLabelValues(hash uint64, lvs []string) Metric { - m.mtx.RLock() - metric, ok := m.getMetricWithLabelValues(hash, lvs) - m.mtx.RUnlock() - if ok { - return metric - } - - m.mtx.Lock() - defer m.mtx.Unlock() - metric, ok = m.getMetricWithLabelValues(hash, lvs) - if !ok { - // Copy to avoid allocation in case wo don't go down this code path. - copiedLVs := make([]string, len(lvs)) - copy(copiedLVs, lvs) - metric = m.newMetric(copiedLVs...) 
- m.children[hash] = append(m.children[hash], metricWithLabelValues{values: copiedLVs, metric: metric}) - } - return metric -} - -// getOrCreateMetricWithLabelValues retrieves the metric by hash and label value -// or creates it and returns the new one. -// -// This function holds the mutex. -func (m *MetricVec) getOrCreateMetricWithLabels(hash uint64, labels Labels) Metric { - m.mtx.RLock() - metric, ok := m.getMetricWithLabels(hash, labels) - m.mtx.RUnlock() - if ok { - return metric - } - - m.mtx.Lock() - defer m.mtx.Unlock() - metric, ok = m.getMetricWithLabels(hash, labels) - if !ok { - lvs := m.extractLabelValues(labels) - metric = m.newMetric(lvs...) - m.children[hash] = append(m.children[hash], metricWithLabelValues{values: lvs, metric: metric}) - } - return metric -} - -// getMetricWithLabelValues gets a metric while handling possible collisions in -// the hash space. Must be called while holding read mutex. -func (m *MetricVec) getMetricWithLabelValues(h uint64, lvs []string) (Metric, bool) { - metrics, ok := m.children[h] - if ok { - if i := m.findMetricWithLabelValues(metrics, lvs); i < len(metrics) { - return metrics[i].metric, true - } - } - return nil, false -} - -// getMetricWithLabels gets a metric while handling possible collisions in -// the hash space. Must be called while holding read mutex. -func (m *MetricVec) getMetricWithLabels(h uint64, labels Labels) (Metric, bool) { - metrics, ok := m.children[h] - if ok { - if i := m.findMetricWithLabels(metrics, labels); i < len(metrics) { - return metrics[i].metric, true - } - } - return nil, false -} - -// findMetricWithLabelValues returns the index of the matching metric or -// len(metrics) if not found. 
-func (m *MetricVec) findMetricWithLabelValues(metrics []metricWithLabelValues, lvs []string) int { - for i, metric := range metrics { - if m.matchLabelValues(metric.values, lvs) { - return i - } - } - return len(metrics) -} - -// findMetricWithLabels returns the index of the matching metric or len(metrics) -// if not found. -func (m *MetricVec) findMetricWithLabels(metrics []metricWithLabelValues, labels Labels) int { - for i, metric := range metrics { - if m.matchLabels(metric.values, labels) { - return i - } - } - return len(metrics) -} - -func (m *MetricVec) matchLabelValues(values []string, lvs []string) bool { - if len(values) != len(lvs) { - return false - } - for i, v := range values { - if v != lvs[i] { - return false - } - } - return true -} - -func (m *MetricVec) matchLabels(values []string, labels Labels) bool { - if len(labels) != len(values) { - return false - } - for i, k := range m.desc.variableLabels { - if values[i] != labels[k] { - return false - } - } - return true -} - -func (m *MetricVec) extractLabelValues(labels Labels) []string { - labelValues := make([]string, len(labels)) - for i, k := range m.desc.variableLabels { - labelValues[i] = labels[k] - } - return labelValues -} diff --git a/vendor/github.com/m3db/prometheus_client_model/go/metrics.pb.go b/vendor/github.com/m3db/prometheus_client_model/go/metrics.pb.go deleted file mode 100644 index e509277e6..000000000 --- a/vendor/github.com/m3db/prometheus_client_model/go/metrics.pb.go +++ /dev/null @@ -1,360 +0,0 @@ -// Code generated by protoc-gen-go. -// source: metrics.proto -// DO NOT EDIT! - -/* -Package io_prometheus_client is a generated protocol buffer package. 
- -It is generated from these files: - metrics.proto - -It has these top-level messages: - LabelPair - Gauge - Counter - Quantile - Summary - Untyped - Histogram - Bucket - Metric - MetricFamily -*/ -package io_prometheus_client - -import proto "github.com/golang/protobuf/proto" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = math.Inf - -type MetricType int32 - -const ( - MetricType_COUNTER MetricType = 0 - MetricType_GAUGE MetricType = 1 - MetricType_SUMMARY MetricType = 2 - MetricType_UNTYPED MetricType = 3 - MetricType_HISTOGRAM MetricType = 4 -) - -var MetricType_name = map[int32]string{ - 0: "COUNTER", - 1: "GAUGE", - 2: "SUMMARY", - 3: "UNTYPED", - 4: "HISTOGRAM", -} -var MetricType_value = map[string]int32{ - "COUNTER": 0, - "GAUGE": 1, - "SUMMARY": 2, - "UNTYPED": 3, - "HISTOGRAM": 4, -} - -func (x MetricType) Enum() *MetricType { - p := new(MetricType) - *p = x - return p -} -func (x MetricType) String() string { - return proto.EnumName(MetricType_name, int32(x)) -} -func (x *MetricType) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(MetricType_value, data, "MetricType") - if err != nil { - return err - } - *x = MetricType(value) - return nil -} - -type LabelPair struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *LabelPair) Reset() { *m = LabelPair{} } -func (m *LabelPair) String() string { return proto.CompactTextString(m) } -func (*LabelPair) ProtoMessage() {} - -func (m *LabelPair) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *LabelPair) GetValue() string { - if m != nil && m.Value != nil { - return *m.Value - } - return "" -} - -type Gauge struct { - Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` - 
XXX_unrecognized []byte `json:"-"` -} - -func (m *Gauge) Reset() { *m = Gauge{} } -func (m *Gauge) String() string { return proto.CompactTextString(m) } -func (*Gauge) ProtoMessage() {} - -func (m *Gauge) GetValue() float64 { - if m != nil && m.Value != nil { - return *m.Value - } - return 0 -} - -type Counter struct { - Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Counter) Reset() { *m = Counter{} } -func (m *Counter) String() string { return proto.CompactTextString(m) } -func (*Counter) ProtoMessage() {} - -func (m *Counter) GetValue() float64 { - if m != nil && m.Value != nil { - return *m.Value - } - return 0 -} - -type Quantile struct { - Quantile *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"` - Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Quantile) Reset() { *m = Quantile{} } -func (m *Quantile) String() string { return proto.CompactTextString(m) } -func (*Quantile) ProtoMessage() {} - -func (m *Quantile) GetQuantile() float64 { - if m != nil && m.Quantile != nil { - return *m.Quantile - } - return 0 -} - -func (m *Quantile) GetValue() float64 { - if m != nil && m.Value != nil { - return *m.Value - } - return 0 -} - -type Summary struct { - SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count" json:"sample_count,omitempty"` - SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum" json:"sample_sum,omitempty"` - Quantile []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Summary) Reset() { *m = Summary{} } -func (m *Summary) String() string { return proto.CompactTextString(m) } -func (*Summary) ProtoMessage() {} - -func (m *Summary) GetSampleCount() uint64 { - if m != nil && m.SampleCount != nil { - return *m.SampleCount - } - return 0 -} - -func (m *Summary) GetSampleSum() 
float64 { - if m != nil && m.SampleSum != nil { - return *m.SampleSum - } - return 0 -} - -func (m *Summary) GetQuantile() []*Quantile { - if m != nil { - return m.Quantile - } - return nil -} - -type Untyped struct { - Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Untyped) Reset() { *m = Untyped{} } -func (m *Untyped) String() string { return proto.CompactTextString(m) } -func (*Untyped) ProtoMessage() {} - -func (m *Untyped) GetValue() float64 { - if m != nil && m.Value != nil { - return *m.Value - } - return 0 -} - -type Histogram struct { - SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count" json:"sample_count,omitempty"` - SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum" json:"sample_sum,omitempty"` - Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Histogram) Reset() { *m = Histogram{} } -func (m *Histogram) String() string { return proto.CompactTextString(m) } -func (*Histogram) ProtoMessage() {} - -func (m *Histogram) GetSampleCount() uint64 { - if m != nil && m.SampleCount != nil { - return *m.SampleCount - } - return 0 -} - -func (m *Histogram) GetSampleSum() float64 { - if m != nil && m.SampleSum != nil { - return *m.SampleSum - } - return 0 -} - -func (m *Histogram) GetBucket() []*Bucket { - if m != nil { - return m.Bucket - } - return nil -} - -type Bucket struct { - CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count" json:"cumulative_count,omitempty"` - UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound" json:"upper_bound,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Bucket) Reset() { *m = Bucket{} } -func (m *Bucket) String() string { return proto.CompactTextString(m) } -func (*Bucket) ProtoMessage() {} - -func (m *Bucket) GetCumulativeCount() uint64 { - if m != nil && m.CumulativeCount != nil { - return 
*m.CumulativeCount - } - return 0 -} - -func (m *Bucket) GetUpperBound() float64 { - if m != nil && m.UpperBound != nil { - return *m.UpperBound - } - return 0 -} - -type Metric struct { - Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"` - Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"` - Counter *Counter `protobuf:"bytes,3,opt,name=counter" json:"counter,omitempty"` - Summary *Summary `protobuf:"bytes,4,opt,name=summary" json:"summary,omitempty"` - Untyped *Untyped `protobuf:"bytes,5,opt,name=untyped" json:"untyped,omitempty"` - Histogram *Histogram `protobuf:"bytes,7,opt,name=histogram" json:"histogram,omitempty"` - TimestampMs *int64 `protobuf:"varint,6,opt,name=timestamp_ms" json:"timestamp_ms,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Metric) Reset() { *m = Metric{} } -func (m *Metric) String() string { return proto.CompactTextString(m) } -func (*Metric) ProtoMessage() {} - -func (m *Metric) GetLabel() []*LabelPair { - if m != nil { - return m.Label - } - return nil -} - -func (m *Metric) GetGauge() *Gauge { - if m != nil { - return m.Gauge - } - return nil -} - -func (m *Metric) GetCounter() *Counter { - if m != nil { - return m.Counter - } - return nil -} - -func (m *Metric) GetSummary() *Summary { - if m != nil { - return m.Summary - } - return nil -} - -func (m *Metric) GetUntyped() *Untyped { - if m != nil { - return m.Untyped - } - return nil -} - -func (m *Metric) GetHistogram() *Histogram { - if m != nil { - return m.Histogram - } - return nil -} - -func (m *Metric) GetTimestampMs() int64 { - if m != nil && m.TimestampMs != nil { - return *m.TimestampMs - } - return 0 -} - -type MetricFamily struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Help *string `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"` - Type *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"` - Metric []*Metric 
`protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MetricFamily) Reset() { *m = MetricFamily{} } -func (m *MetricFamily) String() string { return proto.CompactTextString(m) } -func (*MetricFamily) ProtoMessage() {} - -func (m *MetricFamily) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *MetricFamily) GetHelp() string { - if m != nil && m.Help != nil { - return *m.Help - } - return "" -} - -func (m *MetricFamily) GetType() MetricType { - if m != nil && m.Type != nil { - return *m.Type - } - return MetricType_COUNTER -} - -func (m *MetricFamily) GetMetric() []*Metric { - if m != nil { - return m.Metric - } - return nil -} diff --git a/vendor/github.com/m3db/prometheus_common/expfmt/encode.go b/vendor/github.com/m3db/prometheus_common/expfmt/encode.go deleted file mode 100644 index 171b9aacc..000000000 --- a/vendor/github.com/m3db/prometheus_common/expfmt/encode.go +++ /dev/null @@ -1,88 +0,0 @@ -// Copyright 2015 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package expfmt - -import ( - "fmt" - "io" - "net/http" - - "github.com/golang/protobuf/proto" - "github.com/m3db/prometheus_common/internal/bitbucket.org/ww/goautoneg" - "github.com/matttproud/golang_protobuf_extensions/pbutil" - - dto "github.com/m3db/prometheus_client_model/go" -) - -// Encoder types encode metric families into an underlying wire protocol. 
-type Encoder interface { - Encode(*dto.MetricFamily) error -} - -type encoder func(*dto.MetricFamily) error - -func (e encoder) Encode(v *dto.MetricFamily) error { - return e(v) -} - -// Negotiate returns the Content-Type based on the given Accept header. -// If no appropriate accepted type is found, FmtText is returned. -func Negotiate(h http.Header) Format { - for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) { - // Check for protocol buffer - if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol { - switch ac.Params["encoding"] { - case "delimited": - return FmtProtoDelim - case "text": - return FmtProtoText - case "compact-text": - return FmtProtoCompact - } - } - // Check for text format. - ver := ac.Params["version"] - if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") { - return FmtText - } - } - return FmtText -} - -// NewEncoder returns a new encoder based on content type negotiation. -func NewEncoder(w io.Writer, format Format) Encoder { - switch format { - case FmtProtoDelim: - return encoder(func(v *dto.MetricFamily) error { - _, err := pbutil.WriteDelimited(w, v) - return err - }) - case FmtProtoCompact: - return encoder(func(v *dto.MetricFamily) error { - _, err := fmt.Fprintln(w, v.String()) - return err - }) - case FmtProtoText: - return encoder(func(v *dto.MetricFamily) error { - _, err := fmt.Fprintln(w, proto.MarshalTextString(v)) - return err - }) - case FmtText: - return encoder(func(v *dto.MetricFamily) error { - _, err := MetricFamilyToText(w, v) - return err - }) - } - panic("expfmt.NewEncoder: unknown format") -} diff --git a/vendor/github.com/m3db/prometheus_common/expfmt/text_create.go b/vendor/github.com/m3db/prometheus_common/expfmt/text_create.go deleted file mode 100644 index 4957b63b8..000000000 --- a/vendor/github.com/m3db/prometheus_common/expfmt/text_create.go +++ /dev/null @@ -1,303 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache 
License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package expfmt - -import ( - "fmt" - "io" - "math" - "strings" - - dto "github.com/m3db/prometheus_client_model/go" - "github.com/m3db/prometheus_common/model" -) - -// MetricFamilyToText converts a MetricFamily proto message into text format and -// writes the resulting lines to 'out'. It returns the number of bytes written -// and any error encountered. The output will have the same order as the input, -// no further sorting is performed. Furthermore, this function assumes the input -// is already sanitized and does not perform any sanity checks. If the input -// contains duplicate metrics or invalid metric or label names, the conversion -// will result in invalid text format output. -// -// This method fulfills the type 'prometheus.encoder'. -func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) { - var written int - - // Fail-fast checks. - if len(in.Metric) == 0 { - return written, fmt.Errorf("MetricFamily has no metrics: %s", in) - } - name := in.GetName() - if name == "" { - return written, fmt.Errorf("MetricFamily has no name: %s", in) - } - - // Comments, first HELP, then TYPE. 
- if in.Help != nil { - n, err := fmt.Fprintf( - out, "# HELP %s %s\n", - name, escapeString(*in.Help, false), - ) - written += n - if err != nil { - return written, err - } - } - metricType := in.GetType() - n, err := fmt.Fprintf( - out, "# TYPE %s %s\n", - name, strings.ToLower(metricType.String()), - ) - written += n - if err != nil { - return written, err - } - - // Finally the samples, one line for each. - for _, metric := range in.Metric { - switch metricType { - case dto.MetricType_COUNTER: - if metric.Counter == nil { - return written, fmt.Errorf( - "expected counter in metric %s %s", name, metric, - ) - } - n, err = writeSample( - name, metric, "", "", - metric.Counter.GetValue(), - out, - ) - case dto.MetricType_GAUGE: - if metric.Gauge == nil { - return written, fmt.Errorf( - "expected gauge in metric %s %s", name, metric, - ) - } - n, err = writeSample( - name, metric, "", "", - metric.Gauge.GetValue(), - out, - ) - case dto.MetricType_UNTYPED: - if metric.Untyped == nil { - return written, fmt.Errorf( - "expected untyped in metric %s %s", name, metric, - ) - } - n, err = writeSample( - name, metric, "", "", - metric.Untyped.GetValue(), - out, - ) - case dto.MetricType_SUMMARY: - if metric.Summary == nil { - return written, fmt.Errorf( - "expected summary in metric %s %s", name, metric, - ) - } - for _, q := range metric.Summary.Quantile { - n, err = writeSample( - name, metric, - model.QuantileLabel, fmt.Sprint(q.GetQuantile()), - q.GetValue(), - out, - ) - written += n - if err != nil { - return written, err - } - } - n, err = writeSample( - name+"_sum", metric, "", "", - metric.Summary.GetSampleSum(), - out, - ) - if err != nil { - return written, err - } - written += n - n, err = writeSample( - name+"_count", metric, "", "", - float64(metric.Summary.GetSampleCount()), - out, - ) - case dto.MetricType_HISTOGRAM: - if metric.Histogram == nil { - return written, fmt.Errorf( - "expected histogram in metric %s %s", name, metric, - ) - } - infSeen := 
false - for _, q := range metric.Histogram.Bucket { - n, err = writeSample( - name+"_bucket", metric, - model.BucketLabel, fmt.Sprint(q.GetUpperBound()), - float64(q.GetCumulativeCount()), - out, - ) - written += n - if err != nil { - return written, err - } - if math.IsInf(q.GetUpperBound(), +1) { - infSeen = true - } - } - if !infSeen { - n, err = writeSample( - name+"_bucket", metric, - model.BucketLabel, "+Inf", - float64(metric.Histogram.GetSampleCount()), - out, - ) - if err != nil { - return written, err - } - written += n - } - n, err = writeSample( - name+"_sum", metric, "", "", - metric.Histogram.GetSampleSum(), - out, - ) - if err != nil { - return written, err - } - written += n - n, err = writeSample( - name+"_count", metric, "", "", - float64(metric.Histogram.GetSampleCount()), - out, - ) - default: - return written, fmt.Errorf( - "unexpected type in metric %s %s", name, metric, - ) - } - written += n - if err != nil { - return written, err - } - } - return written, nil -} - -// writeSample writes a single sample in text format to out, given the metric -// name, the metric proto message itself, optionally an additional label name -// and value (use empty strings if not required), and the value. The function -// returns the number of bytes written and any error encountered. 
-func writeSample( - name string, - metric *dto.Metric, - additionalLabelName, additionalLabelValue string, - value float64, - out io.Writer, -) (int, error) { - var written int - n, err := fmt.Fprint(out, name) - written += n - if err != nil { - return written, err - } - n, err = labelPairsToText( - metric.Label, - additionalLabelName, additionalLabelValue, - out, - ) - written += n - if err != nil { - return written, err - } - n, err = fmt.Fprintf(out, " %v", value) - written += n - if err != nil { - return written, err - } - if metric.TimestampMs != nil { - n, err = fmt.Fprintf(out, " %v", *metric.TimestampMs) - written += n - if err != nil { - return written, err - } - } - n, err = out.Write([]byte{'\n'}) - written += n - if err != nil { - return written, err - } - return written, nil -} - -// labelPairsToText converts a slice of LabelPair proto messages plus the -// explicitly given additional label pair into text formatted as required by the -// text format and writes it to 'out'. An empty slice in combination with an -// empty string 'additionalLabelName' results in nothing being -// written. Otherwise, the label pairs are written, escaped as required by the -// text format, and enclosed in '{...}'. The function returns the number of -// bytes written and any error encountered. 
-func labelPairsToText( - in []*dto.LabelPair, - additionalLabelName, additionalLabelValue string, - out io.Writer, -) (int, error) { - if len(in) == 0 && additionalLabelName == "" { - return 0, nil - } - var written int - separator := '{' - for _, lp := range in { - n, err := fmt.Fprintf( - out, `%c%s="%s"`, - separator, lp.GetName(), escapeString(lp.GetValue(), true), - ) - written += n - if err != nil { - return written, err - } - separator = ',' - } - if additionalLabelName != "" { - n, err := fmt.Fprintf( - out, `%c%s="%s"`, - separator, additionalLabelName, - escapeString(additionalLabelValue, true), - ) - written += n - if err != nil { - return written, err - } - } - n, err := out.Write([]byte{'}'}) - written += n - if err != nil { - return written, err - } - return written, nil -} - -var ( - escape = strings.NewReplacer("\\", `\\`, "\n", `\n`) - escapeWithDoubleQuote = strings.NewReplacer("\\", `\\`, "\n", `\n`, "\"", `\"`) -) - -// escapeString replaces '\' by '\\', new line character by '\n', and - if -// includeDoubleQuote is true - '"' by '\"'. -func escapeString(v string, includeDoubleQuote bool) string { - if includeDoubleQuote { - return escapeWithDoubleQuote.Replace(v) - } - - return escape.Replace(v) -} diff --git a/vendor/github.com/m3db/prometheus_procfs/.travis.yml b/vendor/github.com/m3db/prometheus_procfs/.travis.yml deleted file mode 100644 index a9e28bf5d..000000000 --- a/vendor/github.com/m3db/prometheus_procfs/.travis.yml +++ /dev/null @@ -1,5 +0,0 @@ -sudo: false -language: go -go: - - 1.6.4 - - 1.7.4 diff --git a/vendor/github.com/m3db/prometheus_procfs/AUTHORS.md b/vendor/github.com/m3db/prometheus_procfs/AUTHORS.md deleted file mode 100644 index d55863560..000000000 --- a/vendor/github.com/m3db/prometheus_procfs/AUTHORS.md +++ /dev/null @@ -1,21 +0,0 @@ -The Prometheus project was started by Matt T. Proud (emeritus) and -Julius Volz in 2012. 
- -Maintainers of this repository: - -* Tobias Schmidt - -The following individuals have contributed code to this repository -(listed in alphabetical order): - -* Armen Baghumian -* Bjoern Rabenstein -* David Cournapeau -* Ji-Hoon, Seol -* Jonas Große Sundrup -* Julius Volz -* Matt Layher -* Matthias Rampke -* Nicky Gerritsen -* Rémi Audebert -* Tobias Schmidt diff --git a/vendor/github.com/m3db/prometheus_procfs/CONTRIBUTING.md b/vendor/github.com/m3db/prometheus_procfs/CONTRIBUTING.md deleted file mode 100644 index 5705f0fbe..000000000 --- a/vendor/github.com/m3db/prometheus_procfs/CONTRIBUTING.md +++ /dev/null @@ -1,18 +0,0 @@ -# Contributing - -Prometheus uses GitHub to manage reviews of pull requests. - -* If you have a trivial fix or improvement, go ahead and create a pull - request, addressing (with `@...`) one or more of the maintainers - (see [AUTHORS.md](AUTHORS.md)) in the description of the pull request. - -* If you plan to do something more involved, first discuss your ideas - on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers). - This will avoid unnecessary work and surely give you and us a good deal - of inspiration. - -* Relevant coding style guidelines are the [Go Code Review - Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments) - and the _Formatting and style_ section of Peter Bourgon's [Go: Best - Practices for Production - Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style). diff --git a/vendor/github.com/m3db/prometheus_procfs/Makefile b/vendor/github.com/m3db/prometheus_procfs/Makefile deleted file mode 100644 index c264a49d1..000000000 --- a/vendor/github.com/m3db/prometheus_procfs/Makefile +++ /dev/null @@ -1,6 +0,0 @@ -ci: - ! gofmt -l *.go | read nothing - go vet - go test -v ./... 
- go get github.com/golang/lint/golint - golint *.go diff --git a/vendor/github.com/m3db/prometheus_procfs/README.md b/vendor/github.com/m3db/prometheus_procfs/README.md deleted file mode 100644 index 6e7ee6b8b..000000000 --- a/vendor/github.com/m3db/prometheus_procfs/README.md +++ /dev/null @@ -1,10 +0,0 @@ -# procfs - -This procfs package provides functions to retrieve system, kernel and process -metrics from the pseudo-filesystem proc. - -*WARNING*: This package is a work in progress. Its API may still break in -backwards-incompatible ways without warnings. Use it at your own risk. - -[![GoDoc](https://godoc.org/github.com/prometheus/procfs?status.png)](https://godoc.org/github.com/prometheus/procfs) -[![Build Status](https://travis-ci.org/prometheus/procfs.svg?branch=master)](https://travis-ci.org/prometheus/procfs) diff --git a/vendor/github.com/m3db/prometheus_procfs/fs.go b/vendor/github.com/m3db/prometheus_procfs/fs.go deleted file mode 100644 index 49aaab050..000000000 --- a/vendor/github.com/m3db/prometheus_procfs/fs.go +++ /dev/null @@ -1,33 +0,0 @@ -package procfs - -import ( - "fmt" - "os" - "path" -) - -// FS represents the pseudo-filesystem proc, which provides an interface to -// kernel data structures. -type FS string - -// DefaultMountPoint is the common mount point of the proc filesystem. -const DefaultMountPoint = "/proc" - -// NewFS returns a new FS mounted under the given mountPoint. It will error -// if the mount point can't be read. -func NewFS(mountPoint string) (FS, error) { - info, err := os.Stat(mountPoint) - if err != nil { - return "", fmt.Errorf("could not read %s: %s", mountPoint, err) - } - if !info.IsDir() { - return "", fmt.Errorf("mount point %s is not a directory", mountPoint) - } - - return FS(mountPoint), nil -} - -// Path returns the path of the given subsystem relative to the procfs root. -func (fs FS) Path(p ...string) string { - return path.Join(append([]string{string(fs)}, p...)...) 
-} diff --git a/vendor/github.com/m3db/prometheus_procfs/mdstat.go b/vendor/github.com/m3db/prometheus_procfs/mdstat.go deleted file mode 100644 index d7a248c0d..000000000 --- a/vendor/github.com/m3db/prometheus_procfs/mdstat.go +++ /dev/null @@ -1,138 +0,0 @@ -package procfs - -import ( - "fmt" - "io/ioutil" - "regexp" - "strconv" - "strings" -) - -var ( - statuslineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[[U_]+\]`) - buildlineRE = regexp.MustCompile(`\((\d+)/\d+\)`) -) - -// MDStat holds info parsed from /proc/mdstat. -type MDStat struct { - // Name of the device. - Name string - // activity-state of the device. - ActivityState string - // Number of active disks. - DisksActive int64 - // Total number of disks the device consists of. - DisksTotal int64 - // Number of blocks the device holds. - BlocksTotal int64 - // Number of blocks on the device that are in sync. - BlocksSynced int64 -} - -// ParseMDStat parses an mdstat-file and returns a struct with the relevant infos. 
-func (fs FS) ParseMDStat() (mdstates []MDStat, err error) { - mdStatusFilePath := fs.Path("mdstat") - content, err := ioutil.ReadFile(mdStatusFilePath) - if err != nil { - return []MDStat{}, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err) - } - - mdStates := []MDStat{} - lines := strings.Split(string(content), "\n") - for i, l := range lines { - if l == "" { - continue - } - if l[0] == ' ' { - continue - } - if strings.HasPrefix(l, "Personalities") || strings.HasPrefix(l, "unused") { - continue - } - - mainLine := strings.Split(l, " ") - if len(mainLine) < 3 { - return mdStates, fmt.Errorf("error parsing mdline: %s", l) - } - mdName := mainLine[0] - activityState := mainLine[2] - - if len(lines) <= i+3 { - return mdStates, fmt.Errorf( - "error parsing %s: too few lines for md device %s", - mdStatusFilePath, - mdName, - ) - } - - active, total, size, err := evalStatusline(lines[i+1]) - if err != nil { - return mdStates, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err) - } - - // j is the line number of the syncing-line. - j := i + 2 - if strings.Contains(lines[i+2], "bitmap") { // skip bitmap line - j = i + 3 - } - - // If device is syncing at the moment, get the number of currently - // synced bytes, otherwise that number equals the size of the device. 
- syncedBlocks := size - if strings.Contains(lines[j], "recovery") || strings.Contains(lines[j], "resync") { - syncedBlocks, err = evalBuildline(lines[j]) - if err != nil { - return mdStates, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err) - } - } - - mdStates = append(mdStates, MDStat{ - Name: mdName, - ActivityState: activityState, - DisksActive: active, - DisksTotal: total, - BlocksTotal: size, - BlocksSynced: syncedBlocks, - }) - } - - return mdStates, nil -} - -func evalStatusline(statusline string) (active, total, size int64, err error) { - matches := statuslineRE.FindStringSubmatch(statusline) - if len(matches) != 4 { - return 0, 0, 0, fmt.Errorf("unexpected statusline: %s", statusline) - } - - size, err = strconv.ParseInt(matches[1], 10, 64) - if err != nil { - return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err) - } - - total, err = strconv.ParseInt(matches[2], 10, 64) - if err != nil { - return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err) - } - - active, err = strconv.ParseInt(matches[3], 10, 64) - if err != nil { - return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err) - } - - return active, total, size, nil -} - -func evalBuildline(buildline string) (syncedBlocks int64, err error) { - matches := buildlineRE.FindStringSubmatch(buildline) - if len(matches) != 2 { - return 0, fmt.Errorf("unexpected buildline: %s", buildline) - } - - syncedBlocks, err = strconv.ParseInt(matches[1], 10, 64) - if err != nil { - return 0, fmt.Errorf("%s in buildline: %s", err, buildline) - } - - return syncedBlocks, nil -} diff --git a/vendor/github.com/m3db/prometheus_procfs/stat.go b/vendor/github.com/m3db/prometheus_procfs/stat.go deleted file mode 100644 index 1ca217e8c..000000000 --- a/vendor/github.com/m3db/prometheus_procfs/stat.go +++ /dev/null @@ -1,56 +0,0 @@ -package procfs - -import ( - "bufio" - "fmt" - "os" - "strconv" - "strings" -) - -// Stat represents kernel/system statistics. 
-type Stat struct { - // Boot time in seconds since the Epoch. - BootTime int64 -} - -// NewStat returns kernel/system statistics read from /proc/stat. -func NewStat() (Stat, error) { - fs, err := NewFS(DefaultMountPoint) - if err != nil { - return Stat{}, err - } - - return fs.NewStat() -} - -// NewStat returns an information about current kernel/system statistics. -func (fs FS) NewStat() (Stat, error) { - f, err := os.Open(fs.Path("stat")) - if err != nil { - return Stat{}, err - } - defer f.Close() - - s := bufio.NewScanner(f) - for s.Scan() { - line := s.Text() - if !strings.HasPrefix(line, "btime") { - continue - } - fields := strings.Fields(line) - if len(fields) != 2 { - return Stat{}, fmt.Errorf("couldn't parse %s line %s", f.Name(), line) - } - i, err := strconv.ParseInt(fields[1], 10, 32) - if err != nil { - return Stat{}, fmt.Errorf("couldn't parse %s: %s", fields[1], err) - } - return Stat{BootTime: i}, nil - } - if err := s.Err(); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %s: %s", f.Name(), err) - } - - return Stat{}, fmt.Errorf("couldn't parse %s, missing btime", f.Name()) -} diff --git a/vendor/github.com/mschoch/smat/go.mod b/vendor/github.com/mschoch/smat/go.mod deleted file mode 100644 index 70c9eed57..000000000 --- a/vendor/github.com/mschoch/smat/go.mod +++ /dev/null @@ -1,3 +0,0 @@ -module github.com/mschoch/smat - -go 1.13 diff --git a/vendor/github.com/philhofer/fwd/LICENSE.md b/vendor/github.com/philhofer/fwd/LICENSE.md deleted file mode 100644 index 1ac6a81f6..000000000 --- a/vendor/github.com/philhofer/fwd/LICENSE.md +++ /dev/null @@ -1,7 +0,0 @@ -Copyright (c) 2014-2015, Philip Hofer - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit 
persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/philhofer/fwd/README.md b/vendor/github.com/philhofer/fwd/README.md deleted file mode 100644 index 38349af34..000000000 --- a/vendor/github.com/philhofer/fwd/README.md +++ /dev/null @@ -1,315 +0,0 @@ - -# fwd - import "github.com/philhofer/fwd" - -The `fwd` package provides a buffered reader -and writer. Each has methods that help improve -the encoding/decoding performance of some binary -protocols. - -The `fwd.Writer` and `fwd.Reader` type provide similar -functionality to their counterparts in `bufio`, plus -a few extra utility methods that simplify read-ahead -and write-ahead. I wrote this package to improve serialization -performance for http://github.com/tinylib/msgp, -where it provided about a 2x speedup over `bufio` for certain -workloads. However, care must be taken to understand the semantics of the -extra methods provided by this package, as they allow -the user to access and manipulate the buffer memory -directly. - -The extra methods for `fwd.Reader` are `Peek`, `Skip` -and `Next`. `(*fwd.Reader).Peek`, unlike `(*bufio.Reader).Peek`, -will re-allocate the read buffer in order to accommodate arbitrarily -large read-ahead. 
`(*fwd.Reader).Skip` skips the next `n` bytes -in the stream, and uses the `io.Seeker` interface if the underlying -stream implements it. `(*fwd.Reader).Next` returns a slice pointing -to the next `n` bytes in the read buffer (like `Peek`), but also -increments the read position. This allows users to process streams -in arbitrary block sizes without having to manage appropriately-sized -slices. Additionally, obviating the need to copy the data from the -buffer to another location in memory can improve performance dramatically -in CPU-bound applications. - -`fwd.Writer` only has one extra method, which is `(*fwd.Writer).Next`, which -returns a slice pointing to the next `n` bytes of the writer, and increments -the write position by the length of the returned slice. This allows users -to write directly to the end of the buffer. - - - - -## Constants -``` go -const ( - // DefaultReaderSize is the default size of the read buffer - DefaultReaderSize = 2048 -) -``` -``` go -const ( - // DefaultWriterSize is the - // default write buffer size. - DefaultWriterSize = 2048 -) -``` - - - -## type Reader -``` go -type Reader struct { - // contains filtered or unexported fields -} -``` -Reader is a buffered look-ahead reader - - - - - - - - - -### func NewReader -``` go -func NewReader(r io.Reader) *Reader -``` -NewReader returns a new *Reader that reads from 'r' - - -### func NewReaderSize -``` go -func NewReaderSize(r io.Reader, n int) *Reader -``` -NewReaderSize returns a new *Reader that -reads from 'r' and has a buffer size 'n' - - - - -### func (\*Reader) BufferSize -``` go -func (r *Reader) BufferSize() int -``` -BufferSize returns the total size of the buffer - - - -### func (\*Reader) Buffered -``` go -func (r *Reader) Buffered() int -``` -Buffered returns the number of bytes currently in the buffer - - - -### func (\*Reader) Next -``` go -func (r *Reader) Next(n int) ([]byte, error) -``` -Next returns the next 'n' bytes in the stream. 
-Unlike Peek, Next advances the reader position. -The returned bytes point to the same -data as the buffer, so the slice is -only valid until the next reader method call. -An EOF is considered an unexpected error. -If an the returned slice is less than the -length asked for, an error will be returned, -and the reader position will not be incremented. - - - -### func (\*Reader) Peek -``` go -func (r *Reader) Peek(n int) ([]byte, error) -``` -Peek returns the next 'n' buffered bytes, -reading from the underlying reader if necessary. -It will only return a slice shorter than 'n' bytes -if it also returns an error. Peek does not advance -the reader. EOF errors are *not* returned as -io.ErrUnexpectedEOF. - - - -### func (\*Reader) Read -``` go -func (r *Reader) Read(b []byte) (int, error) -``` -Read implements `io.Reader` - - - -### func (\*Reader) ReadByte -``` go -func (r *Reader) ReadByte() (byte, error) -``` -ReadByte implements `io.ByteReader` - - - -### func (\*Reader) ReadFull -``` go -func (r *Reader) ReadFull(b []byte) (int, error) -``` -ReadFull attempts to read len(b) bytes into -'b'. It returns the number of bytes read into -'b', and an error if it does not return len(b). -EOF is considered an unexpected error. - - - -### func (\*Reader) Reset -``` go -func (r *Reader) Reset(rd io.Reader) -``` -Reset resets the underlying reader -and the read buffer. - - - -### func (\*Reader) Skip -``` go -func (r *Reader) Skip(n int) (int, error) -``` -Skip moves the reader forward 'n' bytes. -Returns the number of bytes skipped and any -errors encountered. It is analogous to Seek(n, 1). -If the underlying reader implements io.Seeker, then -that method will be used to skip forward. - -If the reader encounters -an EOF before skipping 'n' bytes, it -returns io.ErrUnexpectedEOF. If the -underlying reader implements io.Seeker, then -those rules apply instead. (Many implementations -will not return `io.EOF` until the next call -to Read.) 
- - - -### func (\*Reader) WriteTo -``` go -func (r *Reader) WriteTo(w io.Writer) (int64, error) -``` -WriteTo implements `io.WriterTo` - - - -## type Writer -``` go -type Writer struct { - // contains filtered or unexported fields -} -``` -Writer is a buffered writer - - - - - - - - - -### func NewWriter -``` go -func NewWriter(w io.Writer) *Writer -``` -NewWriter returns a new writer -that writes to 'w' and has a buffer -that is `DefaultWriterSize` bytes. - - -### func NewWriterSize -``` go -func NewWriterSize(w io.Writer, size int) *Writer -``` -NewWriterSize returns a new writer -that writes to 'w' and has a buffer -that is 'size' bytes. - - - - -### func (\*Writer) BufferSize -``` go -func (w *Writer) BufferSize() int -``` -BufferSize returns the maximum size of the buffer. - - - -### func (\*Writer) Buffered -``` go -func (w *Writer) Buffered() int -``` -Buffered returns the number of buffered bytes -in the reader. - - - -### func (\*Writer) Flush -``` go -func (w *Writer) Flush() error -``` -Flush flushes any buffered bytes -to the underlying writer. - - - -### func (\*Writer) Next -``` go -func (w *Writer) Next(n int) ([]byte, error) -``` -Next returns the next 'n' free bytes -in the write buffer, flushing the writer -as necessary. Next will return `io.ErrShortBuffer` -if 'n' is greater than the size of the write buffer. -Calls to 'next' increment the write position by -the size of the returned buffer. - - - -### func (\*Writer) ReadFrom -``` go -func (w *Writer) ReadFrom(r io.Reader) (int64, error) -``` -ReadFrom implements `io.ReaderFrom` - - - -### func (\*Writer) Write -``` go -func (w *Writer) Write(p []byte) (int, error) -``` -Write implements `io.Writer` - - - -### func (\*Writer) WriteByte -``` go -func (w *Writer) WriteByte(b byte) error -``` -WriteByte implements `io.ByteWriter` - - - -### func (\*Writer) WriteString -``` go -func (w *Writer) WriteString(s string) (int, error) -``` -WriteString is analogous to Write, but it takes a string. 
- - - - - - - - - -- - - -Generated by [godoc2md](http://godoc.org/github.com/davecheney/godoc2md) \ No newline at end of file diff --git a/vendor/github.com/philhofer/fwd/reader.go b/vendor/github.com/philhofer/fwd/reader.go deleted file mode 100644 index 75be62ab0..000000000 --- a/vendor/github.com/philhofer/fwd/reader.go +++ /dev/null @@ -1,383 +0,0 @@ -// The `fwd` package provides a buffered reader -// and writer. Each has methods that help improve -// the encoding/decoding performance of some binary -// protocols. -// -// The `fwd.Writer` and `fwd.Reader` type provide similar -// functionality to their counterparts in `bufio`, plus -// a few extra utility methods that simplify read-ahead -// and write-ahead. I wrote this package to improve serialization -// performance for http://github.com/tinylib/msgp, -// where it provided about a 2x speedup over `bufio` for certain -// workloads. However, care must be taken to understand the semantics of the -// extra methods provided by this package, as they allow -// the user to access and manipulate the buffer memory -// directly. -// -// The extra methods for `fwd.Reader` are `Peek`, `Skip` -// and `Next`. `(*fwd.Reader).Peek`, unlike `(*bufio.Reader).Peek`, -// will re-allocate the read buffer in order to accommodate arbitrarily -// large read-ahead. `(*fwd.Reader).Skip` skips the next `n` bytes -// in the stream, and uses the `io.Seeker` interface if the underlying -// stream implements it. `(*fwd.Reader).Next` returns a slice pointing -// to the next `n` bytes in the read buffer (like `Peek`), but also -// increments the read position. This allows users to process streams -// in arbitrary block sizes without having to manage appropriately-sized -// slices. Additionally, obviating the need to copy the data from the -// buffer to another location in memory can improve performance dramatically -// in CPU-bound applications. 
-// -// `fwd.Writer` only has one extra method, which is `(*fwd.Writer).Next`, which -// returns a slice pointing to the next `n` bytes of the writer, and increments -// the write position by the length of the returned slice. This allows users -// to write directly to the end of the buffer. -// -package fwd - -import "io" - -const ( - // DefaultReaderSize is the default size of the read buffer - DefaultReaderSize = 2048 - - // minimum read buffer; straight from bufio - minReaderSize = 16 -) - -// NewReader returns a new *Reader that reads from 'r' -func NewReader(r io.Reader) *Reader { - return NewReaderSize(r, DefaultReaderSize) -} - -// NewReaderSize returns a new *Reader that -// reads from 'r' and has a buffer size 'n' -func NewReaderSize(r io.Reader, n int) *Reader { - rd := &Reader{ - r: r, - data: make([]byte, 0, max(minReaderSize, n)), - } - if s, ok := r.(io.Seeker); ok { - rd.rs = s - } - return rd -} - -// Reader is a buffered look-ahead reader -type Reader struct { - r io.Reader // underlying reader - - // data[n:len(data)] is buffered data; data[len(data):cap(data)] is free buffer space - data []byte // data - n int // read offset - state error // last read error - - // if the reader past to NewReader was - // also an io.Seeker, this is non-nil - rs io.Seeker -} - -// Reset resets the underlying reader -// and the read buffer. 
-func (r *Reader) Reset(rd io.Reader) { - r.r = rd - r.data = r.data[0:0] - r.n = 0 - r.state = nil - if s, ok := rd.(io.Seeker); ok { - r.rs = s - } else { - r.rs = nil - } -} - -// more() does one read on the underlying reader -func (r *Reader) more() { - // move data backwards so that - // the read offset is 0; this way - // we can supply the maximum number of - // bytes to the reader - if r.n != 0 { - if r.n < len(r.data) { - r.data = r.data[:copy(r.data[0:], r.data[r.n:])] - } else { - r.data = r.data[:0] - } - r.n = 0 - } - var a int - a, r.state = r.r.Read(r.data[len(r.data):cap(r.data)]) - if a == 0 && r.state == nil { - r.state = io.ErrNoProgress - return - } else if a > 0 && r.state == io.EOF { - // discard the io.EOF if we read more than 0 bytes. - // the next call to Read should return io.EOF again. - r.state = nil - } - r.data = r.data[:len(r.data)+a] -} - -// pop error -func (r *Reader) err() (e error) { - e, r.state = r.state, nil - return -} - -// pop error; EOF -> io.ErrUnexpectedEOF -func (r *Reader) noEOF() (e error) { - e, r.state = r.state, nil - if e == io.EOF { - e = io.ErrUnexpectedEOF - } - return -} - -// buffered bytes -func (r *Reader) buffered() int { return len(r.data) - r.n } - -// Buffered returns the number of bytes currently in the buffer -func (r *Reader) Buffered() int { return len(r.data) - r.n } - -// BufferSize returns the total size of the buffer -func (r *Reader) BufferSize() int { return cap(r.data) } - -// Peek returns the next 'n' buffered bytes, -// reading from the underlying reader if necessary. -// It will only return a slice shorter than 'n' bytes -// if it also returns an error. Peek does not advance -// the reader. EOF errors are *not* returned as -// io.ErrUnexpectedEOF. 
-func (r *Reader) Peek(n int) ([]byte, error) { - // in the degenerate case, - // we may need to realloc - // (the caller asked for more - // bytes than the size of the buffer) - if cap(r.data) < n { - old := r.data[r.n:] - r.data = make([]byte, n+r.buffered()) - r.data = r.data[:copy(r.data, old)] - r.n = 0 - } - - // keep filling until - // we hit an error or - // read enough bytes - for r.buffered() < n && r.state == nil { - r.more() - } - - // we must have hit an error - if r.buffered() < n { - return r.data[r.n:], r.err() - } - - return r.data[r.n : r.n+n], nil -} - -// Skip moves the reader forward 'n' bytes. -// Returns the number of bytes skipped and any -// errors encountered. It is analogous to Seek(n, 1). -// If the underlying reader implements io.Seeker, then -// that method will be used to skip forward. -// -// If the reader encounters -// an EOF before skipping 'n' bytes, it -// returns io.ErrUnexpectedEOF. If the -// underlying reader implements io.Seeker, then -// those rules apply instead. (Many implementations -// will not return `io.EOF` until the next call -// to Read.) -func (r *Reader) Skip(n int) (int, error) { - - // fast path - if r.buffered() >= n { - r.n += n - return n, nil - } - - // use seeker implementation - // if we can - if r.rs != nil { - return r.skipSeek(n) - } - - // loop on filling - // and then erasing - o := n - for r.buffered() < n && r.state == nil { - r.more() - // we can skip forward - // up to r.buffered() bytes - step := min(r.buffered(), n) - r.n += step - n -= step - } - // at this point, n should be - // 0 if everything went smoothly - return o - n, r.noEOF() -} - -// Next returns the next 'n' bytes in the stream. -// Unlike Peek, Next advances the reader position. -// The returned bytes point to the same -// data as the buffer, so the slice is -// only valid until the next reader method call. -// An EOF is considered an unexpected error. 
-// If an the returned slice is less than the -// length asked for, an error will be returned, -// and the reader position will not be incremented. -func (r *Reader) Next(n int) ([]byte, error) { - - // in case the buffer is too small - if cap(r.data) < n { - old := r.data[r.n:] - r.data = make([]byte, n+r.buffered()) - r.data = r.data[:copy(r.data, old)] - r.n = 0 - } - - // fill at least 'n' bytes - for r.buffered() < n && r.state == nil { - r.more() - } - - if r.buffered() < n { - return r.data[r.n:], r.noEOF() - } - out := r.data[r.n : r.n+n] - r.n += n - return out, nil -} - -// skipSeek uses the io.Seeker to seek forward. -// only call this function when n > r.buffered() -func (r *Reader) skipSeek(n int) (int, error) { - o := r.buffered() - // first, clear buffer - n -= o - r.n = 0 - r.data = r.data[:0] - - // then seek forward remaning bytes - i, err := r.rs.Seek(int64(n), 1) - return int(i) + o, err -} - -// Read implements `io.Reader` -func (r *Reader) Read(b []byte) (int, error) { - // if we have data in the buffer, just - // return that. - if r.buffered() != 0 { - x := copy(b, r.data[r.n:]) - r.n += x - return x, nil - } - var n int - // we have no buffered data; determine - // whether or not to buffer or call - // the underlying reader directly - if len(b) >= cap(r.data) { - n, r.state = r.r.Read(b) - } else { - r.more() - n = copy(b, r.data) - r.n = n - } - if n == 0 { - return 0, r.err() - } - return n, nil -} - -// ReadFull attempts to read len(b) bytes into -// 'b'. It returns the number of bytes read into -// 'b', and an error if it does not return len(b). -// EOF is considered an unexpected error. -func (r *Reader) ReadFull(b []byte) (int, error) { - var n int // read into b - var nn int // scratch - l := len(b) - // either read buffered data, - // or read directly for the underlying - // buffer, or fetch more buffered data. 
- for n < l && r.state == nil { - if r.buffered() != 0 { - nn = copy(b[n:], r.data[r.n:]) - n += nn - r.n += nn - } else if l-n > cap(r.data) { - nn, r.state = r.r.Read(b[n:]) - n += nn - } else { - r.more() - } - } - if n < l { - return n, r.noEOF() - } - return n, nil -} - -// ReadByte implements `io.ByteReader` -func (r *Reader) ReadByte() (byte, error) { - for r.buffered() < 1 && r.state == nil { - r.more() - } - if r.buffered() < 1 { - return 0, r.err() - } - b := r.data[r.n] - r.n++ - return b, nil -} - -// WriteTo implements `io.WriterTo` -func (r *Reader) WriteTo(w io.Writer) (int64, error) { - var ( - i int64 - ii int - err error - ) - // first, clear buffer - if r.buffered() > 0 { - ii, err = w.Write(r.data[r.n:]) - i += int64(ii) - if err != nil { - return i, err - } - r.data = r.data[0:0] - r.n = 0 - } - for r.state == nil { - // here we just do - // 1:1 reads and writes - r.more() - if r.buffered() > 0 { - ii, err = w.Write(r.data) - i += int64(ii) - if err != nil { - return i, err - } - r.data = r.data[0:0] - r.n = 0 - } - } - if r.state != io.EOF { - return i, r.err() - } - return i, nil -} - -func min(a int, b int) int { - if a < b { - return a - } - return b -} - -func max(a int, b int) int { - if a < b { - return b - } - return a -} diff --git a/vendor/github.com/philhofer/fwd/writer.go b/vendor/github.com/philhofer/fwd/writer.go deleted file mode 100644 index 2dc392a91..000000000 --- a/vendor/github.com/philhofer/fwd/writer.go +++ /dev/null @@ -1,224 +0,0 @@ -package fwd - -import "io" - -const ( - // DefaultWriterSize is the - // default write buffer size. - DefaultWriterSize = 2048 - - minWriterSize = minReaderSize -) - -// Writer is a buffered writer -type Writer struct { - w io.Writer // writer - buf []byte // 0:len(buf) is bufered data -} - -// NewWriter returns a new writer -// that writes to 'w' and has a buffer -// that is `DefaultWriterSize` bytes. 
-func NewWriter(w io.Writer) *Writer { - if wr, ok := w.(*Writer); ok { - return wr - } - return &Writer{ - w: w, - buf: make([]byte, 0, DefaultWriterSize), - } -} - -// NewWriterSize returns a new writer -// that writes to 'w' and has a buffer -// that is 'size' bytes. -func NewWriterSize(w io.Writer, size int) *Writer { - if wr, ok := w.(*Writer); ok && cap(wr.buf) >= size { - return wr - } - return &Writer{ - w: w, - buf: make([]byte, 0, max(size, minWriterSize)), - } -} - -// Buffered returns the number of buffered bytes -// in the reader. -func (w *Writer) Buffered() int { return len(w.buf) } - -// BufferSize returns the maximum size of the buffer. -func (w *Writer) BufferSize() int { return cap(w.buf) } - -// Flush flushes any buffered bytes -// to the underlying writer. -func (w *Writer) Flush() error { - l := len(w.buf) - if l > 0 { - n, err := w.w.Write(w.buf) - - // if we didn't write the whole - // thing, copy the unwritten - // bytes to the beginnning of the - // buffer. - if n < l && n > 0 { - w.pushback(n) - if err == nil { - err = io.ErrShortWrite - } - } - if err != nil { - return err - } - w.buf = w.buf[:0] - return nil - } - return nil -} - -// Write implements `io.Writer` -func (w *Writer) Write(p []byte) (int, error) { - c, l, ln := cap(w.buf), len(w.buf), len(p) - avail := c - l - - // requires flush - if avail < ln { - if err := w.Flush(); err != nil { - return 0, err - } - l = len(w.buf) - } - // too big to fit in buffer; - // write directly to w.w - if c < ln { - return w.w.Write(p) - } - - // grow buf slice; copy; return - w.buf = w.buf[:l+ln] - return copy(w.buf[l:], p), nil -} - -// WriteString is analogous to Write, but it takes a string. 
-func (w *Writer) WriteString(s string) (int, error) { - c, l, ln := cap(w.buf), len(w.buf), len(s) - avail := c - l - - // requires flush - if avail < ln { - if err := w.Flush(); err != nil { - return 0, err - } - l = len(w.buf) - } - // too big to fit in buffer; - // write directly to w.w - // - // yes, this is unsafe. *but* - // io.Writer is not allowed - // to mutate its input or - // maintain a reference to it, - // per the spec in package io. - // - // plus, if the string is really - // too big to fit in the buffer, then - // creating a copy to write it is - // expensive (and, strictly speaking, - // unnecessary) - if c < ln { - return w.w.Write(unsafestr(s)) - } - - // grow buf slice; copy; return - w.buf = w.buf[:l+ln] - return copy(w.buf[l:], s), nil -} - -// WriteByte implements `io.ByteWriter` -func (w *Writer) WriteByte(b byte) error { - if len(w.buf) == cap(w.buf) { - if err := w.Flush(); err != nil { - return err - } - } - w.buf = append(w.buf, b) - return nil -} - -// Next returns the next 'n' free bytes -// in the write buffer, flushing the writer -// as necessary. Next will return `io.ErrShortBuffer` -// if 'n' is greater than the size of the write buffer. -// Calls to 'next' increment the write position by -// the size of the returned buffer. -func (w *Writer) Next(n int) ([]byte, error) { - c, l := cap(w.buf), len(w.buf) - if n > c { - return nil, io.ErrShortBuffer - } - avail := c - l - if avail < n { - if err := w.Flush(); err != nil { - return nil, err - } - l = len(w.buf) - } - w.buf = w.buf[:l+n] - return w.buf[l:], nil -} - -// take the bytes from w.buf[n:len(w.buf)] -// and put them at the beginning of w.buf, -// and resize to the length of the copied segment. 
-func (w *Writer) pushback(n int) { - w.buf = w.buf[:copy(w.buf, w.buf[n:])] -} - -// ReadFrom implements `io.ReaderFrom` -func (w *Writer) ReadFrom(r io.Reader) (int64, error) { - // anticipatory flush - if err := w.Flush(); err != nil { - return 0, err - } - - w.buf = w.buf[0:cap(w.buf)] // expand buffer - - var nn int64 // written - var err error // error - var x int // read - - // 1:1 reads and writes - for err == nil { - x, err = r.Read(w.buf) - if x > 0 { - n, werr := w.w.Write(w.buf[:x]) - nn += int64(n) - - if err != nil { - if n < x && n > 0 { - w.pushback(n - x) - } - return nn, werr - } - if n < x { - w.pushback(n - x) - return nn, io.ErrShortWrite - } - } else if err == nil { - err = io.ErrNoProgress - break - } - } - if err != io.EOF { - return nn, err - } - - // we only clear here - // because we are sure - // the writes have - // succeeded. otherwise, - // we retain the data in case - // future writes succeed. - w.buf = w.buf[0:0] - - return nn, nil -} diff --git a/vendor/github.com/philhofer/fwd/writer_appengine.go b/vendor/github.com/philhofer/fwd/writer_appengine.go deleted file mode 100644 index e367f3931..000000000 --- a/vendor/github.com/philhofer/fwd/writer_appengine.go +++ /dev/null @@ -1,5 +0,0 @@ -// +build appengine - -package fwd - -func unsafestr(s string) []byte { return []byte(s) } diff --git a/vendor/github.com/philhofer/fwd/writer_unsafe.go b/vendor/github.com/philhofer/fwd/writer_unsafe.go deleted file mode 100644 index a0bf453b3..000000000 --- a/vendor/github.com/philhofer/fwd/writer_unsafe.go +++ /dev/null @@ -1,18 +0,0 @@ -// +build !appengine - -package fwd - -import ( - "reflect" - "unsafe" -) - -// unsafe cast string as []byte -func unsafestr(b string) []byte { - l := len(b) - return *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{ - Len: l, - Cap: l, - Data: (*reflect.StringHeader)(unsafe.Pointer(&b)).Data, - })) -} diff --git a/vendor/github.com/m3db/prometheus_client_golang/LICENSE 
b/vendor/github.com/prometheus/client_golang/LICENSE similarity index 100% rename from vendor/github.com/m3db/prometheus_client_golang/LICENSE rename to vendor/github.com/prometheus/client_golang/LICENSE diff --git a/vendor/github.com/m3db/prometheus_client_golang/NOTICE b/vendor/github.com/prometheus/client_golang/NOTICE similarity index 100% rename from vendor/github.com/m3db/prometheus_client_golang/NOTICE rename to vendor/github.com/prometheus/client_golang/NOTICE diff --git a/vendor/github.com/m3db/prometheus_client_golang/prometheus/.gitignore b/vendor/github.com/prometheus/client_golang/prometheus/.gitignore similarity index 100% rename from vendor/github.com/m3db/prometheus_client_golang/prometheus/.gitignore rename to vendor/github.com/prometheus/client_golang/prometheus/.gitignore diff --git a/vendor/github.com/m3db/prometheus_client_golang/prometheus/README.md b/vendor/github.com/prometheus/client_golang/prometheus/README.md similarity index 100% rename from vendor/github.com/m3db/prometheus_client_golang/prometheus/README.md rename to vendor/github.com/prometheus/client_golang/prometheus/README.md diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collector.go b/vendor/github.com/prometheus/client_golang/prometheus/collector.go new file mode 100644 index 000000000..1e839650d --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/collector.go @@ -0,0 +1,120 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +// Collector is the interface implemented by anything that can be used by +// Prometheus to collect metrics. A Collector has to be registered for +// collection. See Registerer.Register. +// +// The stock metrics provided by this package (Gauge, Counter, Summary, +// Histogram, Untyped) are also Collectors (which only ever collect one metric, +// namely itself). An implementer of Collector may, however, collect multiple +// metrics in a coordinated fashion and/or create metrics on the fly. Examples +// for collectors already implemented in this library are the metric vectors +// (i.e. collection of multiple instances of the same Metric but with different +// label values) like GaugeVec or SummaryVec, and the ExpvarCollector. +type Collector interface { + // Describe sends the super-set of all possible descriptors of metrics + // collected by this Collector to the provided channel and returns once + // the last descriptor has been sent. The sent descriptors fulfill the + // consistency and uniqueness requirements described in the Desc + // documentation. + // + // It is valid if one and the same Collector sends duplicate + // descriptors. Those duplicates are simply ignored. However, two + // different Collectors must not send duplicate descriptors. + // + // Sending no descriptor at all marks the Collector as “unchecked”, + // i.e. no checks will be performed at registration time, and the + // Collector may yield any Metric it sees fit in its Collect method. + // + // This method idempotently sends the same descriptors throughout the + // lifetime of the Collector. It may be called concurrently and + // therefore must be implemented in a concurrency safe way. 
+ // + // If a Collector encounters an error while executing this method, it + // must send an invalid descriptor (created with NewInvalidDesc) to + // signal the error to the registry. + Describe(chan<- *Desc) + // Collect is called by the Prometheus registry when collecting + // metrics. The implementation sends each collected metric via the + // provided channel and returns once the last metric has been sent. The + // descriptor of each sent metric is one of those returned by Describe + // (unless the Collector is unchecked, see above). Returned metrics that + // share the same descriptor must differ in their variable label + // values. + // + // This method may be called concurrently and must therefore be + // implemented in a concurrency safe way. Blocking occurs at the expense + // of total performance of rendering all registered metrics. Ideally, + // Collector implementations support concurrent readers. + Collect(chan<- Metric) +} + +// DescribeByCollect is a helper to implement the Describe method of a custom +// Collector. It collects the metrics from the provided Collector and sends +// their descriptors to the provided channel. +// +// If a Collector collects the same metrics throughout its lifetime, its +// Describe method can simply be implemented as: +// +// func (c customCollector) Describe(ch chan<- *Desc) { +// DescribeByCollect(c, ch) +// } +// +// However, this will not work if the metrics collected change dynamically over +// the lifetime of the Collector in a way that their combined set of descriptors +// changes as well. The shortcut implementation will then violate the contract +// of the Describe method. If a Collector sometimes collects no metrics at all +// (for example vectors like CounterVec, GaugeVec, etc., which only collect +// metrics after a metric with a fully specified label set has been accessed), +// it might even get registered as an unchecked Collector (cf. the Register +// method of the Registerer interface). 
Hence, only use this shortcut +// implementation of Describe if you are certain to fulfill the contract. +// +// The Collector example demonstrates a use of DescribeByCollect. +func DescribeByCollect(c Collector, descs chan<- *Desc) { + metrics := make(chan Metric) + go func() { + c.Collect(metrics) + close(metrics) + }() + for m := range metrics { + descs <- m.Desc() + } +} + +// selfCollector implements Collector for a single Metric so that the Metric +// collects itself. Add it as an anonymous field to a struct that implements +// Metric, and call init with the Metric itself as an argument. +type selfCollector struct { + self Metric +} + +// init provides the selfCollector with a reference to the metric it is supposed +// to collect. It is usually called within the factory function to create a +// metric. See example. +func (c *selfCollector) init(self Metric) { + c.self = self +} + +// Describe implements Collector. +func (c *selfCollector) Describe(ch chan<- *Desc) { + ch <- c.self.Desc() +} + +// Collect implements Collector. +func (c *selfCollector) Collect(ch chan<- Metric) { + ch <- c.self +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/counter.go b/vendor/github.com/prometheus/client_golang/prometheus/counter.go new file mode 100644 index 000000000..3f8fd790d --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/counter.go @@ -0,0 +1,321 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "errors" + "math" + "sync/atomic" + "time" + + dto "github.com/prometheus/client_model/go" +) + +// Counter is a Metric that represents a single numerical value that only ever +// goes up. That implies that it cannot be used to count items whose number can +// also go down, e.g. the number of currently running goroutines. Those +// "counters" are represented by Gauges. +// +// A Counter is typically used to count requests served, tasks completed, errors +// occurred, etc. +// +// To create Counter instances, use NewCounter. +type Counter interface { + Metric + Collector + + // Inc increments the counter by 1. Use Add to increment it by arbitrary + // non-negative values. + Inc() + // Add adds the given value to the counter. It panics if the value is < + // 0. + Add(float64) +} + +// ExemplarAdder is implemented by Counters that offer the option of adding a +// value to the Counter together with an exemplar. Its AddWithExemplar method +// works like the Add method of the Counter interface but also replaces the +// currently saved exemplar (if any) with a new one, created from the provided +// value, the current time as timestamp, and the provided labels. Empty Labels +// will lead to a valid (label-less) exemplar. But if Labels is nil, the current +// exemplar is left in place. AddWithExemplar panics if the value is < 0, if any +// of the provided labels are invalid, or if the provided labels contain more +// than 64 runes in total. +type ExemplarAdder interface { + AddWithExemplar(value float64, exemplar Labels) +} + +// CounterOpts is an alias for Opts. See there for doc comments. +type CounterOpts Opts + +// NewCounter creates a new Counter based on the provided CounterOpts. +// +// The returned implementation also implements ExemplarAdder. It is safe to +// perform the corresponding type assertion. 
+// +// The returned implementation tracks the counter value in two separate +// variables, a float64 and a uint64. The latter is used to track calls of the +// Inc method and calls of the Add method with a value that can be represented +// as a uint64. This allows atomic increments of the counter with optimal +// performance. (It is common to have an Inc call in very hot execution paths.) +// Both internal tracking values are added up in the Write method. This has to +// be taken into account when it comes to precision and overflow behavior. +func NewCounter(opts CounterOpts) Counter { + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ) + result := &counter{desc: desc, labelPairs: desc.constLabelPairs, now: time.Now} + result.init(result) // Init self-collection. + return result +} + +type counter struct { + // valBits contains the bits of the represented float64 value, while + // valInt stores values that are exact integers. Both have to go first + // in the struct to guarantee alignment for atomic operations. + // http://golang.org/pkg/sync/atomic/#pkg-note-BUG + valBits uint64 + valInt uint64 + + selfCollector + desc *Desc + + labelPairs []*dto.LabelPair + exemplar atomic.Value // Containing nil or a *dto.Exemplar. + + now func() time.Time // To mock out time.Now() for testing. 
+} + +func (c *counter) Desc() *Desc { + return c.desc +} + +func (c *counter) Add(v float64) { + if v < 0 { + panic(errors.New("counter cannot decrease in value")) + } + + ival := uint64(v) + if float64(ival) == v { + atomic.AddUint64(&c.valInt, ival) + return + } + + for { + oldBits := atomic.LoadUint64(&c.valBits) + newBits := math.Float64bits(math.Float64frombits(oldBits) + v) + if atomic.CompareAndSwapUint64(&c.valBits, oldBits, newBits) { + return + } + } +} + +func (c *counter) AddWithExemplar(v float64, e Labels) { + c.Add(v) + c.updateExemplar(v, e) +} + +func (c *counter) Inc() { + atomic.AddUint64(&c.valInt, 1) +} + +func (c *counter) Write(out *dto.Metric) error { + fval := math.Float64frombits(atomic.LoadUint64(&c.valBits)) + ival := atomic.LoadUint64(&c.valInt) + val := fval + float64(ival) + + var exemplar *dto.Exemplar + if e := c.exemplar.Load(); e != nil { + exemplar = e.(*dto.Exemplar) + } + + return populateMetric(CounterValue, val, c.labelPairs, exemplar, out) +} + +func (c *counter) updateExemplar(v float64, l Labels) { + if l == nil { + return + } + e, err := newExemplar(v, c.now(), l) + if err != nil { + panic(err) + } + c.exemplar.Store(e) +} + +// CounterVec is a Collector that bundles a set of Counters that all share the +// same Desc, but have different values for their variable labels. This is used +// if you want to count the same thing partitioned by various dimensions +// (e.g. number of HTTP requests, partitioned by response code and +// method). Create instances with NewCounterVec. +type CounterVec struct { + *MetricVec +} + +// NewCounterVec creates a new CounterVec based on the provided CounterOpts and +// partitioned by the given label names. 
+func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec { + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + labelNames, + opts.ConstLabels, + ) + return &CounterVec{ + MetricVec: NewMetricVec(desc, func(lvs ...string) Metric { + if len(lvs) != len(desc.variableLabels) { + panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs)) + } + result := &counter{desc: desc, labelPairs: MakeLabelPairs(desc, lvs), now: time.Now} + result.init(result) // Init self-collection. + return result + }), + } +} + +// GetMetricWithLabelValues returns the Counter for the given slice of label +// values (same order as the variable labels in Desc). If that combination of +// label values is accessed for the first time, a new Counter is created. +// +// It is possible to call this method without using the returned Counter to only +// create the new Counter but leave it at its starting value 0. See also the +// SummaryVec example. +// +// Keeping the Counter for later use is possible (and should be considered if +// performance is critical), but keep in mind that Reset, DeleteLabelValues and +// Delete can be used to delete the Counter from the CounterVec. In that case, +// the Counter will still exist, but it will not be exported anymore, even if a +// Counter with the same label values is created later. +// +// An error is returned if the number of label values is not the same as the +// number of variable labels in Desc (minus any curried labels). +// +// Note that for more than one label value, this method is prone to mistakes +// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as +// an alternative to avoid that type of mistake. For higher label numbers, the +// latter has a much more readable (albeit more verbose) syntax, but it comes +// with a performance overhead (for creating and processing the Labels map). +// See also the GaugeVec example. 
+func (v *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) { + metric, err := v.MetricVec.GetMetricWithLabelValues(lvs...) + if metric != nil { + return metric.(Counter), err + } + return nil, err +} + +// GetMetricWith returns the Counter for the given Labels map (the label names +// must match those of the variable labels in Desc). If that label map is +// accessed for the first time, a new Counter is created. Implications of +// creating a Counter without using it and keeping the Counter for later use are +// the same as for GetMetricWithLabelValues. +// +// An error is returned if the number and names of the Labels are inconsistent +// with those of the variable labels in Desc (minus any curried labels). +// +// This method is used for the same purpose as +// GetMetricWithLabelValues(...string). See there for pros and cons of the two +// methods. +func (v *CounterVec) GetMetricWith(labels Labels) (Counter, error) { + metric, err := v.MetricVec.GetMetricWith(labels) + if metric != nil { + return metric.(Counter), err + } + return nil, err +} + +// WithLabelValues works as GetMetricWithLabelValues, but panics where +// GetMetricWithLabelValues would have returned an error. Not returning an +// error allows shortcuts like +// myVec.WithLabelValues("404", "GET").Add(42) +func (v *CounterVec) WithLabelValues(lvs ...string) Counter { + c, err := v.GetMetricWithLabelValues(lvs...) + if err != nil { + panic(err) + } + return c +} + +// With works as GetMetricWith, but panics where GetMetricWithLabels would have +// returned an error. Not returning an error allows shortcuts like +// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42) +func (v *CounterVec) With(labels Labels) Counter { + c, err := v.GetMetricWith(labels) + if err != nil { + panic(err) + } + return c +} + +// CurryWith returns a vector curried with the provided labels, i.e. 
the +// returned vector has those labels pre-set for all labeled operations performed +// on it. The cardinality of the curried vector is reduced accordingly. The +// order of the remaining labels stays the same (just with the curried labels +// taken out of the sequence – which is relevant for the +// (GetMetric)WithLabelValues methods). It is possible to curry a curried +// vector, but only with labels not yet used for currying before. +// +// The metrics contained in the CounterVec are shared between the curried and +// uncurried vectors. They are just accessed differently. Curried and uncurried +// vectors behave identically in terms of collection. Only one must be +// registered with a given registry (usually the uncurried version). The Reset +// method deletes all metrics, even if called on a curried vector. +func (v *CounterVec) CurryWith(labels Labels) (*CounterVec, error) { + vec, err := v.MetricVec.CurryWith(labels) + if vec != nil { + return &CounterVec{vec}, err + } + return nil, err +} + +// MustCurryWith works as CurryWith but panics where CurryWith would have +// returned an error. +func (v *CounterVec) MustCurryWith(labels Labels) *CounterVec { + vec, err := v.CurryWith(labels) + if err != nil { + panic(err) + } + return vec +} + +// CounterFunc is a Counter whose value is determined at collect time by calling a +// provided function. +// +// To create CounterFunc instances, use NewCounterFunc. +type CounterFunc interface { + Metric + Collector +} + +// NewCounterFunc creates a new CounterFunc based on the provided +// CounterOpts. The value reported is determined by calling the given function +// from within the Write method. Take into account that metric collection may +// happen concurrently. If that results in concurrent calls to Write, like in +// the case where a CounterFunc is directly registered with Prometheus, the +// provided function must be concurrency-safe. 
The function should also honor +// the contract for a Counter (values only go up, not down), but compliance will +// not be checked. +// +// Check out the ExampleGaugeFunc examples for the similar GaugeFunc. +func NewCounterFunc(opts CounterOpts, function func() float64) CounterFunc { + return newValueFunc(NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ), CounterValue, function) +} diff --git a/vendor/github.com/m3db/prometheus_client_golang/prometheus/desc.go b/vendor/github.com/prometheus/client_golang/prometheus/desc.go similarity index 76% rename from vendor/github.com/m3db/prometheus_client_golang/prometheus/desc.go rename to vendor/github.com/prometheus/client_golang/prometheus/desc.go index cadbc69b3..4bb816ab7 100644 --- a/vendor/github.com/m3db/prometheus_client_golang/prometheus/desc.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/desc.go @@ -16,33 +16,17 @@ package prometheus import ( "errors" "fmt" - "regexp" "sort" "strings" + "github.com/cespare/xxhash/v2" + //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. "github.com/golang/protobuf/proto" + "github.com/prometheus/common/model" - dto "github.com/m3db/prometheus_client_model/go" + dto "github.com/prometheus/client_model/go" ) -var ( - metricNameRE = regexp.MustCompile(`^[a-zA-Z_][a-zA-Z0-9_:]*$`) - labelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$") -) - -// reservedLabelPrefix is a prefix which is not legal in user-supplied -// label names. -const reservedLabelPrefix = "__" - -// Labels represents a collection of label name -> value mappings. This type is -// commonly used with the With(Labels) and GetMetricWith(Labels) methods of -// metric vector Collectors, e.g.: -// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) -// -// The other use-case is the specification of constant label pairs in Opts or to -// create a Desc. 
-type Labels map[string]string - // Desc is the descriptor used by every Prometheus Metric. It is essentially // the immutable meta-data of a Metric. The normal Metric implementations // included in this package manage their Desc under the hood. Users only have to @@ -67,7 +51,7 @@ type Desc struct { // constLabelPairs contains precalculated DTO label pairs based on // the constant labels. constLabelPairs []*dto.LabelPair - // VariableLabels contains names of labels for which the metric + // variableLabels contains names of labels for which the metric // maintains variable values. variableLabels []string // id is a hash of the values of the ConstLabels and fqName. This @@ -78,32 +62,27 @@ type Desc struct { // Help string. Each Desc with the same fqName must have the same // dimHash. dimHash uint64 - // err is an error that occured during construction. It is reported on + // err is an error that occurred during construction. It is reported on // registration time. err error } // NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc // and will be reported on registration time. variableLabels and constLabels can -// be nil if no such labels should be set. fqName and help must not be empty. +// be nil if no such labels should be set. fqName must not be empty. // // variableLabels only contain the label names. Their label values are variable // and therefore not part of the Desc. (They are managed within the Metric.) // // For constLabels, the label values are constant. Therefore, they are fully -// specified in the Desc. See the Opts documentation for the implications of -// constant labels. +// specified in the Desc. See the Collector example for a usage pattern. 
func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc { d := &Desc{ fqName: fqName, help: help, variableLabels: variableLabels, } - if help == "" { - d.err = errors.New("empty help string") - return d - } - if !metricNameRE.MatchString(fqName) { + if !model.IsValidMetricName(model.LabelValue(fqName)) { d.err = fmt.Errorf("%q is not a valid metric name", fqName) return d } @@ -116,7 +95,7 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) * // First add only the const label names and sort them... for labelName := range constLabels { if !checkLabelName(labelName) { - d.err = fmt.Errorf("%q is not a valid label name", labelName) + d.err = fmt.Errorf("%q is not a valid label name for metric %q", labelName, fqName) return d } labelNames = append(labelNames, labelName) @@ -127,12 +106,18 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) * for _, labelName := range labelNames { labelValues = append(labelValues, constLabels[labelName]) } + // Validate the const label values. They can't have a wrong cardinality, so + // use in len(labelValues) as expectedNumberOfValues. + if err := validateLabelValues(labelValues, len(labelValues)); err != nil { + d.err = err + return d + } // Now add the variable label names, but prefix them with something that // cannot be in a regular label name. That prevents matching the label // dimension with a different mix between preset and variable labels. 
for _, labelName := range variableLabels { if !checkLabelName(labelName) { - d.err = fmt.Errorf("%q is not a valid label name", labelName) + d.err = fmt.Errorf("%q is not a valid label name for metric %q", labelName, fqName) return d } labelNames = append(labelNames, "$"+labelName) @@ -142,24 +127,25 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) * d.err = errors.New("duplicate label names") return d } - vh := hashNew() + + xxh := xxhash.New() for _, val := range labelValues { - vh = hashAdd(vh, val) - vh = hashAddByte(vh, separatorByte) + xxh.WriteString(val) + xxh.Write(separatorByteSlice) } - d.id = vh + d.id = xxh.Sum64() // Sort labelNames so that order doesn't matter for the hash. sort.Strings(labelNames) // Now hash together (in this order) the help string and the sorted // label names. - lh := hashNew() - lh = hashAdd(lh, help) - lh = hashAddByte(lh, separatorByte) + xxh.Reset() + xxh.WriteString(help) + xxh.Write(separatorByteSlice) for _, labelName := range labelNames { - lh = hashAdd(lh, labelName) - lh = hashAddByte(lh, separatorByte) + xxh.WriteString(labelName) + xxh.Write(separatorByteSlice) } - d.dimHash = lh + d.dimHash = xxh.Sum64() d.constLabelPairs = make([]*dto.LabelPair, 0, len(constLabels)) for n, v := range constLabels { @@ -168,7 +154,7 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) * Value: proto.String(v), }) } - sort.Sort(LabelPairSorter(d.constLabelPairs)) + sort.Sort(labelPairSorter(d.constLabelPairs)) return d } @@ -198,8 +184,3 @@ func (d *Desc) String() string { d.variableLabels, ) } - -func checkLabelName(l string) bool { - return labelNameRE.MatchString(l) && - !strings.HasPrefix(l, reservedLabelPrefix) -} diff --git a/vendor/github.com/m3db/prometheus_client_golang/prometheus/doc.go b/vendor/github.com/prometheus/client_golang/prometheus/doc.go similarity index 59% rename from vendor/github.com/m3db/prometheus_client_golang/prometheus/doc.go rename to 
vendor/github.com/prometheus/client_golang/prometheus/doc.go index b062af9eb..98450125d 100644 --- a/vendor/github.com/m3db/prometheus_client_golang/prometheus/doc.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/doc.go @@ -11,13 +11,15 @@ // See the License for the specific language governing permissions and // limitations under the License. -// Package prometheus provides metrics primitives to instrument code for -// monitoring. It also offers a registry for metrics. Sub-packages allow to -// expose the registered metrics via HTTP (package promhttp) or push them to a -// Pushgateway (package push). +// Package prometheus is the core instrumentation package. It provides metrics +// primitives to instrument code for monitoring. It also offers a registry for +// metrics. Sub-packages allow to expose the registered metrics via HTTP +// (package promhttp) or push them to a Pushgateway (package push). There is +// also a sub-package promauto, which provides metrics constructors with +// automatic registration. // // All exported functions and methods are safe to be used concurrently unless -//specified otherwise. +// specified otherwise. // // A Basic Example // @@ -26,10 +28,11 @@ // package main // // import ( +// "log" // "net/http" // -// "github.com/m3db/prometheus_client_golang/prometheus" -// "github.com/m3db/prometheus_client_golang/prometheus/promhttp" +// "github.com/prometheus/client_golang/prometheus" +// "github.com/prometheus/client_golang/prometheus/promhttp" // ) // // var ( @@ -59,7 +62,7 @@ // // The Handler function provides a default handler to expose metrics // // via an HTTP server. "/metrics" is the usual endpoint for that. // http.Handle("/metrics", promhttp.Handler()) -// http.ListenAndServe(":8080", nil) +// log.Fatal(http.ListenAndServe(":8080", nil)) // } // // @@ -69,34 +72,33 @@ // Metrics // // The number of exported identifiers in this package might appear a bit -// overwhelming. 
Hovever, in addition to the basic plumbing shown in the example +// overwhelming. However, in addition to the basic plumbing shown in the example // above, you only need to understand the different metric types and their -// vector versions for basic usage. +// vector versions for basic usage. Furthermore, if you are not concerned with +// fine-grained control of when and how to register metrics with the registry, +// have a look at the promauto package, which will effectively allow you to +// ignore registration altogether in simple cases. // // Above, you have already touched the Counter and the Gauge. There are two more // advanced metric types: the Summary and Histogram. A more thorough description // of those four metric types can be found in the Prometheus docs: // https://prometheus.io/docs/concepts/metric_types/ // -// A fifth "type" of metric is Untyped. It behaves like a Gauge, but signals the -// Prometheus server not to assume anything about its type. -// -// In addition to the fundamental metric types Gauge, Counter, Summary, -// Histogram, and Untyped, a very important part of the Prometheus data model is -// the partitioning of samples along dimensions called labels, which results in +// In addition to the fundamental metric types Gauge, Counter, Summary, and +// Histogram, a very important part of the Prometheus data model is the +// partitioning of samples along dimensions called labels, which results in // metric vectors. The fundamental types are GaugeVec, CounterVec, SummaryVec, -// HistogramVec, and UntypedVec. +// and HistogramVec. // // While only the fundamental metric types implement the Metric interface, both // the metrics and their vector versions implement the Collector interface. A // Collector manages the collection of a number of Metrics, but for convenience, -// a Metric can also “collect itself”. 
Note that Gauge, Counter, Summary, -// Histogram, and Untyped are interfaces themselves while GaugeVec, CounterVec, -// SummaryVec, HistogramVec, and UntypedVec are not. +// a Metric can also “collect itself”. Note that Gauge, Counter, Summary, and +// Histogram are interfaces themselves while GaugeVec, CounterVec, SummaryVec, +// and HistogramVec are not. // // To create instances of Metrics and their vector versions, you need a suitable -// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts, -// HistogramOpts, or UntypedOpts. +// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts, or HistogramOpts. // // Custom Collectors and constant Metrics // @@ -112,10 +114,23 @@ // existing numbers into Prometheus Metrics during collection. An own // implementation of the Collector interface is perfect for that. You can create // Metric instances “on the fly” using NewConstMetric, NewConstHistogram, and -// NewConstSummary (and their respective Must… versions). That will happen in -// the Collect method. The Describe method has to return separate Desc -// instances, representative of the “throw-away” metrics to be created -// later. NewDesc comes in handy to create those Desc instances. +// NewConstSummary (and their respective Must… versions). NewConstMetric is used +// for all metric types with just a float64 as their value: Counter, Gauge, and +// a special “type” called Untyped. Use the latter if you are not sure if the +// mirrored metric is a Counter or a Gauge. Creation of the Metric instance +// happens in the Collect method. The Describe method has to return separate +// Desc instances, representative of the “throw-away” metrics to be created +// later. NewDesc comes in handy to create those Desc instances. Alternatively, +// you could return no Desc at all, which will mark the Collector “unchecked”. +// No checks are performed at registration time, but metric consistency will +// still be ensured at scrape time, i.e. 
any inconsistencies will lead to scrape +// errors. Thus, with unchecked Collectors, the responsibility to not collect +// metrics that lead to inconsistencies in the total scrape result lies with the +// implementer of the Collector. While this is not a desirable state, it is +// sometimes necessary. The typical use case is a situation where the exact +// metrics to be returned by a Collector cannot be predicted at registration +// time, but the implementer has sufficient knowledge of the whole system to +// guarantee metric consistency. // // The Collector example illustrates the use case. You can also look at the // source code of the processCollector (mirroring process metrics), the @@ -129,34 +144,34 @@ // Advanced Uses of the Registry // // While MustRegister is the by far most common way of registering a Collector, -// sometimes you might want to handle the errors the registration might -// cause. As suggested by the name, MustRegister panics if an error occurs. With -// the Register function, the error is returned and can be handled. +// sometimes you might want to handle the errors the registration might cause. +// As suggested by the name, MustRegister panics if an error occurs. With the +// Register function, the error is returned and can be handled. // // An error is returned if the registered Collector is incompatible or // inconsistent with already registered metrics. The registry aims for -// consistency of the collected metrics according to the Prometheus data -// model. Inconsistencies are ideally detected at registration time, not at -// collect time. The former will usually be detected at start-up time of a -// program, while the latter will only happen at scrape time, possibly not even -// on the first scrape if the inconsistency only becomes relevant later. That is -// the main reason why a Collector and a Metric have to describe themselves to -// the registry. +// consistency of the collected metrics according to the Prometheus data model. 
+// Inconsistencies are ideally detected at registration time, not at collect +// time. The former will usually be detected at start-up time of a program, +// while the latter will only happen at scrape time, possibly not even on the +// first scrape if the inconsistency only becomes relevant later. That is the +// main reason why a Collector and a Metric have to describe themselves to the +// registry. // // So far, everything we did operated on the so-called default registry, as it -// can be found in the global DefaultRegistry variable. With NewRegistry, you +// can be found in the global DefaultRegisterer variable. With NewRegistry, you // can create a custom registry, or you can even implement the Registerer or -// Gatherer interfaces yourself. The methods Register and Unregister work in -// the same way on a custom registry as the global functions Register and -// Unregister on the default registry. -// -// There are a number of uses for custom registries: You can use registries -// with special properties, see NewPedanticRegistry. You can avoid global state, -// as it is imposed by the DefaultRegistry. You can use multiple registries at -// the same time to expose different metrics in different ways. You can use +// Gatherer interfaces yourself. The methods Register and Unregister work in the +// same way on a custom registry as the global functions Register and Unregister +// on the default registry. +// +// There are a number of uses for custom registries: You can use registries with +// special properties, see NewPedanticRegistry. You can avoid global state, as +// it is imposed by the DefaultRegisterer. You can use multiple registries at +// the same time to expose different metrics in different ways. You can use // separate registries for testing purposes. 
// -// Also note that the DefaultRegistry comes registered with a Collector for Go +// Also note that the DefaultRegisterer comes registered with a Collector for Go // runtime metrics (via NewGoCollector) and a Collector for process metrics (via // NewProcessCollector). With a custom registry, you are in control and decide // yourself about the Collectors to register. @@ -166,16 +181,19 @@ // The Registry implements the Gatherer interface. The caller of the Gather // method can then expose the gathered metrics in some way. Usually, the metrics // are served via HTTP on the /metrics endpoint. That's happening in the example -// above. The tools to expose metrics via HTTP are in the promhttp -// sub-package. (The top-level functions in the prometheus package are -// deprecated.) +// above. The tools to expose metrics via HTTP are in the promhttp sub-package. // // Pushing to the Pushgateway // // Function for pushing to the Pushgateway can be found in the push sub-package. // +// Graphite Bridge +// +// Functions and examples to push metrics from a Gatherer to Graphite can be +// found in the graphite sub-package. +// // Other Means of Exposition // -// More ways of exposing metrics can easily be added. Sending metrics to -// Graphite would be an example that will soon be implemented. +// More ways of exposing metrics can easily be added by following the approaches +// of the existing implementations. 
package prometheus diff --git a/vendor/github.com/m3db/prometheus_client_golang/prometheus/expvar_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go similarity index 50% rename from vendor/github.com/m3db/prometheus_client_golang/prometheus/expvar_collector.go rename to vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go index 18a99d5fa..c41ab37f3 100644 --- a/vendor/github.com/m3db/prometheus_client_golang/prometheus/expvar_collector.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go @@ -22,43 +22,10 @@ type expvarCollector struct { exports map[string]*Desc } -// NewExpvarCollector returns a newly allocated expvar Collector that still has -// to be registered with a Prometheus registry. +// NewExpvarCollector is the obsolete version of collectors.NewExpvarCollector. +// See there for documentation. // -// An expvar Collector collects metrics from the expvar interface. It provides a -// quick way to expose numeric values that are already exported via expvar as -// Prometheus metrics. Note that the data models of expvar and Prometheus are -// fundamentally different, and that the expvar Collector is inherently slower -// than native Prometheus metrics. Thus, the expvar Collector is probably great -// for experiments and prototying, but you should seriously consider a more -// direct implementation of Prometheus metrics for monitoring production -// systems. -// -// The exports map has the following meaning: -// -// The keys in the map correspond to expvar keys, i.e. for every expvar key you -// want to export as Prometheus metric, you need an entry in the exports -// map. The descriptor mapped to each key describes how to export the expvar -// value. It defines the name and the help string of the Prometheus metric -// proxying the expvar value. The type will always be Untyped. -// -// For descriptors without variable labels, the expvar value must be a number or -// a bool. 
The number is then directly exported as the Prometheus sample -// value. (For a bool, 'false' translates to 0 and 'true' to 1). Expvar values -// that are not numbers or bools are silently ignored. -// -// If the descriptor has one variable label, the expvar value must be an expvar -// map. The keys in the expvar map become the various values of the one -// Prometheus label. The values in the expvar map must be numbers or bools again -// as above. -// -// For descriptors with more than one variable label, the expvar must be a -// nested expvar map, i.e. where the values of the topmost map are maps again -// etc. until a depth is reached that corresponds to the number of labels. The -// leaves of that structure must be numbers or bools as above to serve as the -// sample values. -// -// Anything that does not fit into the scheme above is silently ignored. +// Deprecated: Use collectors.NewExpvarCollector instead. func NewExpvarCollector(exports map[string]*Desc) Collector { return &expvarCollector{ exports: exports, diff --git a/vendor/github.com/m3db/prometheus_client_golang/prometheus/fnv.go b/vendor/github.com/prometheus/client_golang/prometheus/fnv.go similarity index 50% rename from vendor/github.com/m3db/prometheus_client_golang/prometheus/fnv.go rename to vendor/github.com/prometheus/client_golang/prometheus/fnv.go index e3b67df8a..3d383a735 100644 --- a/vendor/github.com/m3db/prometheus_client_golang/prometheus/fnv.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/fnv.go @@ -1,3 +1,16 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package prometheus // Inline and byte-free variant of hash/fnv's fnv64a. diff --git a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go new file mode 100644 index 000000000..bd0733d6a --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go @@ -0,0 +1,289 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "math" + "sync/atomic" + "time" + + dto "github.com/prometheus/client_model/go" +) + +// Gauge is a Metric that represents a single numerical value that can +// arbitrarily go up and down. +// +// A Gauge is typically used for measured values like temperatures or current +// memory usage, but also "counts" that can go up and down, like the number of +// running goroutines. +// +// To create Gauge instances, use NewGauge. +type Gauge interface { + Metric + Collector + + // Set sets the Gauge to an arbitrary value. + Set(float64) + // Inc increments the Gauge by 1. 
Use Add to increment it by arbitrary + // values. + Inc() + // Dec decrements the Gauge by 1. Use Sub to decrement it by arbitrary + // values. + Dec() + // Add adds the given value to the Gauge. (The value can be negative, + // resulting in a decrease of the Gauge.) + Add(float64) + // Sub subtracts the given value from the Gauge. (The value can be + // negative, resulting in an increase of the Gauge.) + Sub(float64) + + // SetToCurrentTime sets the Gauge to the current Unix time in seconds. + SetToCurrentTime() +} + +// GaugeOpts is an alias for Opts. See there for doc comments. +type GaugeOpts Opts + +// NewGauge creates a new Gauge based on the provided GaugeOpts. +// +// The returned implementation is optimized for a fast Set method. If you have a +// choice for managing the value of a Gauge via Set vs. Inc/Dec/Add/Sub, pick +// the former. For example, the Inc method of the returned Gauge is slower than +// the Inc method of a Counter returned by NewCounter. This matches the typical +// scenarios for Gauges and Counters, where the former tends to be Set-heavy and +// the latter Inc-heavy. +func NewGauge(opts GaugeOpts) Gauge { + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ) + result := &gauge{desc: desc, labelPairs: desc.constLabelPairs} + result.init(result) // Init self-collection. + return result +} + +type gauge struct { + // valBits contains the bits of the represented float64 value. It has + // to go first in the struct to guarantee alignment for atomic + // operations. 
http://golang.org/pkg/sync/atomic/#pkg-note-BUG + valBits uint64 + + selfCollector + + desc *Desc + labelPairs []*dto.LabelPair +} + +func (g *gauge) Desc() *Desc { + return g.desc +} + +func (g *gauge) Set(val float64) { + atomic.StoreUint64(&g.valBits, math.Float64bits(val)) +} + +func (g *gauge) SetToCurrentTime() { + g.Set(float64(time.Now().UnixNano()) / 1e9) +} + +func (g *gauge) Inc() { + g.Add(1) +} + +func (g *gauge) Dec() { + g.Add(-1) +} + +func (g *gauge) Add(val float64) { + for { + oldBits := atomic.LoadUint64(&g.valBits) + newBits := math.Float64bits(math.Float64frombits(oldBits) + val) + if atomic.CompareAndSwapUint64(&g.valBits, oldBits, newBits) { + return + } + } +} + +func (g *gauge) Sub(val float64) { + g.Add(val * -1) +} + +func (g *gauge) Write(out *dto.Metric) error { + val := math.Float64frombits(atomic.LoadUint64(&g.valBits)) + return populateMetric(GaugeValue, val, g.labelPairs, nil, out) +} + +// GaugeVec is a Collector that bundles a set of Gauges that all share the same +// Desc, but have different values for their variable labels. This is used if +// you want to count the same thing partitioned by various dimensions +// (e.g. number of operations queued, partitioned by user and operation +// type). Create instances with NewGaugeVec. +type GaugeVec struct { + *MetricVec +} + +// NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and +// partitioned by the given label names. +func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec { + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + labelNames, + opts.ConstLabels, + ) + return &GaugeVec{ + MetricVec: NewMetricVec(desc, func(lvs ...string) Metric { + if len(lvs) != len(desc.variableLabels) { + panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs)) + } + result := &gauge{desc: desc, labelPairs: MakeLabelPairs(desc, lvs)} + result.init(result) // Init self-collection. 
+ return result + }), + } +} + +// GetMetricWithLabelValues returns the Gauge for the given slice of label +// values (same order as the variable labels in Desc). If that combination of +// label values is accessed for the first time, a new Gauge is created. +// +// It is possible to call this method without using the returned Gauge to only +// create the new Gauge but leave it at its starting value 0. See also the +// SummaryVec example. +// +// Keeping the Gauge for later use is possible (and should be considered if +// performance is critical), but keep in mind that Reset, DeleteLabelValues and +// Delete can be used to delete the Gauge from the GaugeVec. In that case, the +// Gauge will still exist, but it will not be exported anymore, even if a +// Gauge with the same label values is created later. See also the CounterVec +// example. +// +// An error is returned if the number of label values is not the same as the +// number of variable labels in Desc (minus any curried labels). +// +// Note that for more than one label value, this method is prone to mistakes +// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as +// an alternative to avoid that type of mistake. For higher label numbers, the +// latter has a much more readable (albeit more verbose) syntax, but it comes +// with a performance overhead (for creating and processing the Labels map). +func (v *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) { + metric, err := v.MetricVec.GetMetricWithLabelValues(lvs...) + if metric != nil { + return metric.(Gauge), err + } + return nil, err +} + +// GetMetricWith returns the Gauge for the given Labels map (the label names +// must match those of the variable labels in Desc). If that label map is +// accessed for the first time, a new Gauge is created. Implications of +// creating a Gauge without using it and keeping the Gauge for later use are +// the same as for GetMetricWithLabelValues. 
+// +// An error is returned if the number and names of the Labels are inconsistent +// with those of the variable labels in Desc (minus any curried labels). +// +// This method is used for the same purpose as +// GetMetricWithLabelValues(...string). See there for pros and cons of the two +// methods. +func (v *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) { + metric, err := v.MetricVec.GetMetricWith(labels) + if metric != nil { + return metric.(Gauge), err + } + return nil, err +} + +// WithLabelValues works as GetMetricWithLabelValues, but panics where +// GetMetricWithLabelValues would have returned an error. Not returning an +// error allows shortcuts like +// myVec.WithLabelValues("404", "GET").Add(42) +func (v *GaugeVec) WithLabelValues(lvs ...string) Gauge { + g, err := v.GetMetricWithLabelValues(lvs...) + if err != nil { + panic(err) + } + return g +} + +// With works as GetMetricWith, but panics where GetMetricWithLabels would have +// returned an error. Not returning an error allows shortcuts like +// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42) +func (v *GaugeVec) With(labels Labels) Gauge { + g, err := v.GetMetricWith(labels) + if err != nil { + panic(err) + } + return g +} + +// CurryWith returns a vector curried with the provided labels, i.e. the +// returned vector has those labels pre-set for all labeled operations performed +// on it. The cardinality of the curried vector is reduced accordingly. The +// order of the remaining labels stays the same (just with the curried labels +// taken out of the sequence – which is relevant for the +// (GetMetric)WithLabelValues methods). It is possible to curry a curried +// vector, but only with labels not yet used for currying before. +// +// The metrics contained in the GaugeVec are shared between the curried and +// uncurried vectors. They are just accessed differently. Curried and uncurried +// vectors behave identically in terms of collection. 
Only one must be +// registered with a given registry (usually the uncurried version). The Reset +// method deletes all metrics, even if called on a curried vector. +func (v *GaugeVec) CurryWith(labels Labels) (*GaugeVec, error) { + vec, err := v.MetricVec.CurryWith(labels) + if vec != nil { + return &GaugeVec{vec}, err + } + return nil, err +} + +// MustCurryWith works as CurryWith but panics where CurryWith would have +// returned an error. +func (v *GaugeVec) MustCurryWith(labels Labels) *GaugeVec { + vec, err := v.CurryWith(labels) + if err != nil { + panic(err) + } + return vec +} + +// GaugeFunc is a Gauge whose value is determined at collect time by calling a +// provided function. +// +// To create GaugeFunc instances, use NewGaugeFunc. +type GaugeFunc interface { + Metric + Collector +} + +// NewGaugeFunc creates a new GaugeFunc based on the provided GaugeOpts. The +// value reported is determined by calling the given function from within the +// Write method. Take into account that metric collection may happen +// concurrently. Therefore, it must be safe to call the provided function +// concurrently. +// +// NewGaugeFunc is a good way to create an “info” style metric with a constant +// value of 1. 
Example: +// https://github.com/prometheus/common/blob/8558a5b7db3c84fa38b4766966059a7bd5bfa2ee/version/info.go#L36-L56 +func NewGaugeFunc(opts GaugeOpts, function func() float64) GaugeFunc { + return newValueFunc(NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ), GaugeValue, function) +} diff --git a/vendor/github.com/m3db/prometheus_client_golang/prometheus/go_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go similarity index 59% rename from vendor/github.com/m3db/prometheus_client_golang/prometheus/go_collector.go rename to vendor/github.com/prometheus/client_golang/prometheus/go_collector.go index abc9d4ec4..a96ed1cee 100644 --- a/vendor/github.com/m3db/prometheus_client_golang/prometheus/go_collector.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go @@ -1,34 +1,68 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package prometheus import ( - "fmt" "runtime" "runtime/debug" + "sync" "time" ) type goCollector struct { - goroutines Gauge - gcDesc *Desc + goroutinesDesc *Desc + threadsDesc *Desc + gcDesc *Desc + goInfoDesc *Desc - // metrics to describe and collect - metrics memStatsMetrics + // ms... are memstats related. + msLast *runtime.MemStats // Previously collected memstats. + msLastTimestamp time.Time + msMtx sync.Mutex // Protects msLast and msLastTimestamp. 
+ msMetrics memStatsMetrics + msRead func(*runtime.MemStats) // For mocking in tests. + msMaxWait time.Duration // Wait time for fresh memstats. + msMaxAge time.Duration // Maximum allowed age of old memstats. } -// NewGoCollector returns a collector which exports metrics about the current -// go process. +// NewGoCollector is the obsolete version of collectors.NewGoCollector. +// See there for documentation. +// +// Deprecated: Use collectors.NewGoCollector instead. func NewGoCollector() Collector { return &goCollector{ - goroutines: NewGauge(GaugeOpts{ - Namespace: "go", - Name: "goroutines", - Help: "Number of goroutines that currently exist.", - }), + goroutinesDesc: NewDesc( + "go_goroutines", + "Number of goroutines that currently exist.", + nil, nil), + threadsDesc: NewDesc( + "go_threads", + "Number of OS threads created.", + nil, nil), gcDesc: NewDesc( "go_gc_duration_seconds", - "A summary of the GC invocation durations.", + "A summary of the pause duration of garbage collection cycles.", nil, nil), - metrics: memStatsMetrics{ + goInfoDesc: NewDesc( + "go_info", + "Information about the Go environment.", + nil, Labels{"version": runtime.Version()}), + msLast: &runtime.MemStats{}, + msRead: runtime.ReadMemStats, + msMaxWait: time.Second, + msMaxAge: 5 * time.Minute, + msMetrics: memStatsMetrics{ { desc: NewDesc( memstatNamespace("alloc_bytes"), @@ -48,7 +82,7 @@ func NewGoCollector() Collector { }, { desc: NewDesc( memstatNamespace("sys_bytes"), - "Number of bytes obtained by system. 
Sum of all system allocations.", + "Number of bytes obtained from system.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.Sys) }, @@ -111,12 +145,12 @@ func NewGoCollector() Collector { valType: GaugeValue, }, { desc: NewDesc( - memstatNamespace("heap_released_bytes_total"), - "Total number of heap bytes released to OS.", + memstatNamespace("heap_released_bytes"), + "Number of heap bytes released to OS.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) }, - valType: CounterValue, + valType: GaugeValue, }, { desc: NewDesc( memstatNamespace("heap_objects"), @@ -213,29 +247,53 @@ func NewGoCollector() Collector { ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.LastGC) / 1e9 }, valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("gc_cpu_fraction"), + "The fraction of this program's available CPU time used by the GC since the program started.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return ms.GCCPUFraction }, + valType: GaugeValue, }, }, } } func memstatNamespace(s string) string { - return fmt.Sprintf("go_memstats_%s", s) + return "go_memstats_" + s } // Describe returns all descriptions of the collector. func (c *goCollector) Describe(ch chan<- *Desc) { - ch <- c.goroutines.Desc() + ch <- c.goroutinesDesc + ch <- c.threadsDesc ch <- c.gcDesc - - for _, i := range c.metrics { + ch <- c.goInfoDesc + for _, i := range c.msMetrics { ch <- i.desc } } // Collect returns the current state of all metrics of the collector. func (c *goCollector) Collect(ch chan<- Metric) { - c.goroutines.Set(float64(runtime.NumGoroutine())) - ch <- c.goroutines + var ( + ms = &runtime.MemStats{} + done = make(chan struct{}) + ) + // Start reading memstats first as it might take a while. 
+ go func() { + c.msRead(ms) + c.msMtx.Lock() + c.msLast = ms + c.msLastTimestamp = time.Now() + c.msMtx.Unlock() + close(done) + }() + + ch <- MustNewConstMetric(c.goroutinesDesc, GaugeValue, float64(runtime.NumGoroutine())) + n, _ := runtime.ThreadCreateProfile(nil) + ch <- MustNewConstMetric(c.threadsDesc, GaugeValue, float64(n)) var stats debug.GCStats stats.PauseQuantiles = make([]time.Duration, 5) @@ -246,11 +304,35 @@ func (c *goCollector) Collect(ch chan<- Metric) { quantiles[float64(idx+1)/float64(len(stats.PauseQuantiles)-1)] = pq.Seconds() } quantiles[0.0] = stats.PauseQuantiles[0].Seconds() - ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), float64(stats.PauseTotal.Seconds()), quantiles) + ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), stats.PauseTotal.Seconds(), quantiles) + + ch <- MustNewConstMetric(c.goInfoDesc, GaugeValue, 1) + + timer := time.NewTimer(c.msMaxWait) + select { + case <-done: // Our own ReadMemStats succeeded in time. Use it. + timer.Stop() // Important for high collection frequencies to not pile up timers. + c.msCollect(ch, ms) + return + case <-timer.C: // Time out, use last memstats if possible. Continue below. + } + c.msMtx.Lock() + if time.Since(c.msLastTimestamp) < c.msMaxAge { + // Last memstats are recent enough. Collect from them under the lock. + c.msCollect(ch, c.msLast) + c.msMtx.Unlock() + return + } + // If we are here, the last memstats are too old or don't exist. We have + // to wait until our own ReadMemStats finally completes. For that to + // happen, we have to release the lock. 
+ c.msMtx.Unlock() + <-done + c.msCollect(ch, ms) +} - ms := &runtime.MemStats{} - runtime.ReadMemStats(ms) - for _, i := range c.metrics { +func (c *goCollector) msCollect(ch chan<- Metric, ms *runtime.MemStats) { + for _, i := range c.msMetrics { ch <- MustNewConstMetric(i.desc, i.valType, i.eval(ms)) } } @@ -261,3 +343,25 @@ type memStatsMetrics []struct { eval func(*runtime.MemStats) float64 valType ValueType } + +// NewBuildInfoCollector is the obsolete version of collectors.NewBuildInfoCollector. +// See there for documentation. +// +// Deprecated: Use collectors.NewBuildInfoCollector instead. +func NewBuildInfoCollector() Collector { + path, version, sum := "unknown", "unknown", "unknown" + if bi, ok := debug.ReadBuildInfo(); ok { + path = bi.Main.Path + version = bi.Main.Version + sum = bi.Main.Sum + } + c := &selfCollector{MustNewConstMetric( + NewDesc( + "go_build_info", + "Build information about the main Go module.", + nil, Labels{"path": path, "version": version, "checksum": sum}, + ), + GaugeValue, 1)} + c.init(c.self) + return c +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go new file mode 100644 index 000000000..8425640b3 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go @@ -0,0 +1,642 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package prometheus + +import ( + "fmt" + "math" + "runtime" + "sort" + "sync" + "sync/atomic" + "time" + + //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. + "github.com/golang/protobuf/proto" + + dto "github.com/prometheus/client_model/go" +) + +// A Histogram counts individual observations from an event or sample stream in +// configurable buckets. Similar to a summary, it also provides a sum of +// observations and an observation count. +// +// On the Prometheus server, quantiles can be calculated from a Histogram using +// the histogram_quantile function in the query language. +// +// Note that Histograms, in contrast to Summaries, can be aggregated with the +// Prometheus query language (see the documentation for detailed +// procedures). However, Histograms require the user to pre-define suitable +// buckets, and they are in general less accurate. The Observe method of a +// Histogram has a very low performance overhead in comparison with the Observe +// method of a Summary. +// +// To create Histogram instances, use NewHistogram. +type Histogram interface { + Metric + Collector + + // Observe adds a single observation to the histogram. Observations are + // usually positive or zero. Negative observations are accepted but + // prevent current versions of Prometheus from properly detecting + // counter resets in the sum of observations. See + // https://prometheus.io/docs/practices/histograms/#count-and-sum-of-observations + // for details. + Observe(float64) +} + +// bucketLabel is used for the label that defines the upper bound of a +// bucket of a histogram ("le" -> "less or equal"). +const bucketLabel = "le" + +// DefBuckets are the default Histogram buckets. The default buckets are +// tailored to broadly measure the response time (in seconds) of a network +// service. Most likely, however, you will be required to define buckets +// customized to your use case. 
+var ( + DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10} + + errBucketLabelNotAllowed = fmt.Errorf( + "%q is not allowed as label name in histograms", bucketLabel, + ) +) + +// LinearBuckets creates 'count' buckets, each 'width' wide, where the lowest +// bucket has an upper bound of 'start'. The final +Inf bucket is not counted +// and not included in the returned slice. The returned slice is meant to be +// used for the Buckets field of HistogramOpts. +// +// The function panics if 'count' is zero or negative. +func LinearBuckets(start, width float64, count int) []float64 { + if count < 1 { + panic("LinearBuckets needs a positive count") + } + buckets := make([]float64, count) + for i := range buckets { + buckets[i] = start + start += width + } + return buckets +} + +// ExponentialBuckets creates 'count' buckets, where the lowest bucket has an +// upper bound of 'start' and each following bucket's upper bound is 'factor' +// times the previous bucket's upper bound. The final +Inf bucket is not counted +// and not included in the returned slice. The returned slice is meant to be +// used for the Buckets field of HistogramOpts. +// +// The function panics if 'count' is 0 or negative, if 'start' is 0 or negative, +// or if 'factor' is less than or equal 1. +func ExponentialBuckets(start, factor float64, count int) []float64 { + if count < 1 { + panic("ExponentialBuckets needs a positive count") + } + if start <= 0 { + panic("ExponentialBuckets needs a positive start value") + } + if factor <= 1 { + panic("ExponentialBuckets needs a factor greater than 1") + } + buckets := make([]float64, count) + for i := range buckets { + buckets[i] = start + start *= factor + } + return buckets +} + +// HistogramOpts bundles the options for creating a Histogram metric. It is +// mandatory to set Name to a non-empty string. 
All other fields are optional +// and can safely be left at their zero value, although it is strongly +// encouraged to set a Help string. +type HistogramOpts struct { + // Namespace, Subsystem, and Name are components of the fully-qualified + // name of the Histogram (created by joining these components with + // "_"). Only Name is mandatory, the others merely help structuring the + // name. Note that the fully-qualified name of the Histogram must be a + // valid Prometheus metric name. + Namespace string + Subsystem string + Name string + + // Help provides information about this Histogram. + // + // Metrics with the same fully-qualified name must have the same Help + // string. + Help string + + // ConstLabels are used to attach fixed labels to this metric. Metrics + // with the same fully-qualified name must have the same label names in + // their ConstLabels. + // + // ConstLabels are only used rarely. In particular, do not use them to + // attach the same labels to all your metrics. Those use cases are + // better covered by target labels set by the scraping Prometheus + // server, or by one specific metric (e.g. a build_info or a + // machine_role metric). See also + // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels + ConstLabels Labels + + // Buckets defines the buckets into which observations are counted. Each + // element in the slice is the upper inclusive bound of a bucket. The + // values must be sorted in strictly increasing order. There is no need + // to add a highest bucket with +Inf bound, it will be added + // implicitly. The default value is DefBuckets. + Buckets []float64 +} + +// NewHistogram creates a new Histogram based on the provided HistogramOpts. It +// panics if the buckets in HistogramOpts are not in strictly increasing order. +// +// The returned implementation also implements ExemplarObserver. It is safe to +// perform the corresponding type assertion. 
Exemplars are tracked separately +// for each bucket. +func NewHistogram(opts HistogramOpts) Histogram { + return newHistogram( + NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ), + opts, + ) +} + +func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram { + if len(desc.variableLabels) != len(labelValues) { + panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, labelValues)) + } + + for _, n := range desc.variableLabels { + if n == bucketLabel { + panic(errBucketLabelNotAllowed) + } + } + for _, lp := range desc.constLabelPairs { + if lp.GetName() == bucketLabel { + panic(errBucketLabelNotAllowed) + } + } + + if len(opts.Buckets) == 0 { + opts.Buckets = DefBuckets + } + + h := &histogram{ + desc: desc, + upperBounds: opts.Buckets, + labelPairs: MakeLabelPairs(desc, labelValues), + counts: [2]*histogramCounts{{}, {}}, + now: time.Now, + } + for i, upperBound := range h.upperBounds { + if i < len(h.upperBounds)-1 { + if upperBound >= h.upperBounds[i+1] { + panic(fmt.Errorf( + "histogram buckets must be in increasing order: %f >= %f", + upperBound, h.upperBounds[i+1], + )) + } + } else { + if math.IsInf(upperBound, +1) { + // The +Inf bucket is implicit. Remove it here. + h.upperBounds = h.upperBounds[:i] + } + } + } + // Finally we know the final length of h.upperBounds and can make buckets + // for both counts as well as exemplars: + h.counts[0].buckets = make([]uint64, len(h.upperBounds)) + h.counts[1].buckets = make([]uint64, len(h.upperBounds)) + h.exemplars = make([]atomic.Value, len(h.upperBounds)+1) + + h.init(h) // Init self-collection. + return h +} + +type histogramCounts struct { + // sumBits contains the bits of the float64 representing the sum of all + // observations. sumBits and count have to go first in the struct to + // guarantee alignment for atomic operations. 
+ // http://golang.org/pkg/sync/atomic/#pkg-note-BUG + sumBits uint64 + count uint64 + buckets []uint64 +} + +type histogram struct { + // countAndHotIdx enables lock-free writes with use of atomic updates. + // The most significant bit is the hot index [0 or 1] of the count field + // below. Observe calls update the hot one. All remaining bits count the + // number of Observe calls. Observe starts by incrementing this counter, + // and finish by incrementing the count field in the respective + // histogramCounts, as a marker for completion. + // + // Calls of the Write method (which are non-mutating reads from the + // perspective of the histogram) swap the hot–cold under the writeMtx + // lock. A cooldown is awaited (while locked) by comparing the number of + // observations with the initiation count. Once they match, then the + // last observation on the now cool one has completed. All cool fields must + // be merged into the new hot before releasing writeMtx. + // + // Fields with atomic access first! See alignment constraint: + // http://golang.org/pkg/sync/atomic/#pkg-note-BUG + countAndHotIdx uint64 + + selfCollector + desc *Desc + writeMtx sync.Mutex // Only used in the Write method. + + // Two counts, one is "hot" for lock-free observations, the other is + // "cold" for writing out a dto.Metric. It has to be an array of + // pointers to guarantee 64bit alignment of the histogramCounts, see + // http://golang.org/pkg/sync/atomic/#pkg-note-BUG. + counts [2]*histogramCounts + + upperBounds []float64 + labelPairs []*dto.LabelPair + exemplars []atomic.Value // One more than buckets (to include +Inf), each a *dto.Exemplar. + + now func() time.Time // To mock out time.Now() for testing. 
+} + +func (h *histogram) Desc() *Desc { + return h.desc +} + +func (h *histogram) Observe(v float64) { + h.observe(v, h.findBucket(v)) +} + +func (h *histogram) ObserveWithExemplar(v float64, e Labels) { + i := h.findBucket(v) + h.observe(v, i) + h.updateExemplar(v, i, e) +} + +func (h *histogram) Write(out *dto.Metric) error { + // For simplicity, we protect this whole method by a mutex. It is not in + // the hot path, i.e. Observe is called much more often than Write. The + // complication of making Write lock-free isn't worth it, if possible at + // all. + h.writeMtx.Lock() + defer h.writeMtx.Unlock() + + // Adding 1<<63 switches the hot index (from 0 to 1 or from 1 to 0) + // without touching the count bits. See the struct comments for a full + // description of the algorithm. + n := atomic.AddUint64(&h.countAndHotIdx, 1<<63) + // count is contained unchanged in the lower 63 bits. + count := n & ((1 << 63) - 1) + // The most significant bit tells us which counts is hot. The complement + // is thus the cold one. + hotCounts := h.counts[n>>63] + coldCounts := h.counts[(^n)>>63] + + // Await cooldown. + for count != atomic.LoadUint64(&coldCounts.count) { + runtime.Gosched() // Let observations get work done. + } + + his := &dto.Histogram{ + Bucket: make([]*dto.Bucket, len(h.upperBounds)), + SampleCount: proto.Uint64(count), + SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))), + } + var cumCount uint64 + for i, upperBound := range h.upperBounds { + cumCount += atomic.LoadUint64(&coldCounts.buckets[i]) + his.Bucket[i] = &dto.Bucket{ + CumulativeCount: proto.Uint64(cumCount), + UpperBound: proto.Float64(upperBound), + } + if e := h.exemplars[i].Load(); e != nil { + his.Bucket[i].Exemplar = e.(*dto.Exemplar) + } + } + // If there is an exemplar for the +Inf bucket, we have to add that bucket explicitly. 
+ if e := h.exemplars[len(h.upperBounds)].Load(); e != nil { + b := &dto.Bucket{ + CumulativeCount: proto.Uint64(count), + UpperBound: proto.Float64(math.Inf(1)), + Exemplar: e.(*dto.Exemplar), + } + his.Bucket = append(his.Bucket, b) + } + + out.Histogram = his + out.Label = h.labelPairs + + // Finally add all the cold counts to the new hot counts and reset the cold counts. + atomic.AddUint64(&hotCounts.count, count) + atomic.StoreUint64(&coldCounts.count, 0) + for { + oldBits := atomic.LoadUint64(&hotCounts.sumBits) + newBits := math.Float64bits(math.Float64frombits(oldBits) + his.GetSampleSum()) + if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) { + atomic.StoreUint64(&coldCounts.sumBits, 0) + break + } + } + for i := range h.upperBounds { + atomic.AddUint64(&hotCounts.buckets[i], atomic.LoadUint64(&coldCounts.buckets[i])) + atomic.StoreUint64(&coldCounts.buckets[i], 0) + } + return nil +} + +// findBucket returns the index of the bucket for the provided value, or +// len(h.upperBounds) for the +Inf bucket. +func (h *histogram) findBucket(v float64) int { + // TODO(beorn7): For small numbers of buckets (<30), a linear search is + // slightly faster than the binary search. If we really care, we could + // switch from one search strategy to the other depending on the number + // of buckets. + // + // Microbenchmarks (BenchmarkHistogramNoLabels): + // 11 buckets: 38.3 ns/op linear - binary 48.7 ns/op + // 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op + // 300 buckets: 154 ns/op linear - binary 61.6 ns/op + return sort.SearchFloat64s(h.upperBounds, v) +} + +// observe is the implementation for Observe without the findBucket part. +func (h *histogram) observe(v float64, bucket int) { + // We increment h.countAndHotIdx so that the counter in the lower + // 63 bits gets incremented. At the same time, we get the new value + // back, which we can use to find the currently-hot counts. 
+ n := atomic.AddUint64(&h.countAndHotIdx, 1) + hotCounts := h.counts[n>>63] + + if bucket < len(h.upperBounds) { + atomic.AddUint64(&hotCounts.buckets[bucket], 1) + } + for { + oldBits := atomic.LoadUint64(&hotCounts.sumBits) + newBits := math.Float64bits(math.Float64frombits(oldBits) + v) + if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) { + break + } + } + // Increment count last as we take it as a signal that the observation + // is complete. + atomic.AddUint64(&hotCounts.count, 1) +} + +// updateExemplar replaces the exemplar for the provided bucket. With empty +// labels, it's a no-op. It panics if any of the labels is invalid. +func (h *histogram) updateExemplar(v float64, bucket int, l Labels) { + if l == nil { + return + } + e, err := newExemplar(v, h.now(), l) + if err != nil { + panic(err) + } + h.exemplars[bucket].Store(e) +} + +// HistogramVec is a Collector that bundles a set of Histograms that all share the +// same Desc, but have different values for their variable labels. This is used +// if you want to count the same thing partitioned by various dimensions +// (e.g. HTTP request latencies, partitioned by status code and method). Create +// instances with NewHistogramVec. +type HistogramVec struct { + *MetricVec +} + +// NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and +// partitioned by the given label names. +func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec { + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + labelNames, + opts.ConstLabels, + ) + return &HistogramVec{ + MetricVec: NewMetricVec(desc, func(lvs ...string) Metric { + return newHistogram(desc, opts, lvs...) + }), + } +} + +// GetMetricWithLabelValues returns the Histogram for the given slice of label +// values (same order as the variable labels in Desc). If that combination of +// label values is accessed for the first time, a new Histogram is created. 
+// +// It is possible to call this method without using the returned Histogram to only +// create the new Histogram but leave it at its starting value, a Histogram without +// any observations. +// +// Keeping the Histogram for later use is possible (and should be considered if +// performance is critical), but keep in mind that Reset, DeleteLabelValues and +// Delete can be used to delete the Histogram from the HistogramVec. In that case, the +// Histogram will still exist, but it will not be exported anymore, even if a +// Histogram with the same label values is created later. See also the CounterVec +// example. +// +// An error is returned if the number of label values is not the same as the +// number of variable labels in Desc (minus any curried labels). +// +// Note that for more than one label value, this method is prone to mistakes +// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as +// an alternative to avoid that type of mistake. For higher label numbers, the +// latter has a much more readable (albeit more verbose) syntax, but it comes +// with a performance overhead (for creating and processing the Labels map). +// See also the GaugeVec example. +func (v *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) { + metric, err := v.MetricVec.GetMetricWithLabelValues(lvs...) + if metric != nil { + return metric.(Observer), err + } + return nil, err +} + +// GetMetricWith returns the Histogram for the given Labels map (the label names +// must match those of the variable labels in Desc). If that label map is +// accessed for the first time, a new Histogram is created. Implications of +// creating a Histogram without using it and keeping the Histogram for later use +// are the same as for GetMetricWithLabelValues. +// +// An error is returned if the number and names of the Labels are inconsistent +// with those of the variable labels in Desc (minus any curried labels). 
+// +// This method is used for the same purpose as +// GetMetricWithLabelValues(...string). See there for pros and cons of the two +// methods. +func (v *HistogramVec) GetMetricWith(labels Labels) (Observer, error) { + metric, err := v.MetricVec.GetMetricWith(labels) + if metric != nil { + return metric.(Observer), err + } + return nil, err +} + +// WithLabelValues works as GetMetricWithLabelValues, but panics where +// GetMetricWithLabelValues would have returned an error. Not returning an +// error allows shortcuts like +// myVec.WithLabelValues("404", "GET").Observe(42.21) +func (v *HistogramVec) WithLabelValues(lvs ...string) Observer { + h, err := v.GetMetricWithLabelValues(lvs...) + if err != nil { + panic(err) + } + return h +} + +// With works as GetMetricWith but panics where GetMetricWithLabels would have +// returned an error. Not returning an error allows shortcuts like +// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21) +func (v *HistogramVec) With(labels Labels) Observer { + h, err := v.GetMetricWith(labels) + if err != nil { + panic(err) + } + return h +} + +// CurryWith returns a vector curried with the provided labels, i.e. the +// returned vector has those labels pre-set for all labeled operations performed +// on it. The cardinality of the curried vector is reduced accordingly. The +// order of the remaining labels stays the same (just with the curried labels +// taken out of the sequence – which is relevant for the +// (GetMetric)WithLabelValues methods). It is possible to curry a curried +// vector, but only with labels not yet used for currying before. +// +// The metrics contained in the HistogramVec are shared between the curried and +// uncurried vectors. They are just accessed differently. Curried and uncurried +// vectors behave identically in terms of collection. Only one must be +// registered with a given registry (usually the uncurried version). 
The Reset +// method deletes all metrics, even if called on a curried vector. +func (v *HistogramVec) CurryWith(labels Labels) (ObserverVec, error) { + vec, err := v.MetricVec.CurryWith(labels) + if vec != nil { + return &HistogramVec{vec}, err + } + return nil, err +} + +// MustCurryWith works as CurryWith but panics where CurryWith would have +// returned an error. +func (v *HistogramVec) MustCurryWith(labels Labels) ObserverVec { + vec, err := v.CurryWith(labels) + if err != nil { + panic(err) + } + return vec +} + +type constHistogram struct { + desc *Desc + count uint64 + sum float64 + buckets map[float64]uint64 + labelPairs []*dto.LabelPair +} + +func (h *constHistogram) Desc() *Desc { + return h.desc +} + +func (h *constHistogram) Write(out *dto.Metric) error { + his := &dto.Histogram{} + buckets := make([]*dto.Bucket, 0, len(h.buckets)) + + his.SampleCount = proto.Uint64(h.count) + his.SampleSum = proto.Float64(h.sum) + + for upperBound, count := range h.buckets { + buckets = append(buckets, &dto.Bucket{ + CumulativeCount: proto.Uint64(count), + UpperBound: proto.Float64(upperBound), + }) + } + + if len(buckets) > 0 { + sort.Sort(buckSort(buckets)) + } + his.Bucket = buckets + + out.Histogram = his + out.Label = h.labelPairs + + return nil +} + +// NewConstHistogram returns a metric representing a Prometheus histogram with +// fixed values for the count, sum, and bucket counts. As those parameters +// cannot be changed, the returned value does not implement the Histogram +// interface (but only the Metric interface). Users of this package will not +// have much use for it in regular operations. However, when implementing custom +// Collectors, it is useful as a throw-away metric that is generated on the fly +// to send it to Prometheus in the Collect method. +// +// buckets is a map of upper bounds to cumulative counts, excluding the +Inf +// bucket. 
+// +// NewConstHistogram returns an error if the length of labelValues is not +// consistent with the variable labels in Desc or if Desc is invalid. +func NewConstHistogram( + desc *Desc, + count uint64, + sum float64, + buckets map[float64]uint64, + labelValues ...string, +) (Metric, error) { + if desc.err != nil { + return nil, desc.err + } + if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { + return nil, err + } + return &constHistogram{ + desc: desc, + count: count, + sum: sum, + buckets: buckets, + labelPairs: MakeLabelPairs(desc, labelValues), + }, nil +} + +// MustNewConstHistogram is a version of NewConstHistogram that panics where +// NewConstHistogram would have returned an error. +func MustNewConstHistogram( + desc *Desc, + count uint64, + sum float64, + buckets map[float64]uint64, + labelValues ...string, +) Metric { + m, err := NewConstHistogram(desc, count, sum, buckets, labelValues...) + if err != nil { + panic(err) + } + return m +} + +type buckSort []*dto.Bucket + +func (s buckSort) Len() int { + return len(s) +} + +func (s buckSort) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s buckSort) Less(i, j int) bool { + return s[i].GetUpperBound() < s[j].GetUpperBound() +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go new file mode 100644 index 000000000..351c26e1a --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go @@ -0,0 +1,85 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +import ( + "sort" + + dto "github.com/prometheus/client_model/go" +) + +// metricSorter is a sortable slice of *dto.Metric. +type metricSorter []*dto.Metric + +func (s metricSorter) Len() int { + return len(s) +} + +func (s metricSorter) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s metricSorter) Less(i, j int) bool { + if len(s[i].Label) != len(s[j].Label) { + // This should not happen. The metrics are + // inconsistent. However, we have to deal with the fact, as + // people might use custom collectors or metric family injection + // to create inconsistent metrics. So let's simply compare the + // number of labels in this case. That will still yield + // reproducible sorting. + return len(s[i].Label) < len(s[j].Label) + } + for n, lp := range s[i].Label { + vi := lp.GetValue() + vj := s[j].Label[n].GetValue() + if vi != vj { + return vi < vj + } + } + + // We should never arrive here. Multiple metrics with the same + // label set in the same scrape will lead to undefined ingestion + // behavior. However, as above, we have to provide stable sorting + // here, even for inconsistent metrics. So sort equal metrics + // by their timestamp, with missing timestamps (implying "now") + // coming last. 
+ if s[i].TimestampMs == nil { + return false + } + if s[j].TimestampMs == nil { + return true + } + return s[i].GetTimestampMs() < s[j].GetTimestampMs() +} + +// NormalizeMetricFamilies returns a MetricFamily slice with empty +// MetricFamilies pruned and the remaining MetricFamilies sorted by name within +// the slice, with the contained Metrics sorted within each MetricFamily. +func NormalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily) []*dto.MetricFamily { + for _, mf := range metricFamiliesByName { + sort.Sort(metricSorter(mf.Metric)) + } + names := make([]string, 0, len(metricFamiliesByName)) + for name, mf := range metricFamiliesByName { + if len(mf.Metric) > 0 { + names = append(names, name) + } + } + sort.Strings(names) + result := make([]*dto.MetricFamily, 0, len(names)) + for _, name := range names { + result = append(result, metricFamiliesByName[name]) + } + return result +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/labels.go b/vendor/github.com/prometheus/client_golang/prometheus/labels.go new file mode 100644 index 000000000..2744443ac --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/labels.go @@ -0,0 +1,87 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package prometheus + +import ( + "errors" + "fmt" + "strings" + "unicode/utf8" + + "github.com/prometheus/common/model" +) + +// Labels represents a collection of label name -> value mappings. This type is +// commonly used with the With(Labels) and GetMetricWith(Labels) methods of +// metric vector Collectors, e.g.: +// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) +// +// The other use-case is the specification of constant label pairs in Opts or to +// create a Desc. +type Labels map[string]string + +// reservedLabelPrefix is a prefix which is not legal in user-supplied +// label names. +const reservedLabelPrefix = "__" + +var errInconsistentCardinality = errors.New("inconsistent label cardinality") + +func makeInconsistentCardinalityError(fqName string, labels, labelValues []string) error { + return fmt.Errorf( + "%s: %q has %d variable labels named %q but %d values %q were provided", + errInconsistentCardinality, fqName, + len(labels), labels, + len(labelValues), labelValues, + ) +} + +func validateValuesInLabels(labels Labels, expectedNumberOfValues int) error { + if len(labels) != expectedNumberOfValues { + return fmt.Errorf( + "%s: expected %d label values but got %d in %#v", + errInconsistentCardinality, expectedNumberOfValues, + len(labels), labels, + ) + } + + for name, val := range labels { + if !utf8.ValidString(val) { + return fmt.Errorf("label %s: value %q is not valid UTF-8", name, val) + } + } + + return nil +} + +func validateLabelValues(vals []string, expectedNumberOfValues int) error { + if len(vals) != expectedNumberOfValues { + return fmt.Errorf( + "%s: expected %d label values but got %d in %#v", + errInconsistentCardinality, expectedNumberOfValues, + len(vals), vals, + ) + } + + for _, val := range vals { + if !utf8.ValidString(val) { + return fmt.Errorf("label value %q is not valid UTF-8", val) + } + } + + return nil +} + +func checkLabelName(l string) bool { + return model.LabelName(l).IsValid() && !strings.HasPrefix(l, 
reservedLabelPrefix) +} diff --git a/vendor/github.com/m3db/prometheus_client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go similarity index 64% rename from vendor/github.com/m3db/prometheus_client_golang/prometheus/metric.go rename to vendor/github.com/prometheus/client_golang/prometheus/metric.go index 5ff2e6a28..dc121910a 100644 --- a/vendor/github.com/m3db/prometheus_client_golang/prometheus/metric.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/metric.go @@ -15,11 +15,16 @@ package prometheus import ( "strings" + "time" - dto "github.com/m3db/prometheus_client_model/go" + //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. + "github.com/golang/protobuf/proto" + "github.com/prometheus/common/model" + + dto "github.com/prometheus/client_model/go" ) -const separatorByte byte = 255 +var separatorByteSlice = []byte{model.SeparatorByte} // For convenient use with xxhash. // A Metric models a single sample value with its meta data being exported to // Prometheus. Implementations of Metric in this package are Gauge, Counter, @@ -43,9 +48,8 @@ type Metric interface { // While populating dto.Metric, it is the responsibility of the // implementation to ensure validity of the Metric protobuf (like valid // UTF-8 strings or syntactically valid metric and label names). It is - // recommended to sort labels lexicographically. (Implementers may find - // LabelPairSorter useful for that.) Callers of Write should still make - // sure of sorting if they depend on it. + // recommended to sort labels lexicographically. Callers of Write should + // still make sure of sorting if they depend on it. Write(*dto.Metric) error // TODO(beorn7): The original rationale of passing in a pre-allocated // dto.Metric protobuf to save allocations has disappeared. The @@ -54,11 +58,12 @@ type Metric interface { } // Opts bundles the options for creating most Metric types. 
Each metric -// implementation XXX has its own XXXOpts type, but in most cases, it is just be +// implementation XXX has its own XXXOpts type, but in most cases, it is just // an alias of this type (which might change when the requirement arises.) // -// It is mandatory to set Name and Help to a non-empty string. All other fields -// are optional and can safely be left at their zero value. +// It is mandatory to set Name to a non-empty string. All other fields are +// optional and can safely be left at their zero value, although it is strongly +// encouraged to set a Help string. type Opts struct { // Namespace, Subsystem, and Name are components of the fully-qualified // name of the Metric (created by joining these components with @@ -69,7 +74,7 @@ type Opts struct { Subsystem string Name string - // Help provides information about this metric. Mandatory! + // Help provides information about this metric. // // Metrics with the same fully-qualified name must have the same Help // string. @@ -79,20 +84,12 @@ type Opts struct { // with the same fully-qualified name must have the same label names in // their ConstLabels. // - // Note that in most cases, labels have a value that varies during the - // lifetime of a process. Those labels are usually managed with a metric - // vector collector (like CounterVec, GaugeVec, UntypedVec). ConstLabels - // serve only special purposes. One is for the special case where the - // value of a label does not change during the lifetime of a process, - // e.g. if the revision of the running binary is put into a - // label. Another, more advanced purpose is if more than one Collector - // needs to collect Metrics with the same fully-qualified name. In that - // case, those Metrics must differ in the values of their - // ConstLabels. See the Collector examples. - // - // If the value of a label never changes (not even between binaries), - // that label most likely should not be a label at all (but part of the - // metric name). 
+ // ConstLabels are only used rarely. In particular, do not use them to + // attach the same labels to all your metrics. Those use cases are + // better covered by target labels set by the scraping Prometheus + // server, or by one specific metric (e.g. a build_info or a + // machine_role metric). See also + // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels ConstLabels Labels } @@ -118,37 +115,22 @@ func BuildFQName(namespace, subsystem, name string) string { return name } -// LabelPairSorter implements sort.Interface. It is used to sort a slice of -// dto.LabelPair pointers. This is useful for implementing the Write method of -// custom metrics. -type LabelPairSorter []*dto.LabelPair +// labelPairSorter implements sort.Interface. It is used to sort a slice of +// dto.LabelPair pointers. +type labelPairSorter []*dto.LabelPair -func (s LabelPairSorter) Len() int { +func (s labelPairSorter) Len() int { return len(s) } -func (s LabelPairSorter) Swap(i, j int) { +func (s labelPairSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s LabelPairSorter) Less(i, j int) bool { +func (s labelPairSorter) Less(i, j int) bool { return s[i].GetName() < s[j].GetName() } -type hashSorter []uint64 - -func (s hashSorter) Len() int { - return len(s) -} - -func (s hashSorter) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -func (s hashSorter) Less(i, j int) bool { - return s[i] < s[j] -} - type invalidMetric struct { desc *Desc err error @@ -164,3 +146,31 @@ func NewInvalidMetric(desc *Desc, err error) Metric { func (m *invalidMetric) Desc() *Desc { return m.desc } func (m *invalidMetric) Write(*dto.Metric) error { return m.err } + +type timestampedMetric struct { + Metric + t time.Time +} + +func (m timestampedMetric) Write(pb *dto.Metric) error { + e := m.Metric.Write(pb) + pb.TimestampMs = proto.Int64(m.t.Unix()*1000 + int64(m.t.Nanosecond()/1000000)) + return e +} + +// NewMetricWithTimestamp returns a new Metric wrapping 
the provided Metric in a +// way that it has an explicit timestamp set to the provided Time. This is only +// useful in rare cases as the timestamp of a Prometheus metric should usually +// be set by the Prometheus server during scraping. Exceptions include mirroring +// metrics with given timestamps from other metric +// sources. +// +// NewMetricWithTimestamp works best with MustNewConstMetric, +// MustNewConstHistogram, and MustNewConstSummary, see example. +// +// Currently, the exposition formats used by Prometheus are limited to +// millisecond resolution. Thus, the provided time will be rounded down to the +// next full millisecond value. +func NewMetricWithTimestamp(t time.Time, m Metric) Metric { + return timestampedMetric{Metric: m, t: t} +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/observer.go b/vendor/github.com/prometheus/client_golang/prometheus/observer.go new file mode 100644 index 000000000..44128016f --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/observer.go @@ -0,0 +1,64 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +// Observer is the interface that wraps the Observe method, which is used by +// Histogram and Summary to add observations. +type Observer interface { + Observe(float64) +} + +// The ObserverFunc type is an adapter to allow the use of ordinary +// functions as Observers. 
If f is a function with the appropriate +// signature, ObserverFunc(f) is an Observer that calls f. +// +// This adapter is usually used in connection with the Timer type, and there are +// two general use cases: +// +// The most common one is to use a Gauge as the Observer for a Timer. +// See the "Gauge" Timer example. +// +// The more advanced use case is to create a function that dynamically decides +// which Observer to use for observing the duration. See the "Complex" Timer +// example. +type ObserverFunc func(float64) + +// Observe calls f(value). It implements Observer. +func (f ObserverFunc) Observe(value float64) { + f(value) +} + +// ObserverVec is an interface implemented by `HistogramVec` and `SummaryVec`. +type ObserverVec interface { + GetMetricWith(Labels) (Observer, error) + GetMetricWithLabelValues(lvs ...string) (Observer, error) + With(Labels) Observer + WithLabelValues(...string) Observer + CurryWith(Labels) (ObserverVec, error) + MustCurryWith(Labels) ObserverVec + + Collector +} + +// ExemplarObserver is implemented by Observers that offer the option of +// observing a value together with an exemplar. Its ObserveWithExemplar method +// works like the Observe method of an Observer but also replaces the currently +// saved exemplar (if any) with a new one, created from the provided value, the +// current time as timestamp, and the provided Labels. Empty Labels will lead to +// a valid (label-less) exemplar. But if Labels is nil, the current exemplar is +// left in place. ObserveWithExemplar panics if any of the provided labels are +// invalid or if the provided labels contain more than 64 runes in total. 
+type ExemplarObserver interface { + ObserveWithExemplar(value float64, exemplar Labels) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go new file mode 100644 index 000000000..5bfe0ff5b --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go @@ -0,0 +1,166 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "errors" + "fmt" + "io/ioutil" + "os" + "strconv" + "strings" +) + +type processCollector struct { + collectFn func(chan<- Metric) + pidFn func() (int, error) + reportErrors bool + cpuTotal *Desc + openFDs, maxFDs *Desc + vsize, maxVsize *Desc + rss *Desc + startTime *Desc +} + +// ProcessCollectorOpts defines the behavior of a process metrics collector +// created with NewProcessCollector. +type ProcessCollectorOpts struct { + // PidFn returns the PID of the process the collector collects metrics + // for. It is called upon each collection. By default, the PID of the + // current process is used, as determined on construction time by + // calling os.Getpid(). + PidFn func() (int, error) + // If non-empty, each of the collected metrics is prefixed by the + // provided string and an underscore ("_"). 
+ Namespace string + // If true, any error encountered during collection is reported as an + // invalid metric (see NewInvalidMetric). Otherwise, errors are ignored + // and the collected metrics will be incomplete. (Possibly, no metrics + // will be collected at all.) While that's usually not desired, it is + // appropriate for the common "mix-in" of process metrics, where process + // metrics are nice to have, but failing to collect them should not + // disrupt the collection of the remaining metrics. + ReportErrors bool +} + +// NewProcessCollector is the obsolete version of collectors.NewProcessCollector. +// See there for documentation. +// +// Deprecated: Use collectors.NewProcessCollector instead. +func NewProcessCollector(opts ProcessCollectorOpts) Collector { + ns := "" + if len(opts.Namespace) > 0 { + ns = opts.Namespace + "_" + } + + c := &processCollector{ + reportErrors: opts.ReportErrors, + cpuTotal: NewDesc( + ns+"process_cpu_seconds_total", + "Total user and system CPU time spent in seconds.", + nil, nil, + ), + openFDs: NewDesc( + ns+"process_open_fds", + "Number of open file descriptors.", + nil, nil, + ), + maxFDs: NewDesc( + ns+"process_max_fds", + "Maximum number of open file descriptors.", + nil, nil, + ), + vsize: NewDesc( + ns+"process_virtual_memory_bytes", + "Virtual memory size in bytes.", + nil, nil, + ), + maxVsize: NewDesc( + ns+"process_virtual_memory_max_bytes", + "Maximum amount of virtual memory available in bytes.", + nil, nil, + ), + rss: NewDesc( + ns+"process_resident_memory_bytes", + "Resident memory size in bytes.", + nil, nil, + ), + startTime: NewDesc( + ns+"process_start_time_seconds", + "Start time of the process since unix epoch in seconds.", + nil, nil, + ), + } + + if opts.PidFn == nil { + pid := os.Getpid() + c.pidFn = func() (int, error) { return pid, nil } + } else { + c.pidFn = opts.PidFn + } + + // Set up process metric collection if supported by the runtime. 
+ if canCollectProcess() { + c.collectFn = c.processCollect + } else { + c.collectFn = func(ch chan<- Metric) { + c.reportError(ch, nil, errors.New("process metrics not supported on this platform")) + } + } + + return c +} + +// Describe returns all descriptions of the collector. +func (c *processCollector) Describe(ch chan<- *Desc) { + ch <- c.cpuTotal + ch <- c.openFDs + ch <- c.maxFDs + ch <- c.vsize + ch <- c.maxVsize + ch <- c.rss + ch <- c.startTime +} + +// Collect returns the current state of all metrics of the collector. +func (c *processCollector) Collect(ch chan<- Metric) { + c.collectFn(ch) +} + +func (c *processCollector) reportError(ch chan<- Metric, desc *Desc, err error) { + if !c.reportErrors { + return + } + if desc == nil { + desc = NewInvalidDesc(err) + } + ch <- NewInvalidMetric(desc, err) +} + +// NewPidFileFn returns a function that retrieves a pid from the specified file. +// It is meant to be used for the PidFn field in ProcessCollectorOpts. +func NewPidFileFn(pidFilePath string) func() (int, error) { + return func() (int, error) { + content, err := ioutil.ReadFile(pidFilePath) + if err != nil { + return 0, fmt.Errorf("can't read pid file %q: %+v", pidFilePath, err) + } + pid, err := strconv.Atoi(strings.TrimSpace(string(content))) + if err != nil { + return 0, fmt.Errorf("can't parse pid file %q: %+v", pidFilePath, err) + } + + return pid, nil + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go new file mode 100644 index 000000000..3117461cd --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go @@ -0,0 +1,65 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !windows + +package prometheus + +import ( + "github.com/prometheus/procfs" +) + +func canCollectProcess() bool { + _, err := procfs.NewDefaultFS() + return err == nil +} + +func (c *processCollector) processCollect(ch chan<- Metric) { + pid, err := c.pidFn() + if err != nil { + c.reportError(ch, nil, err) + return + } + + p, err := procfs.NewProc(pid) + if err != nil { + c.reportError(ch, nil, err) + return + } + + if stat, err := p.Stat(); err == nil { + ch <- MustNewConstMetric(c.cpuTotal, CounterValue, stat.CPUTime()) + ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(stat.VirtualMemory())) + ch <- MustNewConstMetric(c.rss, GaugeValue, float64(stat.ResidentMemory())) + if startTime, err := stat.StartTime(); err == nil { + ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime) + } else { + c.reportError(ch, c.startTime, err) + } + } else { + c.reportError(ch, nil, err) + } + + if fds, err := p.FileDescriptorsLen(); err == nil { + ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(fds)) + } else { + c.reportError(ch, c.openFDs, err) + } + + if limits, err := p.Limits(); err == nil { + ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(limits.OpenFiles)) + ch <- MustNewConstMetric(c.maxVsize, GaugeValue, float64(limits.AddressSpace)) + } else { + c.reportError(ch, nil, err) + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go new file mode 100644 index 000000000..f973398df --- 
/dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go @@ -0,0 +1,116 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +func canCollectProcess() bool { + return true +} + +var ( + modpsapi = syscall.NewLazyDLL("psapi.dll") + modkernel32 = syscall.NewLazyDLL("kernel32.dll") + + procGetProcessMemoryInfo = modpsapi.NewProc("GetProcessMemoryInfo") + procGetProcessHandleCount = modkernel32.NewProc("GetProcessHandleCount") +) + +type processMemoryCounters struct { + // System interface description + // https://docs.microsoft.com/en-us/windows/desktop/api/psapi/ns-psapi-process_memory_counters_ex + + // Refer to the Golang internal implementation + // https://golang.org/src/internal/syscall/windows/psapi_windows.go + _ uint32 + PageFaultCount uint32 + PeakWorkingSetSize uintptr + WorkingSetSize uintptr + QuotaPeakPagedPoolUsage uintptr + QuotaPagedPoolUsage uintptr + QuotaPeakNonPagedPoolUsage uintptr + QuotaNonPagedPoolUsage uintptr + PagefileUsage uintptr + PeakPagefileUsage uintptr + PrivateUsage uintptr +} + +func getProcessMemoryInfo(handle windows.Handle) (processMemoryCounters, error) { + mem := processMemoryCounters{} + r1, _, err := procGetProcessMemoryInfo.Call( + uintptr(handle), + uintptr(unsafe.Pointer(&mem)), + uintptr(unsafe.Sizeof(mem)), + ) + if r1 != 1 { + return mem, err 
+ } else { + return mem, nil + } +} + +func getProcessHandleCount(handle windows.Handle) (uint32, error) { + var count uint32 + r1, _, err := procGetProcessHandleCount.Call( + uintptr(handle), + uintptr(unsafe.Pointer(&count)), + ) + if r1 != 1 { + return 0, err + } else { + return count, nil + } +} + +func (c *processCollector) processCollect(ch chan<- Metric) { + h, err := windows.GetCurrentProcess() + if err != nil { + c.reportError(ch, nil, err) + return + } + + var startTime, exitTime, kernelTime, userTime windows.Filetime + err = windows.GetProcessTimes(h, &startTime, &exitTime, &kernelTime, &userTime) + if err != nil { + c.reportError(ch, nil, err) + return + } + ch <- MustNewConstMetric(c.startTime, GaugeValue, float64(startTime.Nanoseconds()/1e9)) + ch <- MustNewConstMetric(c.cpuTotal, CounterValue, fileTimeToSeconds(kernelTime)+fileTimeToSeconds(userTime)) + + mem, err := getProcessMemoryInfo(h) + if err != nil { + c.reportError(ch, nil, err) + return + } + ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(mem.PrivateUsage)) + ch <- MustNewConstMetric(c.rss, GaugeValue, float64(mem.WorkingSetSize)) + + handles, err := getProcessHandleCount(h) + if err != nil { + c.reportError(ch, nil, err) + return + } + ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(handles)) + ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(16*1024*1024)) // Windows has a hard-coded max limit, not per-process. 
+} + +func fileTimeToSeconds(ft windows.Filetime) float64 { + return float64(uint64(ft.HighDateTime)<<32+uint64(ft.LowDateTime)) / 1e7 +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go new file mode 100644 index 000000000..e7c0d0546 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go @@ -0,0 +1,368 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package promhttp + +import ( + "bufio" + "io" + "net" + "net/http" +) + +const ( + closeNotifier = 1 << iota + flusher + hijacker + readerFrom + pusher +) + +type delegator interface { + http.ResponseWriter + + Status() int + Written() int64 +} + +type responseWriterDelegator struct { + http.ResponseWriter + + status int + written int64 + wroteHeader bool + observeWriteHeader func(int) +} + +func (r *responseWriterDelegator) Status() int { + return r.status +} + +func (r *responseWriterDelegator) Written() int64 { + return r.written +} + +func (r *responseWriterDelegator) WriteHeader(code int) { + if r.observeWriteHeader != nil && !r.wroteHeader { + // Only call observeWriteHeader for the 1st time. It's a bug if + // WriteHeader is called more than once, but we want to protect + // against it here. Note that we still delegate the WriteHeader + // to the original ResponseWriter to not mask the bug from it. 
+ r.observeWriteHeader(code) + } + r.status = code + r.wroteHeader = true + r.ResponseWriter.WriteHeader(code) +} + +func (r *responseWriterDelegator) Write(b []byte) (int, error) { + // If applicable, call WriteHeader here so that observeWriteHeader is + // handled appropriately. + if !r.wroteHeader { + r.WriteHeader(http.StatusOK) + } + n, err := r.ResponseWriter.Write(b) + r.written += int64(n) + return n, err +} + +type closeNotifierDelegator struct{ *responseWriterDelegator } +type flusherDelegator struct{ *responseWriterDelegator } +type hijackerDelegator struct{ *responseWriterDelegator } +type readerFromDelegator struct{ *responseWriterDelegator } +type pusherDelegator struct{ *responseWriterDelegator } + +func (d closeNotifierDelegator) CloseNotify() <-chan bool { + //nolint:staticcheck // Ignore SA1019. http.CloseNotifier is deprecated but we keep it here to not break existing users. + return d.ResponseWriter.(http.CloseNotifier).CloseNotify() +} +func (d flusherDelegator) Flush() { + // If applicable, call WriteHeader here so that observeWriteHeader is + // handled appropriately. + if !d.wroteHeader { + d.WriteHeader(http.StatusOK) + } + d.ResponseWriter.(http.Flusher).Flush() +} +func (d hijackerDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) { + return d.ResponseWriter.(http.Hijacker).Hijack() +} +func (d readerFromDelegator) ReadFrom(re io.Reader) (int64, error) { + // If applicable, call WriteHeader here so that observeWriteHeader is + // handled appropriately. + if !d.wroteHeader { + d.WriteHeader(http.StatusOK) + } + n, err := d.ResponseWriter.(io.ReaderFrom).ReadFrom(re) + d.written += n + return n, err +} +func (d pusherDelegator) Push(target string, opts *http.PushOptions) error { + return d.ResponseWriter.(http.Pusher).Push(target, opts) +} + +var pickDelegator = make([]func(*responseWriterDelegator) delegator, 32) + +func init() { + // TODO(beorn7): Code generation would help here. 
+ pickDelegator[0] = func(d *responseWriterDelegator) delegator { // 0 + return d + } + pickDelegator[closeNotifier] = func(d *responseWriterDelegator) delegator { // 1 + return closeNotifierDelegator{d} + } + pickDelegator[flusher] = func(d *responseWriterDelegator) delegator { // 2 + return flusherDelegator{d} + } + pickDelegator[flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 3 + return struct { + *responseWriterDelegator + http.Flusher + http.CloseNotifier + }{d, flusherDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[hijacker] = func(d *responseWriterDelegator) delegator { // 4 + return hijackerDelegator{d} + } + pickDelegator[hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 5 + return struct { + *responseWriterDelegator + http.Hijacker + http.CloseNotifier + }{d, hijackerDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 6 + return struct { + *responseWriterDelegator + http.Hijacker + http.Flusher + }{d, hijackerDelegator{d}, flusherDelegator{d}} + } + pickDelegator[hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 7 + return struct { + *responseWriterDelegator + http.Hijacker + http.Flusher + http.CloseNotifier + }{d, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[readerFrom] = func(d *responseWriterDelegator) delegator { // 8 + return readerFromDelegator{d} + } + pickDelegator[readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 9 + return struct { + *responseWriterDelegator + io.ReaderFrom + http.CloseNotifier + }{d, readerFromDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 10 + return struct { + *responseWriterDelegator + io.ReaderFrom + http.Flusher + }{d, readerFromDelegator{d}, flusherDelegator{d}} + } + 
pickDelegator[readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 11 + return struct { + *responseWriterDelegator + io.ReaderFrom + http.Flusher + http.CloseNotifier + }{d, readerFromDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 12 + return struct { + *responseWriterDelegator + io.ReaderFrom + http.Hijacker + }{d, readerFromDelegator{d}, hijackerDelegator{d}} + } + pickDelegator[readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 13 + return struct { + *responseWriterDelegator + io.ReaderFrom + http.Hijacker + http.CloseNotifier + }{d, readerFromDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 14 + return struct { + *responseWriterDelegator + io.ReaderFrom + http.Hijacker + http.Flusher + }{d, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}} + } + pickDelegator[readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 15 + return struct { + *responseWriterDelegator + io.ReaderFrom + http.Hijacker + http.Flusher + http.CloseNotifier + }{d, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[pusher] = func(d *responseWriterDelegator) delegator { // 16 + return pusherDelegator{d} + } + pickDelegator[pusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 17 + return struct { + *responseWriterDelegator + http.Pusher + http.CloseNotifier + }{d, pusherDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[pusher+flusher] = func(d *responseWriterDelegator) delegator { // 18 + return struct { + *responseWriterDelegator + http.Pusher + http.Flusher + }{d, pusherDelegator{d}, flusherDelegator{d}} + } + pickDelegator[pusher+flusher+closeNotifier] = func(d 
*responseWriterDelegator) delegator { // 19 + return struct { + *responseWriterDelegator + http.Pusher + http.Flusher + http.CloseNotifier + }{d, pusherDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[pusher+hijacker] = func(d *responseWriterDelegator) delegator { // 20 + return struct { + *responseWriterDelegator + http.Pusher + http.Hijacker + }{d, pusherDelegator{d}, hijackerDelegator{d}} + } + pickDelegator[pusher+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 21 + return struct { + *responseWriterDelegator + http.Pusher + http.Hijacker + http.CloseNotifier + }{d, pusherDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[pusher+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 22 + return struct { + *responseWriterDelegator + http.Pusher + http.Hijacker + http.Flusher + }{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}} + } + pickDelegator[pusher+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { //23 + return struct { + *responseWriterDelegator + http.Pusher + http.Hijacker + http.Flusher + http.CloseNotifier + }{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[pusher+readerFrom] = func(d *responseWriterDelegator) delegator { // 24 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + }{d, pusherDelegator{d}, readerFromDelegator{d}} + } + pickDelegator[pusher+readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 25 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.CloseNotifier + }{d, pusherDelegator{d}, readerFromDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[pusher+readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 26 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.Flusher + }{d, pusherDelegator{d}, 
readerFromDelegator{d}, flusherDelegator{d}} + } + pickDelegator[pusher+readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 27 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.Flusher + http.CloseNotifier + }{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[pusher+readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 28 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.Hijacker + }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}} + } + pickDelegator[pusher+readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 29 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.Hijacker + http.CloseNotifier + }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[pusher+readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 30 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.Hijacker + http.Flusher + }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}} + } + pickDelegator[pusher+readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 31 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.Hijacker + http.Flusher + http.CloseNotifier + }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} + } +} + +func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator { + d := &responseWriterDelegator{ + ResponseWriter: w, + observeWriteHeader: observeWriteHeaderFunc, + } + + id := 0 + //nolint:staticcheck // Ignore SA1019. http.CloseNotifier is deprecated but we keep it here to not break existing users. 
+ if _, ok := w.(http.CloseNotifier); ok { + id += closeNotifier + } + if _, ok := w.(http.Flusher); ok { + id += flusher + } + if _, ok := w.(http.Hijacker); ok { + id += hijacker + } + if _, ok := w.(io.ReaderFrom); ok { + id += readerFrom + } + if _, ok := w.(http.Pusher); ok { + id += pusher + } + + return pickDelegator[id](d) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go new file mode 100644 index 000000000..d86d0cf4b --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go @@ -0,0 +1,383 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package promhttp provides tooling around HTTP servers and clients. +// +// First, the package allows the creation of http.Handler instances to expose +// Prometheus metrics via HTTP. promhttp.Handler acts on the +// prometheus.DefaultGatherer. With HandlerFor, you can create a handler for a +// custom registry or anything that implements the Gatherer interface. It also +// allows the creation of handlers that act differently on errors or allow to +// log errors. +// +// Second, the package provides tooling to instrument instances of http.Handler +// via middleware. Middleware wrappers follow the naming scheme +// InstrumentHandlerX, where X describes the intended use of the middleware. 
+// See each function's doc comment for specific details. +// +// Finally, the package allows for an http.RoundTripper to be instrumented via +// middleware. Middleware wrappers follow the naming scheme +// InstrumentRoundTripperX, where X describes the intended use of the +// middleware. See each function's doc comment for specific details. +package promhttp + +import ( + "compress/gzip" + "fmt" + "io" + "net/http" + "strings" + "sync" + "time" + + "github.com/prometheus/common/expfmt" + + "github.com/prometheus/client_golang/prometheus" +) + +const ( + contentTypeHeader = "Content-Type" + contentEncodingHeader = "Content-Encoding" + acceptEncodingHeader = "Accept-Encoding" +) + +var gzipPool = sync.Pool{ + New: func() interface{} { + return gzip.NewWriter(nil) + }, +} + +// Handler returns an http.Handler for the prometheus.DefaultGatherer, using +// default HandlerOpts, i.e. it reports the first error as an HTTP error, it has +// no error logging, and it applies compression if requested by the client. +// +// The returned http.Handler is already instrumented using the +// InstrumentMetricHandler function and the prometheus.DefaultRegisterer. If you +// create multiple http.Handlers by separate calls of the Handler function, the +// metrics used for instrumentation will be shared between them, providing +// global scrape counts. +// +// This function is meant to cover the bulk of basic use cases. If you are doing +// anything that requires more customization (including using a non-default +// Gatherer, different instrumentation, and non-default HandlerOpts), use the +// HandlerFor function. See there for details. +func Handler() http.Handler { + return InstrumentMetricHandler( + prometheus.DefaultRegisterer, HandlerFor(prometheus.DefaultGatherer, HandlerOpts{}), + ) +} + +// HandlerFor returns an uninstrumented http.Handler for the provided +// Gatherer. The behavior of the Handler is defined by the provided +// HandlerOpts. 
Thus, HandlerFor is useful to create http.Handlers for custom +// Gatherers, with non-default HandlerOpts, and/or with custom (or no) +// instrumentation. Use the InstrumentMetricHandler function to apply the same +// kind of instrumentation as it is used by the Handler function. +func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler { + var ( + inFlightSem chan struct{} + errCnt = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "promhttp_metric_handler_errors_total", + Help: "Total number of internal errors encountered by the promhttp metric handler.", + }, + []string{"cause"}, + ) + ) + + if opts.MaxRequestsInFlight > 0 { + inFlightSem = make(chan struct{}, opts.MaxRequestsInFlight) + } + if opts.Registry != nil { + // Initialize all possibilities that can occur below. + errCnt.WithLabelValues("gathering") + errCnt.WithLabelValues("encoding") + if err := opts.Registry.Register(errCnt); err != nil { + if are, ok := err.(prometheus.AlreadyRegisteredError); ok { + errCnt = are.ExistingCollector.(*prometheus.CounterVec) + } else { + panic(err) + } + } + } + + h := http.HandlerFunc(func(rsp http.ResponseWriter, req *http.Request) { + if inFlightSem != nil { + select { + case inFlightSem <- struct{}{}: // All good, carry on. + defer func() { <-inFlightSem }() + default: + http.Error(rsp, fmt.Sprintf( + "Limit of concurrent requests reached (%d), try again later.", opts.MaxRequestsInFlight, + ), http.StatusServiceUnavailable) + return + } + } + mfs, err := reg.Gather() + if err != nil { + if opts.ErrorLog != nil { + opts.ErrorLog.Println("error gathering metrics:", err) + } + errCnt.WithLabelValues("gathering").Inc() + switch opts.ErrorHandling { + case PanicOnError: + panic(err) + case ContinueOnError: + if len(mfs) == 0 { + // Still report the error if no metrics have been gathered. 
+ httpError(rsp, err) + return + } + case HTTPErrorOnError: + httpError(rsp, err) + return + } + } + + var contentType expfmt.Format + if opts.EnableOpenMetrics { + contentType = expfmt.NegotiateIncludingOpenMetrics(req.Header) + } else { + contentType = expfmt.Negotiate(req.Header) + } + header := rsp.Header() + header.Set(contentTypeHeader, string(contentType)) + + w := io.Writer(rsp) + if !opts.DisableCompression && gzipAccepted(req.Header) { + header.Set(contentEncodingHeader, "gzip") + gz := gzipPool.Get().(*gzip.Writer) + defer gzipPool.Put(gz) + + gz.Reset(w) + defer gz.Close() + + w = gz + } + + enc := expfmt.NewEncoder(w, contentType) + + // handleError handles the error according to opts.ErrorHandling + // and returns true if we have to abort after the handling. + handleError := func(err error) bool { + if err == nil { + return false + } + if opts.ErrorLog != nil { + opts.ErrorLog.Println("error encoding and sending metric family:", err) + } + errCnt.WithLabelValues("encoding").Inc() + switch opts.ErrorHandling { + case PanicOnError: + panic(err) + case HTTPErrorOnError: + // We cannot really send an HTTP error at this + // point because we most likely have written + // something to rsp already. But at least we can + // stop sending. + return true + } + // Do nothing in all other cases, including ContinueOnError. + return false + } + + for _, mf := range mfs { + if handleError(enc.Encode(mf)) { + return + } + } + if closer, ok := enc.(expfmt.Closer); ok { + // This in particular takes care of the final "# EOF\n" line for OpenMetrics. + if handleError(closer.Close()) { + return + } + } + }) + + if opts.Timeout <= 0 { + return h + } + return http.TimeoutHandler(h, opts.Timeout, fmt.Sprintf( + "Exceeded configured timeout of %v.\n", + opts.Timeout, + )) +} + +// InstrumentMetricHandler is usually used with an http.Handler returned by the +// HandlerFor function. 
It instruments the provided http.Handler with two +// metrics: A counter vector "promhttp_metric_handler_requests_total" to count +// scrapes partitioned by HTTP status code, and a gauge +// "promhttp_metric_handler_requests_in_flight" to track the number of +// simultaneous scrapes. This function idempotently registers collectors for +// both metrics with the provided Registerer. It panics if the registration +// fails. The provided metrics are useful to see how many scrapes hit the +// monitored target (which could be from different Prometheus servers or other +// scrapers), and how often they overlap (which would result in more than one +// scrape in flight at the same time). Note that the scrapes-in-flight gauge +// will contain the scrape by which it is exposed, while the scrape counter will +// only get incremented after the scrape is complete (as only then the status +// code is known). For tracking scrape durations, use the +// "scrape_duration_seconds" gauge created by the Prometheus server upon each +// scrape. +func InstrumentMetricHandler(reg prometheus.Registerer, handler http.Handler) http.Handler { + cnt := prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "promhttp_metric_handler_requests_total", + Help: "Total number of scrapes by HTTP status code.", + }, + []string{"code"}, + ) + // Initialize the most likely HTTP status codes. 
// HandlerErrorHandling defines how a Handler serving metrics will handle
// errors.
type HandlerErrorHandling int

// These constants cause handlers serving metrics to behave as described if
// errors are encountered.
const (
	// HTTPErrorOnError serves an HTTP status code 500 upon the first error
	// encountered. The error message is reported in the body. Note that
	// HTTP errors cannot be served anymore once the beginning of a regular
	// payload has been sent. Thus, in the (unlikely) case that encoding
	// the payload into the negotiated wire format fails, serving the
	// response will simply be aborted. Set an ErrorLog in HandlerOpts to
	// detect those errors.
	HTTPErrorOnError HandlerErrorHandling = iota
	// ContinueOnError ignores errors and tries to serve as many metrics as
	// possible. However, if no metrics can be served, an HTTP status code
	// 500 and the last error message are served instead. Only use this in
	// deliberate "best effort" metrics collection scenarios. In this case,
	// it is highly recommended to provide other means of detecting errors:
	// by setting an ErrorLog in HandlerOpts, the errors are logged; by
	// providing a Registry in HandlerOpts, the exposed metrics include an
	// error counter "promhttp_metric_handler_errors_total", which can be
	// used for alerts.
	ContinueOnError
	// PanicOnError panics upon the first error encountered (useful for
	// "crash only" apps).
	PanicOnError
)

// Logger is the minimal interface HandlerOpts needs for logging. Note that
// log.Logger from the standard library implements this interface, and it is
// easy to implement by custom loggers, if they don't do so already anyway.
type Logger interface {
	Println(v ...interface{})
}

// HandlerOpts specifies options how to serve metrics via an http.Handler. The
// zero value of HandlerOpts is a reasonable default.
type HandlerOpts struct {
	// ErrorLog specifies an optional Logger for errors collecting and
	// serving metrics. If nil, errors are not logged at all. Note that the
	// type of a reported error is often prometheus.MultiError, which
	// formats into a multi-line error string. If you want to avoid the
	// latter, create a Logger implementation that detects a
	// prometheus.MultiError and formats the contained errors into one
	// line.
	ErrorLog Logger
	// ErrorHandling defines how errors are handled. Note that errors are
	// logged regardless of the configured ErrorHandling provided ErrorLog
	// is not nil.
	ErrorHandling HandlerErrorHandling
	// If Registry is not nil, it is used to register a metric
	// "promhttp_metric_handler_errors_total", partitioned by "cause". A
	// failed registration causes a panic. Note that this error counter is
	// different from the instrumentation you get from the various
	// InstrumentHandler... helpers. It counts errors that don't
	// necessarily result in a non-2xx HTTP status code. There are two
	// typical cases: (1) Encoding errors that only happen after streaming
	// of the HTTP body has already started (and the status code 200 has
	// been sent); this should only happen with custom collectors. (2)
	// Collection errors with no effect on the HTTP status code because
	// ErrorHandling is set to ContinueOnError.
	Registry prometheus.Registerer
	// If DisableCompression is true, the handler will never compress the
	// response, even if requested by the client.
	DisableCompression bool
	// The number of concurrent HTTP requests is limited to
	// MaxRequestsInFlight. Additional requests are responded to with 503
	// Service Unavailable and a suitable message in the body. If
	// MaxRequestsInFlight is 0 or negative, no limit is applied.
	MaxRequestsInFlight int
	// If handling a request takes longer than Timeout, it is responded to
	// with 503 Service Unavailable and a suitable message. No timeout is
	// applied if Timeout is 0 or negative. Note that with the current
	// implementation, reaching the timeout simply ends the HTTP request as
	// described above (and even that only if sending of the body hasn't
	// started yet), while the bulk work of gathering all the metrics keeps
	// running in the background (with the eventual result to be thrown
	// away). Until the implementation is improved, it is recommended to
	// implement a separate timeout in potentially slow Collectors.
	Timeout time.Duration
	// If true, the experimental OpenMetrics encoding is added to the
	// possible options during content negotiation. Note that Prometheus
	// 2.5.0+ will negotiate OpenMetrics as first priority. OpenMetrics is
	// the only way to transmit exemplars. However, the move to OpenMetrics
	// is not completely transparent. Most notably, the values of
	// "quantile" labels of Summaries and "le" labels of Histograms are
	// formatted with a trailing ".0" if they would otherwise look like
	// integer numbers (which changes the identity of the resulting series
	// on the Prometheus server).
	EnableOpenMetrics bool
}
+func gzipAccepted(header http.Header) bool { + a := header.Get(acceptEncodingHeader) + parts := strings.Split(a, ",") + for _, part := range parts { + part = strings.TrimSpace(part) + if part == "gzip" || strings.HasPrefix(part, "gzip;") { + return true + } + } + return false +} + +// httpError removes any content-encoding header and then calls http.Error with +// the provided error and http.StatusInternalServerError. Error contents is +// supposed to be uncompressed plain text. Same as with a plain http.Error, this +// must not be called if the header or any payload has already been sent. +func httpError(rsp http.ResponseWriter, err error) { + rsp.Header().Del(contentEncodingHeader) + http.Error( + rsp, + "An error has occurred while serving metrics:\n\n"+err.Error(), + http.StatusInternalServerError, + ) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go new file mode 100644 index 000000000..83c49b66a --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go @@ -0,0 +1,219 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package promhttp + +import ( + "crypto/tls" + "net/http" + "net/http/httptrace" + "time" + + "github.com/prometheus/client_golang/prometheus" +) + +// The RoundTripperFunc type is an adapter to allow the use of ordinary +// functions as RoundTrippers. If f is a function with the appropriate +// signature, RountTripperFunc(f) is a RoundTripper that calls f. +type RoundTripperFunc func(req *http.Request) (*http.Response, error) + +// RoundTrip implements the RoundTripper interface. +func (rt RoundTripperFunc) RoundTrip(r *http.Request) (*http.Response, error) { + return rt(r) +} + +// InstrumentRoundTripperInFlight is a middleware that wraps the provided +// http.RoundTripper. It sets the provided prometheus.Gauge to the number of +// requests currently handled by the wrapped http.RoundTripper. +// +// See the example for ExampleInstrumentRoundTripperDuration for example usage. +func InstrumentRoundTripperInFlight(gauge prometheus.Gauge, next http.RoundTripper) RoundTripperFunc { + return RoundTripperFunc(func(r *http.Request) (*http.Response, error) { + gauge.Inc() + defer gauge.Dec() + return next.RoundTrip(r) + }) +} + +// InstrumentRoundTripperCounter is a middleware that wraps the provided +// http.RoundTripper to observe the request result with the provided CounterVec. +// The CounterVec must have zero, one, or two non-const non-curried labels. For +// those, the only allowed label names are "code" and "method". The function +// panics otherwise. Partitioning of the CounterVec happens by HTTP status code +// and/or HTTP method if the respective instance label names are present in the +// CounterVec. For unpartitioned counting, use a CounterVec with zero labels. +// +// If the wrapped RoundTripper panics or returns a non-nil error, the Counter +// is not incremented. +// +// See the example for ExampleInstrumentRoundTripperDuration for example usage. 
+func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.RoundTripper) RoundTripperFunc { + code, method := checkLabels(counter) + + return RoundTripperFunc(func(r *http.Request) (*http.Response, error) { + resp, err := next.RoundTrip(r) + if err == nil { + counter.With(labels(code, method, r.Method, resp.StatusCode)).Inc() + } + return resp, err + }) +} + +// InstrumentRoundTripperDuration is a middleware that wraps the provided +// http.RoundTripper to observe the request duration with the provided +// ObserverVec. The ObserverVec must have zero, one, or two non-const +// non-curried labels. For those, the only allowed label names are "code" and +// "method". The function panics otherwise. The Observe method of the Observer +// in the ObserverVec is called with the request duration in +// seconds. Partitioning happens by HTTP status code and/or HTTP method if the +// respective instance label names are present in the ObserverVec. For +// unpartitioned observations, use an ObserverVec with zero labels. Note that +// partitioning of Histograms is expensive and should be used judiciously. +// +// If the wrapped RoundTripper panics or returns a non-nil error, no values are +// reported. +// +// Note that this method is only guaranteed to never observe negative durations +// if used with Go1.9+. +func InstrumentRoundTripperDuration(obs prometheus.ObserverVec, next http.RoundTripper) RoundTripperFunc { + code, method := checkLabels(obs) + + return RoundTripperFunc(func(r *http.Request) (*http.Response, error) { + start := time.Now() + resp, err := next.RoundTrip(r) + if err == nil { + obs.With(labels(code, method, r.Method, resp.StatusCode)).Observe(time.Since(start).Seconds()) + } + return resp, err + }) +} + +// InstrumentTrace is used to offer flexibility in instrumenting the available +// httptrace.ClientTrace hook functions. Each function is passed a float64 +// representing the time in seconds since the start of the http request. 
A user +// may choose to use separately buckets Histograms, or implement custom +// instance labels on a per function basis. +type InstrumentTrace struct { + GotConn func(float64) + PutIdleConn func(float64) + GotFirstResponseByte func(float64) + Got100Continue func(float64) + DNSStart func(float64) + DNSDone func(float64) + ConnectStart func(float64) + ConnectDone func(float64) + TLSHandshakeStart func(float64) + TLSHandshakeDone func(float64) + WroteHeaders func(float64) + Wait100Continue func(float64) + WroteRequest func(float64) +} + +// InstrumentRoundTripperTrace is a middleware that wraps the provided +// RoundTripper and reports times to hook functions provided in the +// InstrumentTrace struct. Hook functions that are not present in the provided +// InstrumentTrace struct are ignored. Times reported to the hook functions are +// time since the start of the request. Only with Go1.9+, those times are +// guaranteed to never be negative. (Earlier Go versions are not using a +// monotonic clock.) Note that partitioning of Histograms is expensive and +// should be used judiciously. +// +// For hook functions that receive an error as an argument, no observations are +// made in the event of a non-nil error value. +// +// See the example for ExampleInstrumentRoundTripperDuration for example usage. 
+func InstrumentRoundTripperTrace(it *InstrumentTrace, next http.RoundTripper) RoundTripperFunc { + return RoundTripperFunc(func(r *http.Request) (*http.Response, error) { + start := time.Now() + + trace := &httptrace.ClientTrace{ + GotConn: func(_ httptrace.GotConnInfo) { + if it.GotConn != nil { + it.GotConn(time.Since(start).Seconds()) + } + }, + PutIdleConn: func(err error) { + if err != nil { + return + } + if it.PutIdleConn != nil { + it.PutIdleConn(time.Since(start).Seconds()) + } + }, + DNSStart: func(_ httptrace.DNSStartInfo) { + if it.DNSStart != nil { + it.DNSStart(time.Since(start).Seconds()) + } + }, + DNSDone: func(_ httptrace.DNSDoneInfo) { + if it.DNSDone != nil { + it.DNSDone(time.Since(start).Seconds()) + } + }, + ConnectStart: func(_, _ string) { + if it.ConnectStart != nil { + it.ConnectStart(time.Since(start).Seconds()) + } + }, + ConnectDone: func(_, _ string, err error) { + if err != nil { + return + } + if it.ConnectDone != nil { + it.ConnectDone(time.Since(start).Seconds()) + } + }, + GotFirstResponseByte: func() { + if it.GotFirstResponseByte != nil { + it.GotFirstResponseByte(time.Since(start).Seconds()) + } + }, + Got100Continue: func() { + if it.Got100Continue != nil { + it.Got100Continue(time.Since(start).Seconds()) + } + }, + TLSHandshakeStart: func() { + if it.TLSHandshakeStart != nil { + it.TLSHandshakeStart(time.Since(start).Seconds()) + } + }, + TLSHandshakeDone: func(_ tls.ConnectionState, err error) { + if err != nil { + return + } + if it.TLSHandshakeDone != nil { + it.TLSHandshakeDone(time.Since(start).Seconds()) + } + }, + WroteHeaders: func() { + if it.WroteHeaders != nil { + it.WroteHeaders(time.Since(start).Seconds()) + } + }, + Wait100Continue: func() { + if it.Wait100Continue != nil { + it.Wait100Continue(time.Since(start).Seconds()) + } + }, + WroteRequest: func(_ httptrace.WroteRequestInfo) { + if it.WroteRequest != nil { + it.WroteRequest(time.Since(start).Seconds()) + } + }, + } + r = 
r.WithContext(httptrace.WithClientTrace(r.Context(), trace)) + + return next.RoundTrip(r) + }) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go new file mode 100644 index 000000000..ab037db86 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go @@ -0,0 +1,458 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package promhttp + +import ( + "errors" + "net/http" + "strconv" + "strings" + "time" + + dto "github.com/prometheus/client_model/go" + + "github.com/prometheus/client_golang/prometheus" +) + +// magicString is used for the hacky label test in checkLabels. Remove once fixed. +const magicString = "zZgWfBxLqvG8kc8IMv3POi2Bb0tZI3vAnBx+gBaFi9FyPzB/CzKUer1yufDa" + +// InstrumentHandlerInFlight is a middleware that wraps the provided +// http.Handler. It sets the provided prometheus.Gauge to the number of +// requests currently handled by the wrapped http.Handler. +// +// See the example for InstrumentHandlerDuration for example usage. 
+func InstrumentHandlerInFlight(g prometheus.Gauge, next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + g.Inc() + defer g.Dec() + next.ServeHTTP(w, r) + }) +} + +// InstrumentHandlerDuration is a middleware that wraps the provided +// http.Handler to observe the request duration with the provided ObserverVec. +// The ObserverVec must have valid metric and label names and must have zero, +// one, or two non-const non-curried labels. For those, the only allowed label +// names are "code" and "method". The function panics otherwise. The Observe +// method of the Observer in the ObserverVec is called with the request duration +// in seconds. Partitioning happens by HTTP status code and/or HTTP method if +// the respective instance label names are present in the ObserverVec. For +// unpartitioned observations, use an ObserverVec with zero labels. Note that +// partitioning of Histograms is expensive and should be used judiciously. +// +// If the wrapped Handler does not set a status code, a status code of 200 is assumed. +// +// If the wrapped Handler panics, no values are reported. +// +// Note that this method is only guaranteed to never observe negative durations +// if used with Go1.9+. 
+func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc { + code, method := checkLabels(obs) + + if code { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + now := time.Now() + d := newDelegator(w, nil) + next.ServeHTTP(d, r) + + obs.With(labels(code, method, r.Method, d.Status())).Observe(time.Since(now).Seconds()) + }) + } + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + now := time.Now() + next.ServeHTTP(w, r) + obs.With(labels(code, method, r.Method, 0)).Observe(time.Since(now).Seconds()) + }) +} + +// InstrumentHandlerCounter is a middleware that wraps the provided http.Handler +// to observe the request result with the provided CounterVec. The CounterVec +// must have valid metric and label names and must have zero, one, or two +// non-const non-curried labels. For those, the only allowed label names are +// "code" and "method". The function panics otherwise. Partitioning of the +// CounterVec happens by HTTP status code and/or HTTP method if the respective +// instance label names are present in the CounterVec. For unpartitioned +// counting, use a CounterVec with zero labels. +// +// If the wrapped Handler does not set a status code, a status code of 200 is assumed. +// +// If the wrapped Handler panics, the Counter is not incremented. +// +// See the example for InstrumentHandlerDuration for example usage. 
+func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler) http.HandlerFunc { + code, method := checkLabels(counter) + + if code { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + d := newDelegator(w, nil) + next.ServeHTTP(d, r) + counter.With(labels(code, method, r.Method, d.Status())).Inc() + }) + } + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + next.ServeHTTP(w, r) + counter.With(labels(code, method, r.Method, 0)).Inc() + }) +} + +// InstrumentHandlerTimeToWriteHeader is a middleware that wraps the provided +// http.Handler to observe with the provided ObserverVec the request duration +// until the response headers are written. The ObserverVec must have valid +// metric and label names and must have zero, one, or two non-const non-curried +// labels. For those, the only allowed label names are "code" and "method". The +// function panics otherwise. The Observe method of the Observer in the +// ObserverVec is called with the request duration in seconds. Partitioning +// happens by HTTP status code and/or HTTP method if the respective instance +// label names are present in the ObserverVec. For unpartitioned observations, +// use an ObserverVec with zero labels. Note that partitioning of Histograms is +// expensive and should be used judiciously. +// +// If the wrapped Handler panics before calling WriteHeader, no value is +// reported. +// +// Note that this method is only guaranteed to never observe negative durations +// if used with Go1.9+. +// +// See the example for InstrumentHandlerDuration for example usage. 
+func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc { + code, method := checkLabels(obs) + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + now := time.Now() + d := newDelegator(w, func(status int) { + obs.With(labels(code, method, r.Method, status)).Observe(time.Since(now).Seconds()) + }) + next.ServeHTTP(d, r) + }) +} + +// InstrumentHandlerRequestSize is a middleware that wraps the provided +// http.Handler to observe the request size with the provided ObserverVec. The +// ObserverVec must have valid metric and label names and must have zero, one, +// or two non-const non-curried labels. For those, the only allowed label names +// are "code" and "method". The function panics otherwise. The Observe method of +// the Observer in the ObserverVec is called with the request size in +// bytes. Partitioning happens by HTTP status code and/or HTTP method if the +// respective instance label names are present in the ObserverVec. For +// unpartitioned observations, use an ObserverVec with zero labels. Note that +// partitioning of Histograms is expensive and should be used judiciously. +// +// If the wrapped Handler does not set a status code, a status code of 200 is assumed. +// +// If the wrapped Handler panics, no values are reported. +// +// See the example for InstrumentHandlerDuration for example usage. 
+func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc { + code, method := checkLabels(obs) + + if code { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + d := newDelegator(w, nil) + next.ServeHTTP(d, r) + size := computeApproximateRequestSize(r) + obs.With(labels(code, method, r.Method, d.Status())).Observe(float64(size)) + }) + } + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + next.ServeHTTP(w, r) + size := computeApproximateRequestSize(r) + obs.With(labels(code, method, r.Method, 0)).Observe(float64(size)) + }) +} + +// InstrumentHandlerResponseSize is a middleware that wraps the provided +// http.Handler to observe the response size with the provided ObserverVec. The +// ObserverVec must have valid metric and label names and must have zero, one, +// or two non-const non-curried labels. For those, the only allowed label names +// are "code" and "method". The function panics otherwise. The Observe method of +// the Observer in the ObserverVec is called with the response size in +// bytes. Partitioning happens by HTTP status code and/or HTTP method if the +// respective instance label names are present in the ObserverVec. For +// unpartitioned observations, use an ObserverVec with zero labels. Note that +// partitioning of Histograms is expensive and should be used judiciously. +// +// If the wrapped Handler does not set a status code, a status code of 200 is assumed. +// +// If the wrapped Handler panics, no values are reported. +// +// See the example for InstrumentHandlerDuration for example usage. 
// InstrumentHandlerResponseSize is a middleware that wraps the provided
// http.Handler to observe the response size in bytes with the provided
// ObserverVec, partitioned by "code" and/or "method" if those labels are
// present in the ObserverVec.
func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler) http.Handler {
	code, method := checkLabels(obs)
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		d := newDelegator(w, nil)
		next.ServeHTTP(d, r)
		obs.With(labels(code, method, r.Method, d.Status())).Observe(float64(d.Written()))
	})
}

// checkLabels returns whether the provided Collector has a non-const,
// non-curried label named "code" and/or "method". It panics if the provided
// Collector does not have a Desc or has more than one Desc or its Desc is
// invalid. It also panics if the Collector has any non-const, non-curried
// labels that are not named "code" or "method".
func checkLabels(c prometheus.Collector) (code bool, method bool) {
	// TODO(beorn7): Remove this hacky way to check for instance labels
	// once Descriptors can have their dimensionality queried.
	var (
		desc *prometheus.Desc
		m    prometheus.Metric
		pm   dto.Metric
		lvs  []string
	)

	// Get the single Desc from the Collector via its Describe method. The
	// buffered channel holds one Desc; a second one left pending means the
	// Collector is multi-Desc and therefore unsupported here.
	descc := make(chan *prometheus.Desc, 1)
	c.Describe(descc)

	select {
	case desc = <-descc:
	default:
		panic("no description provided by collector")
	}
	select {
	case <-descc:
		panic("more than one description provided by collector")
	default:
	}

	close(descc)

	// Make sure the Collector has a valid Desc by registering it with a
	// temporary registry.
	prometheus.NewRegistry().MustRegister(c)

	// Create a ConstMetric with the Desc. Since we don't know how many
	// variable labels there are, try for as long as it needs: each failed
	// attempt appends one more magicString label value until the count
	// matches the Desc's variable-label dimensionality.
	for err := errors.New("dummy"); err != nil; lvs = append(lvs, magicString) {
		m, err = prometheus.NewConstMetric(desc, prometheus.UntypedValue, 0, lvs...)
	}

	// Write out the metric into a proto message and look at the labels.
	// If the value is not the magicString, it is a constLabel, which doesn't interest us.
	// If the label is curried, it doesn't interest us.
	// In all other cases, only "code" or "method" is allowed.
	if err := m.Write(&pm); err != nil {
		panic("error checking metric for labels")
	}
	for _, label := range pm.Label {
		name, value := label.GetName(), label.GetValue()
		if value != magicString || isLabelCurried(c, name) {
			continue
		}
		switch name {
		case "code":
			code = true
		case "method":
			method = true
		default:
			panic("metric partitioned with non-supported labels")
		}
	}
	return
}

// isLabelCurried reports whether the given label name is already curried on
// the Collector.
func isLabelCurried(c prometheus.Collector, label string) bool {
	// This is even hackier than the label test above.
	// We essentially try to curry again and see if it works.
	// But for that, we need to type-convert to the two
	// types we use here, ObserverVec or *CounterVec.
	switch v := c.(type) {
	case *prometheus.CounterVec:
		if _, err := v.CurryWith(prometheus.Labels{label: "dummy"}); err == nil {
			return false
		}
	case prometheus.ObserverVec:
		if _, err := v.CurryWith(prometheus.Labels{label: "dummy"}); err == nil {
			return false
		}
	default:
		panic("unsupported metric vec type")
	}
	return true
}

// emptyLabels is a one-time allocation for non-partitioned metrics to avoid
// unnecessary allocations on each request.
var emptyLabels = prometheus.Labels{}

// labels builds the prometheus.Labels for one observation, including only the
// label names ("code" and/or "method") the instrumented vector declares.
func labels(code, method bool, reqMethod string, status int) prometheus.Labels {
	if !(code || method) {
		return emptyLabels
	}
	labels := prometheus.Labels{}

	if code {
		labels["code"] = sanitizeCode(status)
	}
	if method {
		labels["method"] = sanitizeMethod(reqMethod)
	}

	return labels
}

// computeApproximateRequestSize estimates the wire size of the request in
// bytes: URL, method, protocol, headers, host, and declared body length.
func computeApproximateRequestSize(r *http.Request) int {
	s := 0
	if r.URL != nil {
		s += len(r.URL.String())
	}

	s += len(r.Method)
	s += len(r.Proto)
	for name, values := range r.Header {
		s += len(name)
		for _, value := range values {
			s += len(value)
		}
	}
	s += len(r.Host)

	// N.B. r.Form and r.MultipartForm are assumed to be included in r.URL.

	// ContentLength is -1 when unknown; only count it when declared.
	if r.ContentLength != -1 {
		s += int(r.ContentLength)
	}
	return s
}
+ + if r.ContentLength != -1 { + s += int(r.ContentLength) + } + return s +} + +func sanitizeMethod(m string) string { + switch m { + case "GET", "get": + return "get" + case "PUT", "put": + return "put" + case "HEAD", "head": + return "head" + case "POST", "post": + return "post" + case "DELETE", "delete": + return "delete" + case "CONNECT", "connect": + return "connect" + case "OPTIONS", "options": + return "options" + case "NOTIFY", "notify": + return "notify" + default: + return strings.ToLower(m) + } +} + +// If the wrapped http.Handler has not set a status code, i.e. the value is +// currently 0, santizeCode will return 200, for consistency with behavior in +// the stdlib. +func sanitizeCode(s int) string { + switch s { + case 100: + return "100" + case 101: + return "101" + + case 200, 0: + return "200" + case 201: + return "201" + case 202: + return "202" + case 203: + return "203" + case 204: + return "204" + case 205: + return "205" + case 206: + return "206" + + case 300: + return "300" + case 301: + return "301" + case 302: + return "302" + case 304: + return "304" + case 305: + return "305" + case 307: + return "307" + + case 400: + return "400" + case 401: + return "401" + case 402: + return "402" + case 403: + return "403" + case 404: + return "404" + case 405: + return "405" + case 406: + return "406" + case 407: + return "407" + case 408: + return "408" + case 409: + return "409" + case 410: + return "410" + case 411: + return "411" + case 412: + return "412" + case 413: + return "413" + case 414: + return "414" + case 415: + return "415" + case 416: + return "416" + case 417: + return "417" + case 418: + return "418" + + case 500: + return "500" + case 501: + return "501" + case 502: + return "502" + case 503: + return "503" + case 504: + return "504" + case 505: + return "505" + + case 428: + return "428" + case 429: + return "429" + case 431: + return "431" + case 511: + return "511" + + default: + return strconv.Itoa(s) + } +} diff --git 
a/vendor/github.com/m3db/prometheus_client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go similarity index 51% rename from vendor/github.com/m3db/prometheus_client_golang/prometheus/registry.go rename to vendor/github.com/prometheus/client_golang/prometheus/registry.go index 0a246e5bc..383a7f594 100644 --- a/vendor/github.com/m3db/prometheus_client_golang/prometheus/registry.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/registry.go @@ -15,15 +15,24 @@ package prometheus import ( "bytes" - "errors" "fmt" + "io/ioutil" "os" + "path/filepath" + "runtime" "sort" + "strings" "sync" + "unicode/utf8" + "github.com/cespare/xxhash/v2" + //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. "github.com/golang/protobuf/proto" + "github.com/prometheus/common/expfmt" - dto "github.com/m3db/prometheus_client_model/go" + dto "github.com/prometheus/client_model/go" + + "github.com/prometheus/client_golang/prometheus/internal" ) const ( @@ -35,13 +44,14 @@ const ( // DefaultRegisterer and DefaultGatherer are the implementations of the // Registerer and Gatherer interface a number of convenience functions in this // package act on. Initially, both variables point to the same Registry, which -// has a process collector (see NewProcessCollector) and a Go collector (see -// NewGoCollector) already registered. This approach to keep default instances -// as global state mirrors the approach of other packages in the Go standard -// library. Note that there are caveats. Change the variables with caution and -// only if you understand the consequences. Users who want to avoid global state -// altogether should not use the convenience function and act on custom -// instances instead. 
+// has a process collector (currently on Linux only, see NewProcessCollector) +// and a Go collector (see NewGoCollector, in particular the note about +// stop-the-world implication with Go versions older than 1.9) already +// registered. This approach to keep default instances as global state mirrors +// the approach of other packages in the Go standard library. Note that there +// are caveats. Change the variables with caution and only if you understand the +// consequences. Users who want to avoid global state altogether should not use +// the convenience functions and act on custom instances instead. var ( defaultRegistry = NewRegistry() DefaultRegisterer Registerer = defaultRegistry @@ -49,7 +59,7 @@ var ( ) func init() { - MustRegister(NewProcessCollector(os.Getpid(), "")) + MustRegister(NewProcessCollector(ProcessCollectorOpts{})) MustRegister(NewGoCollector()) } @@ -65,7 +75,8 @@ func NewRegistry() *Registry { // NewPedanticRegistry returns a registry that checks during collection if each // collected Metric is consistent with its reported Desc, and if the Desc has -// actually been registered with the registry. +// actually been registered with the registry. Unchecked Collectors (those whose +// Describe method does not yield any descriptors) are excluded from the check. // // Usually, a Registry will be happy as long as the union of all collected // Metrics is consistent and valid even if some metrics are not consistent with @@ -80,7 +91,7 @@ func NewPedanticRegistry() *Registry { // Registerer is the interface for the part of a registry in charge of // registering and unregistering. Users of custom registries should use -// Registerer as type for registration purposes (rather then the Registry type +// Registerer as type for registration purposes (rather than the Registry type // directly). In that way, they are free to use custom Registerer implementation // (e.g. for testing purposes). 
type Registerer interface { @@ -95,8 +106,13 @@ type Registerer interface { // returned error is an instance of AlreadyRegisteredError, which // contains the previously registered Collector. // - // It is in general not safe to register the same Collector multiple - // times concurrently. + // A Collector whose Describe method does not yield any Desc is treated + // as unchecked. Registration will always succeed. No check for + // re-registering (see previous paragraph) is performed. Thus, the + // caller is responsible for not double-registering the same unchecked + // Collector, and for providing a Collector that will not cause + // inconsistent metrics on collection. (This would lead to scrape + // errors.) Register(Collector) error // MustRegister works like Register but registers any number of // Collectors and panics upon the first registration that causes an @@ -105,7 +121,9 @@ type Registerer interface { // Unregister unregisters the Collector that equals the Collector passed // in as an argument. (Two Collectors are considered equal if their // Describe method yields the same set of descriptors.) The function - // returns whether a Collector was unregistered. + // returns whether a Collector was unregistered. Note that an unchecked + // Collector cannot be unregistered (as its Describe method does not + // yield any descriptor). // // Note that even after unregistering, it will not be possible to // register a new Collector that is inconsistent with the unregistered @@ -123,15 +141,23 @@ type Registerer interface { type Gatherer interface { // Gather calls the Collect method of the registered Collectors and then // gathers the collected metrics into a lexicographically sorted slice - // of MetricFamily protobufs. Even if an error occurs, Gather attempts - // to gather as many metrics as possible. 
Hence, if a non-nil error is - // returned, the returned MetricFamily slice could be nil (in case of a - // fatal error that prevented any meaningful metric collection) or - // contain a number of MetricFamily protobufs, some of which might be - // incomplete, and some might be missing altogether. The returned error - // (which might be a MultiError) explains the details. In scenarios - // where complete collection is critical, the returned MetricFamily - // protobufs should be disregarded if the returned error is non-nil. + // of uniquely named MetricFamily protobufs. Gather ensures that the + // returned slice is valid and self-consistent so that it can be used + // for valid exposition. As an exception to the strict consistency + // requirements described for metric.Desc, Gather will tolerate + // different sets of label names for metrics of the same metric family. + // + // Even if an error occurs, Gather attempts to gather as many metrics as + // possible. Hence, if a non-nil error is returned, the returned + // MetricFamily slice could be nil (in case of a fatal error that + // prevented any meaningful metric collection) or contain a number of + // MetricFamily protobufs, some of which might be incomplete, and some + // might be missing altogether. The returned error (which might be a + // MultiError) explains the details. Note that this is mostly useful for + // debugging purposes. If the gathered protobufs are to be used for + // exposition in actual monitoring, it is almost always better to not + // expose an incomplete result and instead disregard the returned + // MetricFamily protobufs in case the returned error is non-nil. Gather() ([]*dto.MetricFamily, error) } @@ -152,38 +178,6 @@ func MustRegister(cs ...Collector) { DefaultRegisterer.MustRegister(cs...) 
} -// RegisterOrGet registers the provided Collector with the DefaultRegisterer and -// returns the Collector, unless an equal Collector was registered before, in -// which case that Collector is returned. -// -// Deprecated: RegisterOrGet is merely a convenience function for the -// implementation as described in the documentation for -// AlreadyRegisteredError. As the use case is relatively rare, this function -// will be removed in a future version of this package to clean up the -// namespace. -func RegisterOrGet(c Collector) (Collector, error) { - if err := Register(c); err != nil { - if are, ok := err.(AlreadyRegisteredError); ok { - return are.ExistingCollector, nil - } - return nil, err - } - return c, nil -} - -// MustRegisterOrGet behaves like RegisterOrGet but panics instead of returning -// an error. -// -// Deprecated: This is deprecated for the same reason RegisterOrGet is. See -// there for details. -func MustRegisterOrGet(c Collector) Collector { - c, err := RegisterOrGet(c) - if err != nil { - panic(err) - } - return c -} - // Unregister removes the registration of the provided Collector from the // DefaultRegisterer. // @@ -201,25 +195,6 @@ func (gf GathererFunc) Gather() ([]*dto.MetricFamily, error) { return gf() } -// SetMetricFamilyInjectionHook replaces the DefaultGatherer with one that -// gathers from the previous DefaultGatherers but then merges the MetricFamily -// protobufs returned from the provided hook function with the MetricFamily -// protobufs returned from the original DefaultGatherer. -// -// Deprecated: This function manipulates the DefaultGatherer variable. Consider -// the implications, i.e. don't do this concurrently with any uses of the -// DefaultGatherer. In the rare cases where you need to inject MetricFamily -// protobufs directly, it is recommended to use a custom Registry and combine it -// with a custom Gatherer using the Gatherers type (see -// there). 
SetMetricFamilyInjectionHook only exists for compatibility reasons -// with previous versions of this package. -func SetMetricFamilyInjectionHook(hook func() []*dto.MetricFamily) { - DefaultGatherer = Gatherers{ - DefaultGatherer, - GathererFunc(func() ([]*dto.MetricFamily, error) { return hook(), nil }), - } -} - // AlreadyRegisteredError is returned by the Register method if the Collector to // be registered has already been registered before, or a different Collector // that collects the same metrics has been registered before. Registration fails @@ -240,6 +215,8 @@ func (err AlreadyRegisteredError) Error() string { // by a Gatherer to report multiple errors during MetricFamily gathering. type MultiError []error +// Error formats the contained errors as a bullet point list, preceded by the +// total number of errors. Note that this results in a multi-line string. func (errs MultiError) Error() string { if len(errs) == 0 { return "" @@ -252,6 +229,13 @@ func (errs MultiError) Error() string { return buf.String() } +// Append appends the provided error if it is not nil. +func (errs *MultiError) Append(err error) { + if err != nil { + *errs = append(*errs, err) + } +} + // MaybeUnwrap returns nil if len(errs) is 0. It returns the first and only // contained error as error if len(errs is 1). In all other cases, it returns // the MultiError directly. This is helpful for returning a MultiError in a way @@ -276,6 +260,7 @@ type Registry struct { collectorsByID map[uint64]Collector // ID is a hash of the descIDs. descIDs map[uint64]struct{} dimHashesByName map[string]uint64 + uncheckedCollectors []Collector pedanticChecksEnabled bool } @@ -285,7 +270,7 @@ func (r *Registry) Register(c Collector) error { descChan = make(chan *Desc, capDescChan) newDescIDs = map[uint64]struct{}{} newDimHashesByName = map[string]uint64{} - collectorID uint64 // Just a sum of all desc IDs. + collectorID uint64 // All desc IDs XOR'd together. 
duplicateDescErr error ) go func() { @@ -293,8 +278,13 @@ func (r *Registry) Register(c Collector) error { close(descChan) }() r.mtx.Lock() - defer r.mtx.Unlock() - // Coduct various tests... + defer func() { + // Drain channel in case of premature return to not leak a goroutine. + for range descChan { + } + r.mtx.Unlock() + }() + // Conduct various tests... for desc := range descChan { // Is the descriptor valid at all? @@ -307,12 +297,12 @@ func (r *Registry) Register(c Collector) error { if _, exists := r.descIDs[desc.id]; exists { duplicateDescErr = fmt.Errorf("descriptor %s already exists with the same fully-qualified name and const label values", desc) } - // If it is not a duplicate desc in this collector, add it to + // If it is not a duplicate desc in this collector, XOR it to // the collectorID. (We allow duplicate descs within the same // collector, but their existence must be a no-op.) if _, exists := newDescIDs[desc.id]; !exists { newDescIDs[desc.id] = struct{}{} - collectorID += desc.id + collectorID ^= desc.id } // Are all the label names and the help string consistent with @@ -333,14 +323,23 @@ func (r *Registry) Register(c Collector) error { } } } - // Did anything happen at all? + // A Collector yielding no Desc at all is considered unchecked. 
if len(newDescIDs) == 0 { - return errors.New("collector has no descriptors") + r.uncheckedCollectors = append(r.uncheckedCollectors, c) + return nil } if existing, exists := r.collectorsByID[collectorID]; exists { - return AlreadyRegisteredError{ - ExistingCollector: existing, - NewCollector: c, + switch e := existing.(type) { + case *wrappingCollector: + return AlreadyRegisteredError{ + ExistingCollector: e.unwrapRecursively(), + NewCollector: c, + } + default: + return AlreadyRegisteredError{ + ExistingCollector: e, + NewCollector: c, + } } } // If the collectorID is new, but at least one of the descs existed @@ -365,7 +364,7 @@ func (r *Registry) Unregister(c Collector) bool { var ( descChan = make(chan *Desc, capDescChan) descIDs = map[uint64]struct{}{} - collectorID uint64 // Just a sum of the desc IDs. + collectorID uint64 // All desc IDs XOR'd together. ) go func() { c.Describe(descChan) @@ -373,7 +372,7 @@ func (r *Registry) Unregister(c Collector) bool { }() for desc := range descChan { if _, exists := descIDs[desc.id]; !exists { - collectorID += desc.id + collectorID ^= desc.id descIDs[desc.id] = struct{}{} } } @@ -409,31 +408,25 @@ func (r *Registry) MustRegister(cs ...Collector) { // Gather implements Gatherer. func (r *Registry) Gather() ([]*dto.MetricFamily, error) { var ( - metricChan = make(chan Metric, capMetricChan) - metricHashes = map[uint64]struct{}{} - dimHashes = map[string]uint64{} - wg sync.WaitGroup - errs MultiError // The collected errors to return in the end. - registeredDescIDs map[uint64]struct{} // Only used for pedantic checks + checkedMetricChan = make(chan Metric, capMetricChan) + uncheckedMetricChan = make(chan Metric, capMetricChan) + metricHashes = map[uint64]struct{}{} + wg sync.WaitGroup + errs MultiError // The collected errors to return in the end. 
+ registeredDescIDs map[uint64]struct{} // Only used for pedantic checks ) r.mtx.RLock() + goroutineBudget := len(r.collectorsByID) + len(r.uncheckedCollectors) metricFamiliesByName := make(map[string]*dto.MetricFamily, len(r.dimHashesByName)) - - // Scatter. - // (Collectors could be complex and slow, so we call them all at once.) - wg.Add(len(r.collectorsByID)) - go func() { - wg.Wait() - close(metricChan) - }() + checkedCollectors := make(chan Collector, len(r.collectorsByID)) + uncheckedCollectors := make(chan Collector, len(r.uncheckedCollectors)) for _, collector := range r.collectorsByID { - go func(collector Collector) { - defer wg.Done() - collector.Collect(metricChan) - }(collector) + checkedCollectors <- collector + } + for _, collector := range r.uncheckedCollectors { + uncheckedCollectors <- collector } - // In case pedantic checks are enabled, we have to copy the map before // giving up the RLock. if r.pedanticChecksEnabled { @@ -442,133 +435,264 @@ func (r *Registry) Gather() ([]*dto.MetricFamily, error) { registeredDescIDs[id] = struct{}{} } } - r.mtx.RUnlock() - // Drain metricChan in case of premature return. + wg.Add(goroutineBudget) + + collectWorker := func() { + for { + select { + case collector := <-checkedCollectors: + collector.Collect(checkedMetricChan) + case collector := <-uncheckedCollectors: + collector.Collect(uncheckedMetricChan) + default: + return + } + wg.Done() + } + } + + // Start the first worker now to make sure at least one is running. + go collectWorker() + goroutineBudget-- + + // Close checkedMetricChan and uncheckedMetricChan once all collectors + // are collected. + go func() { + wg.Wait() + close(checkedMetricChan) + close(uncheckedMetricChan) + }() + + // Drain checkedMetricChan and uncheckedMetricChan in case of premature return. 
defer func() { - for _ = range metricChan { + if checkedMetricChan != nil { + for range checkedMetricChan { + } + } + if uncheckedMetricChan != nil { + for range uncheckedMetricChan { + } } }() - // Gather. - for metric := range metricChan { - // This could be done concurrently, too, but it required locking - // of metricFamiliesByName (and of metricHashes if checks are - // enabled). Most likely not worth it. - desc := metric.Desc() - dtoMetric := &dto.Metric{} - if err := metric.Write(dtoMetric); err != nil { - errs = append(errs, fmt.Errorf( - "error collecting metric %v: %s", desc, err, + // Copy the channel references so we can nil them out later to remove + // them from the select statements below. + cmc := checkedMetricChan + umc := uncheckedMetricChan + + for { + select { + case metric, ok := <-cmc: + if !ok { + cmc = nil + break + } + errs.Append(processMetric( + metric, metricFamiliesByName, + metricHashes, + registeredDescIDs, )) - continue - } - metricFamily, ok := metricFamiliesByName[desc.fqName] - if ok { - if metricFamily.GetHelp() != desc.help { - errs = append(errs, fmt.Errorf( - "collected metric %s %s has help %q but should have %q", - desc.fqName, dtoMetric, desc.help, metricFamily.GetHelp(), - )) - continue + case metric, ok := <-umc: + if !ok { + umc = nil + break } - // TODO(beorn7): Simplify switch once Desc has type. 
- switch metricFamily.GetType() { - case dto.MetricType_COUNTER: - if dtoMetric.Counter == nil { - errs = append(errs, fmt.Errorf( - "collected metric %s %s should be a Counter", - desc.fqName, dtoMetric, - )) - continue - } - case dto.MetricType_GAUGE: - if dtoMetric.Gauge == nil { - errs = append(errs, fmt.Errorf( - "collected metric %s %s should be a Gauge", - desc.fqName, dtoMetric, - )) - continue - } - case dto.MetricType_SUMMARY: - if dtoMetric.Summary == nil { - errs = append(errs, fmt.Errorf( - "collected metric %s %s should be a Summary", - desc.fqName, dtoMetric, - )) - continue - } - case dto.MetricType_UNTYPED: - if dtoMetric.Untyped == nil { - errs = append(errs, fmt.Errorf( - "collected metric %s %s should be Untyped", - desc.fqName, dtoMetric, + errs.Append(processMetric( + metric, metricFamiliesByName, + metricHashes, + nil, + )) + default: + if goroutineBudget <= 0 || len(checkedCollectors)+len(uncheckedCollectors) == 0 { + // All collectors are already being worked on or + // we have already as many goroutines started as + // there are collectors. Do the same as above, + // just without the default. + select { + case metric, ok := <-cmc: + if !ok { + cmc = nil + break + } + errs.Append(processMetric( + metric, metricFamiliesByName, + metricHashes, + registeredDescIDs, )) - continue - } - case dto.MetricType_HISTOGRAM: - if dtoMetric.Histogram == nil { - errs = append(errs, fmt.Errorf( - "collected metric %s %s should be a Histogram", - desc.fqName, dtoMetric, + case metric, ok := <-umc: + if !ok { + umc = nil + break + } + errs.Append(processMetric( + metric, metricFamiliesByName, + metricHashes, + nil, )) - continue } - default: - panic("encountered MetricFamily with invalid type") + break } - } else { - metricFamily = &dto.MetricFamily{} - metricFamily.Name = proto.String(desc.fqName) - metricFamily.Help = proto.String(desc.help) - // TODO(beorn7): Simplify switch once Desc has type. 
- switch { - case dtoMetric.Gauge != nil: - metricFamily.Type = dto.MetricType_GAUGE.Enum() - case dtoMetric.Counter != nil: - metricFamily.Type = dto.MetricType_COUNTER.Enum() - case dtoMetric.Summary != nil: - metricFamily.Type = dto.MetricType_SUMMARY.Enum() - case dtoMetric.Untyped != nil: - metricFamily.Type = dto.MetricType_UNTYPED.Enum() - case dtoMetric.Histogram != nil: - metricFamily.Type = dto.MetricType_HISTOGRAM.Enum() - default: - errs = append(errs, fmt.Errorf( - "empty metric collected: %s", dtoMetric, - )) - continue + // Start more workers. + go collectWorker() + goroutineBudget-- + runtime.Gosched() + } + // Once both checkedMetricChan and uncheckdMetricChan are closed + // and drained, the contraption above will nil out cmc and umc, + // and then we can leave the collect loop here. + if cmc == nil && umc == nil { + break + } + } + return internal.NormalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap() +} + +// WriteToTextfile calls Gather on the provided Gatherer, encodes the result in the +// Prometheus text format, and writes it to a temporary file. Upon success, the +// temporary file is renamed to the provided filename. +// +// This is intended for use with the textfile collector of the node exporter. +// Note that the node exporter expects the filename to be suffixed with ".prom". +func WriteToTextfile(filename string, g Gatherer) error { + tmp, err := ioutil.TempFile(filepath.Dir(filename), filepath.Base(filename)) + if err != nil { + return err + } + defer os.Remove(tmp.Name()) + + mfs, err := g.Gather() + if err != nil { + return err + } + for _, mf := range mfs { + if _, err := expfmt.MetricFamilyToText(tmp, mf); err != nil { + return err + } + } + if err := tmp.Close(); err != nil { + return err + } + + if err := os.Chmod(tmp.Name(), 0644); err != nil { + return err + } + return os.Rename(tmp.Name(), filename) +} + +// processMetric is an internal helper method only used by the Gather method. 
+func processMetric( + metric Metric, + metricFamiliesByName map[string]*dto.MetricFamily, + metricHashes map[uint64]struct{}, + registeredDescIDs map[uint64]struct{}, +) error { + desc := metric.Desc() + // Wrapped metrics collected by an unchecked Collector can have an + // invalid Desc. + if desc.err != nil { + return desc.err + } + dtoMetric := &dto.Metric{} + if err := metric.Write(dtoMetric); err != nil { + return fmt.Errorf("error collecting metric %v: %s", desc, err) + } + metricFamily, ok := metricFamiliesByName[desc.fqName] + if ok { // Existing name. + if metricFamily.GetHelp() != desc.help { + return fmt.Errorf( + "collected metric %s %s has help %q but should have %q", + desc.fqName, dtoMetric, desc.help, metricFamily.GetHelp(), + ) + } + // TODO(beorn7): Simplify switch once Desc has type. + switch metricFamily.GetType() { + case dto.MetricType_COUNTER: + if dtoMetric.Counter == nil { + return fmt.Errorf( + "collected metric %s %s should be a Counter", + desc.fqName, dtoMetric, + ) + } + case dto.MetricType_GAUGE: + if dtoMetric.Gauge == nil { + return fmt.Errorf( + "collected metric %s %s should be a Gauge", + desc.fqName, dtoMetric, + ) } - metricFamiliesByName[desc.fqName] = metricFamily - } - if err := checkMetricConsistency(metricFamily, dtoMetric, metricHashes, dimHashes); err != nil { - errs = append(errs, err) - continue - } - if r.pedanticChecksEnabled { - // Is the desc registered at all? 
- if _, exist := registeredDescIDs[desc.id]; !exist { - errs = append(errs, fmt.Errorf( - "collected metric %s %s with unregistered descriptor %s", - metricFamily.GetName(), dtoMetric, desc, - )) - continue + case dto.MetricType_SUMMARY: + if dtoMetric.Summary == nil { + return fmt.Errorf( + "collected metric %s %s should be a Summary", + desc.fqName, dtoMetric, + ) } - if err := checkDescConsistency(metricFamily, dtoMetric, desc); err != nil { - errs = append(errs, err) - continue + case dto.MetricType_UNTYPED: + if dtoMetric.Untyped == nil { + return fmt.Errorf( + "collected metric %s %s should be Untyped", + desc.fqName, dtoMetric, + ) } + case dto.MetricType_HISTOGRAM: + if dtoMetric.Histogram == nil { + return fmt.Errorf( + "collected metric %s %s should be a Histogram", + desc.fqName, dtoMetric, + ) + } + default: + panic("encountered MetricFamily with invalid type") + } + } else { // New name. + metricFamily = &dto.MetricFamily{} + metricFamily.Name = proto.String(desc.fqName) + metricFamily.Help = proto.String(desc.help) + // TODO(beorn7): Simplify switch once Desc has type. + switch { + case dtoMetric.Gauge != nil: + metricFamily.Type = dto.MetricType_GAUGE.Enum() + case dtoMetric.Counter != nil: + metricFamily.Type = dto.MetricType_COUNTER.Enum() + case dtoMetric.Summary != nil: + metricFamily.Type = dto.MetricType_SUMMARY.Enum() + case dtoMetric.Untyped != nil: + metricFamily.Type = dto.MetricType_UNTYPED.Enum() + case dtoMetric.Histogram != nil: + metricFamily.Type = dto.MetricType_HISTOGRAM.Enum() + default: + return fmt.Errorf("empty metric collected: %s", dtoMetric) + } + if err := checkSuffixCollisions(metricFamily, metricFamiliesByName); err != nil { + return err + } + metricFamiliesByName[desc.fqName] = metricFamily + } + if err := checkMetricConsistency(metricFamily, dtoMetric, metricHashes); err != nil { + return err + } + if registeredDescIDs != nil { + // Is the desc registered at all? 
+ if _, exist := registeredDescIDs[desc.id]; !exist { + return fmt.Errorf( + "collected metric %s %s with unregistered descriptor %s", + metricFamily.GetName(), dtoMetric, desc, + ) + } + if err := checkDescConsistency(metricFamily, dtoMetric, desc); err != nil { + return err } - metricFamily.Metric = append(metricFamily.Metric, dtoMetric) } - return normalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap() + metricFamily.Metric = append(metricFamily.Metric, dtoMetric) + return nil } // Gatherers is a slice of Gatherer instances that implements the Gatherer // interface itself. Its Gather method calls Gather on all Gatherers in the // slice in order and returns the merged results. Errors returned from the -// Gather calles are all returned in a flattened MultiError. Duplicate and +// Gather calls are all returned in a flattened MultiError. Duplicate and // inconsistent Metrics are skipped (first occurrence in slice order wins) and // reported in the returned error. // @@ -588,7 +712,6 @@ func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) { var ( metricFamiliesByName = map[string]*dto.MetricFamily{} metricHashes = map[uint64]struct{}{} - dimHashes = map[string]uint64{} errs MultiError // The collected errors to return in the end. 
) @@ -625,10 +748,14 @@ func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) { existingMF.Name = mf.Name existingMF.Help = mf.Help existingMF.Type = mf.Type + if err := checkSuffixCollisions(existingMF, metricFamiliesByName); err != nil { + errs = append(errs, err) + continue + } metricFamiliesByName[mf.GetName()] = existingMF } for _, m := range mf.Metric { - if err := checkMetricConsistency(existingMF, m, metricHashes, dimHashes); err != nil { + if err := checkMetricConsistency(existingMF, m, metricHashes); err != nil { errs = append(errs, err) continue } @@ -636,88 +763,80 @@ func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) { } } } - return normalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap() -} - -// metricSorter is a sortable slice of *dto.Metric. -type metricSorter []*dto.Metric - -func (s metricSorter) Len() int { - return len(s) -} - -func (s metricSorter) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -func (s metricSorter) Less(i, j int) bool { - if len(s[i].Label) != len(s[j].Label) { - // This should not happen. The metrics are - // inconsistent. However, we have to deal with the fact, as - // people might use custom collectors or metric family injection - // to create inconsistent metrics. So let's simply compare the - // number of labels in this case. That will still yield - // reproducible sorting. - return len(s[i].Label) < len(s[j].Label) - } - for n, lp := range s[i].Label { - vi := lp.GetValue() - vj := s[j].Label[n].GetValue() - if vi != vj { - return vi < vj - } - } - - // We should never arrive here. Multiple metrics with the same - // label set in the same scrape will lead to undefined ingestion - // behavior. However, as above, we have to provide stable sorting - // here, even for inconsistent metrics. So sort equal metrics - // by their timestamp, with missing timestamps (implying "now") - // coming last. 
- if s[i].TimestampMs == nil { - return false - } - if s[j].TimestampMs == nil { - return true - } - return s[i].GetTimestampMs() < s[j].GetTimestampMs() + return internal.NormalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap() } -// normalizeMetricFamilies returns a MetricFamily slice whith empty -// MetricFamilies pruned and the remaining MetricFamilies sorted by name within -// the slice, with the contained Metrics sorted within each MetricFamily. -func normalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily) []*dto.MetricFamily { - for _, mf := range metricFamiliesByName { - sort.Sort(metricSorter(mf.Metric)) +// checkSuffixCollisions checks for collisions with the “magic” suffixes the +// Prometheus text format and the internal metric representation of the +// Prometheus server add while flattening Summaries and Histograms. +func checkSuffixCollisions(mf *dto.MetricFamily, mfs map[string]*dto.MetricFamily) error { + var ( + newName = mf.GetName() + newType = mf.GetType() + newNameWithoutSuffix = "" + ) + switch { + case strings.HasSuffix(newName, "_count"): + newNameWithoutSuffix = newName[:len(newName)-6] + case strings.HasSuffix(newName, "_sum"): + newNameWithoutSuffix = newName[:len(newName)-4] + case strings.HasSuffix(newName, "_bucket"): + newNameWithoutSuffix = newName[:len(newName)-7] + } + if newNameWithoutSuffix != "" { + if existingMF, ok := mfs[newNameWithoutSuffix]; ok { + switch existingMF.GetType() { + case dto.MetricType_SUMMARY: + if !strings.HasSuffix(newName, "_bucket") { + return fmt.Errorf( + "collected metric named %q collides with previously collected summary named %q", + newName, newNameWithoutSuffix, + ) + } + case dto.MetricType_HISTOGRAM: + return fmt.Errorf( + "collected metric named %q collides with previously collected histogram named %q", + newName, newNameWithoutSuffix, + ) + } + } } - names := make([]string, 0, len(metricFamiliesByName)) - for name, mf := range metricFamiliesByName { - if 
len(mf.Metric) > 0 { - names = append(names, name) + if newType == dto.MetricType_SUMMARY || newType == dto.MetricType_HISTOGRAM { + if _, ok := mfs[newName+"_count"]; ok { + return fmt.Errorf( + "collected histogram or summary named %q collides with previously collected metric named %q", + newName, newName+"_count", + ) + } + if _, ok := mfs[newName+"_sum"]; ok { + return fmt.Errorf( + "collected histogram or summary named %q collides with previously collected metric named %q", + newName, newName+"_sum", + ) } } - sort.Strings(names) - result := make([]*dto.MetricFamily, 0, len(names)) - for _, name := range names { - result = append(result, metricFamiliesByName[name]) + if newType == dto.MetricType_HISTOGRAM { + if _, ok := mfs[newName+"_bucket"]; ok { + return fmt.Errorf( + "collected histogram named %q collides with previously collected metric named %q", + newName, newName+"_bucket", + ) + } } - return result + return nil } // checkMetricConsistency checks if the provided Metric is consistent with the -// provided MetricFamily. It also hashed the Metric labels and the MetricFamily -// name. If the resulting hash is alread in the provided metricHashes, an error -// is returned. If not, it is added to metricHashes. The provided dimHashes maps -// MetricFamily names to their dimHash (hashed sorted label names). If dimHashes -// doesn't yet contain a hash for the provided MetricFamily, it is -// added. Otherwise, an error is returned if the existing dimHashes in not equal -// the calculated dimHash. +// provided MetricFamily. It also hashes the Metric labels and the MetricFamily +// name. If the resulting hash is already in the provided metricHashes, an error +// is returned. If not, it is added to metricHashes. func checkMetricConsistency( metricFamily *dto.MetricFamily, dtoMetric *dto.Metric, metricHashes map[uint64]struct{}, - dimHashes map[string]uint64, ) error { + name := metricFamily.GetName() + // Type consistency with metric family. 
if metricFamily.GetType() == dto.MetricType_GAUGE && dtoMetric.Gauge == nil || metricFamily.GetType() == dto.MetricType_COUNTER && dtoMetric.Counter == nil || @@ -725,42 +844,67 @@ func checkMetricConsistency( metricFamily.GetType() == dto.MetricType_HISTOGRAM && dtoMetric.Histogram == nil || metricFamily.GetType() == dto.MetricType_UNTYPED && dtoMetric.Untyped == nil { return fmt.Errorf( - "collected metric %s %s is not a %s", - metricFamily.GetName(), dtoMetric, metricFamily.GetType(), + "collected metric %q { %s} is not a %s", + name, dtoMetric, metricFamily.GetType(), ) } - // Is the metric unique (i.e. no other metric with the same name and the same label values)? - h := hashNew() - h = hashAdd(h, metricFamily.GetName()) - h = hashAddByte(h, separatorByte) - dh := hashNew() + previousLabelName := "" + for _, labelPair := range dtoMetric.GetLabel() { + labelName := labelPair.GetName() + if labelName == previousLabelName { + return fmt.Errorf( + "collected metric %q { %s} has two or more labels with the same name: %s", + name, dtoMetric, labelName, + ) + } + if !checkLabelName(labelName) { + return fmt.Errorf( + "collected metric %q { %s} has a label with an invalid name: %s", + name, dtoMetric, labelName, + ) + } + if dtoMetric.Summary != nil && labelName == quantileLabel { + return fmt.Errorf( + "collected metric %q { %s} must not have an explicit %q label", + name, dtoMetric, quantileLabel, + ) + } + if !utf8.ValidString(labelPair.GetValue()) { + return fmt.Errorf( + "collected metric %q { %s} has a label named %q whose value is not utf8: %#v", + name, dtoMetric, labelName, labelPair.GetValue()) + } + previousLabelName = labelName + } + + // Is the metric unique (i.e. no other metric with the same name and the same labels)? + h := xxhash.New() + h.WriteString(name) + h.Write(separatorByteSlice) // Make sure label pairs are sorted. We depend on it for the consistency // check. 
- sort.Sort(LabelPairSorter(dtoMetric.Label)) + if !sort.IsSorted(labelPairSorter(dtoMetric.Label)) { + // We cannot sort dtoMetric.Label in place as it is immutable by contract. + copiedLabels := make([]*dto.LabelPair, len(dtoMetric.Label)) + copy(copiedLabels, dtoMetric.Label) + sort.Sort(labelPairSorter(copiedLabels)) + dtoMetric.Label = copiedLabels + } for _, lp := range dtoMetric.Label { - h = hashAdd(h, lp.GetValue()) - h = hashAddByte(h, separatorByte) - dh = hashAdd(dh, lp.GetName()) - dh = hashAddByte(dh, separatorByte) + h.WriteString(lp.GetName()) + h.Write(separatorByteSlice) + h.WriteString(lp.GetValue()) + h.Write(separatorByteSlice) } - if _, exists := metricHashes[h]; exists { + hSum := h.Sum64() + if _, exists := metricHashes[hSum]; exists { return fmt.Errorf( - "collected metric %s %s was collected before with the same name and label values", - metricFamily.GetName(), dtoMetric, + "collected metric %q { %s} was collected before with the same name and label values", + name, dtoMetric, ) } - if dimHash, ok := dimHashes[metricFamily.GetName()]; ok { - if dimHash != dh { - return fmt.Errorf( - "collected metric %s %s has label dimensions inconsistent with previously collected metrics in the same metric family", - metricFamily.GetName(), dtoMetric, - ) - } - } else { - dimHashes[metricFamily.GetName()] = dh - } - metricHashes[h] = struct{}{} + metricHashes[hSum] = struct{}{} return nil } @@ -778,8 +922,8 @@ func checkDescConsistency( } // Is the desc consistent with the content of the metric? - lpsFromDesc := make([]*dto.LabelPair, 0, len(dtoMetric.Label)) - lpsFromDesc = append(lpsFromDesc, desc.constLabelPairs...) 
+ lpsFromDesc := make([]*dto.LabelPair, len(desc.constLabelPairs), len(dtoMetric.Label)) + copy(lpsFromDesc, desc.constLabelPairs) for _, l := range desc.variableLabels { lpsFromDesc = append(lpsFromDesc, &dto.LabelPair{ Name: proto.String(l), @@ -791,7 +935,7 @@ func checkDescConsistency( metricFamily.GetName(), dtoMetric, desc, ) } - sort.Sort(LabelPairSorter(lpsFromDesc)) + sort.Sort(labelPairSorter(lpsFromDesc)) for i, lpFromDesc := range lpsFromDesc { lpFromMetric := dtoMetric.Label[i] if lpFromDesc.GetName() != lpFromMetric.GetName() || diff --git a/vendor/github.com/m3db/prometheus_client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go similarity index 51% rename from vendor/github.com/m3db/prometheus_client_golang/prometheus/summary.go rename to vendor/github.com/prometheus/client_golang/prometheus/summary.go index da2a7a8fb..c5fa8ed7c 100644 --- a/vendor/github.com/m3db/prometheus_client_golang/prometheus/summary.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/summary.go @@ -16,14 +16,17 @@ package prometheus import ( "fmt" "math" + "runtime" "sort" "sync" + "sync/atomic" "time" "github.com/beorn7/perks/quantile" + //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. "github.com/golang/protobuf/proto" - dto "github.com/m3db/prometheus_client_model/go" + dto "github.com/prometheus/client_model/go" ) // quantileLabel is used for the label that defines the quantile in a @@ -36,7 +39,10 @@ const quantileLabel = "quantile" // // A typical use-case is the observation of request latencies. By default, a // Summary provides the median, the 90th and the 99th percentile of the latency -// as rank estimations. +// as rank estimations. However, the default behavior will change in the +// upcoming v1.0.0 of the library. There will be no rank estimations at all by +// default. For a sane transition, it is recommended to set the desired rank +// estimations explicitly. 
// // Note that the rank estimations cannot be aggregated in a meaningful way with // the Prometheus query language (i.e. you cannot average or add them). If you @@ -49,17 +55,17 @@ type Summary interface { Metric Collector - // Observe adds a single observation to the summary. + // Observe adds a single observation to the summary. Observations are + // usually positive or zero. Negative observations are accepted but + // prevent current versions of Prometheus from properly detecting + // counter resets in the sum of observations. See + // https://prometheus.io/docs/practices/histograms/#count-and-sum-of-observations + // for details. Observe(float64) } -// DefObjectives are the default Summary quantile values. -var ( - DefObjectives = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001} - - errQuantileLabelNotAllowed = fmt.Errorf( - "%q is not allowed as label name in summaries", quantileLabel, - ) +var errQuantileLabelNotAllowed = fmt.Errorf( + "%q is not allowed as label name in summaries", quantileLabel, ) // Default values for SummaryOpts. @@ -75,8 +81,10 @@ const ( ) // SummaryOpts bundles the options for creating a Summary metric. It is -// mandatory to set Name and Help to a non-empty string. All other fields are -// optional and can safely be left at their zero value. +// mandatory to set Name to a non-empty string. While all other fields are +// optional and can safely be left at their zero value, it is recommended to set +// a help string and to explicitly set the Objectives field to the desired value +// as the default value will change in the upcoming v1.0.0 of the library. type SummaryOpts struct { // Namespace, Subsystem, and Name are components of the fully-qualified // name of the Summary (created by joining these components with @@ -87,39 +95,40 @@ type SummaryOpts struct { Subsystem string Name string - // Help provides information about this Summary. Mandatory! + // Help provides information about this Summary. 
// // Metrics with the same fully-qualified name must have the same Help // string. Help string - // ConstLabels are used to attach fixed labels to this - // Summary. Summaries with the same fully-qualified name must have the - // same label names in their ConstLabels. + // ConstLabels are used to attach fixed labels to this metric. Metrics + // with the same fully-qualified name must have the same label names in + // their ConstLabels. // - // Note that in most cases, labels have a value that varies during the - // lifetime of a process. Those labels are usually managed with a - // SummaryVec. ConstLabels serve only special purposes. One is for the - // special case where the value of a label does not change during the - // lifetime of a process, e.g. if the revision of the running binary is - // put into a label. Another, more advanced purpose is if more than one - // Collector needs to collect Summaries with the same fully-qualified - // name. In that case, those Summaries must differ in the values of - // their ConstLabels. See the Collector examples. + // Due to the way a Summary is represented in the Prometheus text format + // and how it is handled by the Prometheus server internally, “quantile” + // is an illegal label name. Construction of a Summary or SummaryVec + // will panic if this label name is used in ConstLabels. // - // If the value of a label never changes (not even between binaries), - // that label most likely should not be a label at all (but part of the - // metric name). + // ConstLabels are only used rarely. In particular, do not use them to + // attach the same labels to all your metrics. Those use cases are + // better covered by target labels set by the scraping Prometheus + // server, or by one specific metric (e.g. a build_info or a + // machine_role metric). 
See also + // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels ConstLabels Labels // Objectives defines the quantile rank estimates with their respective - // absolute error. If Objectives[q] = e, then the value reported - // for q will be the φ-quantile value for some φ between q-e and q+e. - // The default value is DefObjectives. + // absolute error. If Objectives[q] = e, then the value reported for q + // will be the φ-quantile value for some φ between q-e and q+e. The + // default value is an empty map, resulting in a summary without + // quantiles. Objectives map[float64]float64 // MaxAge defines the duration for which an observation stays relevant - // for the summary. Must be positive. The default value is DefMaxAge. + // for the summary. Only applies to pre-calculated quantiles, does not + // apply to _sum and _count. Must be positive. The default value is + // DefMaxAge. MaxAge time.Duration // AgeBuckets is the number of buckets used to exclude observations that @@ -139,7 +148,7 @@ type SummaryOpts struct { BufCap uint32 } -// Great fuck-up with the sliding-window decay algorithm... The Merge method of +// Problem with the sliding-window decay algorithm... The Merge method of // perk/quantile is actually not working as advertised - and it might be // unfixable, as the underlying algorithm is apparently not capable of merging // summaries in the first place. 
To avoid using Merge, we are currently adding @@ -169,7 +178,7 @@ func NewSummary(opts SummaryOpts) Summary { func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary { if len(desc.variableLabels) != len(labelValues) { - panic(errInconsistentCardinality) + panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, labelValues)) } for _, n := range desc.variableLabels { @@ -183,8 +192,8 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary { } } - if len(opts.Objectives) == 0 { - opts.Objectives = DefObjectives + if opts.Objectives == nil { + opts.Objectives = map[float64]float64{} } if opts.MaxAge < 0 { @@ -202,13 +211,24 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary { opts.BufCap = DefBufCap } + if len(opts.Objectives) == 0 { + // Use the lock-free implementation of a Summary without objectives. + s := &noObjectivesSummary{ + desc: desc, + labelPairs: MakeLabelPairs(desc, labelValues), + counts: [2]*summaryCounts{{}, {}}, + } + s.init(s) // Init self-collection. + return s + } + s := &summary{ desc: desc, objectives: opts.Objectives, sortedObjectives: make([]float64, 0, len(opts.Objectives)), - labelPairs: makeLabelPairs(desc, labelValues), + labelPairs: MakeLabelPairs(desc, labelValues), hotBuf: make([]float64, 0, opts.BufCap), coldBuf: make([]float64, 0, opts.BufCap), @@ -370,6 +390,116 @@ func (s *summary) swapBufs(now time.Time) { } } +type summaryCounts struct { + // sumBits contains the bits of the float64 representing the sum of all + // observations. sumBits and count have to go first in the struct to + // guarantee alignment for atomic operations. + // http://golang.org/pkg/sync/atomic/#pkg-note-BUG + sumBits uint64 + count uint64 +} + +type noObjectivesSummary struct { + // countAndHotIdx enables lock-free writes with use of atomic updates. + // The most significant bit is the hot index [0 or 1] of the count field + // below. Observe calls update the hot one. 
All remaining bits count the + // number of Observe calls. Observe starts by incrementing this counter, + // and finish by incrementing the count field in the respective + // summaryCounts, as a marker for completion. + // + // Calls of the Write method (which are non-mutating reads from the + // perspective of the summary) swap the hot–cold under the writeMtx + // lock. A cooldown is awaited (while locked) by comparing the number of + // observations with the initiation count. Once they match, then the + // last observation on the now cool one has completed. All cool fields must + // be merged into the new hot before releasing writeMtx. + + // Fields with atomic access first! See alignment constraint: + // http://golang.org/pkg/sync/atomic/#pkg-note-BUG + countAndHotIdx uint64 + + selfCollector + desc *Desc + writeMtx sync.Mutex // Only used in the Write method. + + // Two counts, one is "hot" for lock-free observations, the other is + // "cold" for writing out a dto.Metric. It has to be an array of + // pointers to guarantee 64bit alignment of the histogramCounts, see + // http://golang.org/pkg/sync/atomic/#pkg-note-BUG. + counts [2]*summaryCounts + + labelPairs []*dto.LabelPair +} + +func (s *noObjectivesSummary) Desc() *Desc { + return s.desc +} + +func (s *noObjectivesSummary) Observe(v float64) { + // We increment h.countAndHotIdx so that the counter in the lower + // 63 bits gets incremented. At the same time, we get the new value + // back, which we can use to find the currently-hot counts. + n := atomic.AddUint64(&s.countAndHotIdx, 1) + hotCounts := s.counts[n>>63] + + for { + oldBits := atomic.LoadUint64(&hotCounts.sumBits) + newBits := math.Float64bits(math.Float64frombits(oldBits) + v) + if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) { + break + } + } + // Increment count last as we take it as a signal that the observation + // is complete. 
+ atomic.AddUint64(&hotCounts.count, 1) +} + +func (s *noObjectivesSummary) Write(out *dto.Metric) error { + // For simplicity, we protect this whole method by a mutex. It is not in + // the hot path, i.e. Observe is called much more often than Write. The + // complication of making Write lock-free isn't worth it, if possible at + // all. + s.writeMtx.Lock() + defer s.writeMtx.Unlock() + + // Adding 1<<63 switches the hot index (from 0 to 1 or from 1 to 0) + // without touching the count bits. See the struct comments for a full + // description of the algorithm. + n := atomic.AddUint64(&s.countAndHotIdx, 1<<63) + // count is contained unchanged in the lower 63 bits. + count := n & ((1 << 63) - 1) + // The most significant bit tells us which counts is hot. The complement + // is thus the cold one. + hotCounts := s.counts[n>>63] + coldCounts := s.counts[(^n)>>63] + + // Await cooldown. + for count != atomic.LoadUint64(&coldCounts.count) { + runtime.Gosched() // Let observations get work done. + } + + sum := &dto.Summary{ + SampleCount: proto.Uint64(count), + SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))), + } + + out.Summary = sum + out.Label = s.labelPairs + + // Finally add all the cold counts to the new hot counts and reset the cold counts. + atomic.AddUint64(&hotCounts.count, count) + atomic.StoreUint64(&coldCounts.count, 0) + for { + oldBits := atomic.LoadUint64(&hotCounts.sumBits) + newBits := math.Float64bits(math.Float64frombits(oldBits) + sum.GetSampleSum()) + if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) { + atomic.StoreUint64(&coldCounts.sumBits, 0) + break + } + } + return nil +} + type quantSort []*dto.Quantile func (s quantSort) Len() int { @@ -394,9 +524,17 @@ type SummaryVec struct { } // NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and -// partitioned by the given label names. At least one label name must be -// provided. 
+// partitioned by the given label names. +// +// Due to the way a Summary is represented in the Prometheus text format and how +// it is handled by the Prometheus server internally, “quantile” is an illegal +// label name. NewSummaryVec will panic if this label name is used. func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec { + for _, ln := range labelNames { + if ln == quantileLabel { + panic(errQuantileLabelNotAllowed) + } + } desc := NewDesc( BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), opts.Help, @@ -404,47 +542,116 @@ func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec { opts.ConstLabels, ) return &SummaryVec{ - MetricVec: newMetricVec(desc, func(lvs ...string) Metric { + MetricVec: NewMetricVec(desc, func(lvs ...string) Metric { return newSummary(desc, opts, lvs...) }), } } -// GetMetricWithLabelValues replaces the method of the same name in -// MetricVec. The difference is that this method returns a Summary and not a -// Metric so that no type conversion is required. -func (m *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Summary, error) { - metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...) +// GetMetricWithLabelValues returns the Summary for the given slice of label +// values (same order as the variable labels in Desc). If that combination of +// label values is accessed for the first time, a new Summary is created. +// +// It is possible to call this method without using the returned Summary to only +// create the new Summary but leave it at its starting value, a Summary without +// any observations. +// +// Keeping the Summary for later use is possible (and should be considered if +// performance is critical), but keep in mind that Reset, DeleteLabelValues and +// Delete can be used to delete the Summary from the SummaryVec. In that case, +// the Summary will still exist, but it will not be exported anymore, even if a +// Summary with the same label values is created later. 
See also the CounterVec +// example. +// +// An error is returned if the number of label values is not the same as the +// number of variable labels in Desc (minus any curried labels). +// +// Note that for more than one label value, this method is prone to mistakes +// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as +// an alternative to avoid that type of mistake. For higher label numbers, the +// latter has a much more readable (albeit more verbose) syntax, but it comes +// with a performance overhead (for creating and processing the Labels map). +// See also the GaugeVec example. +func (v *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) { + metric, err := v.MetricVec.GetMetricWithLabelValues(lvs...) if metric != nil { - return metric.(Summary), err + return metric.(Observer), err } return nil, err } -// GetMetricWith replaces the method of the same name in MetricVec. The -// difference is that this method returns a Summary and not a Metric so that no -// type conversion is required. -func (m *SummaryVec) GetMetricWith(labels Labels) (Summary, error) { - metric, err := m.MetricVec.GetMetricWith(labels) +// GetMetricWith returns the Summary for the given Labels map (the label names +// must match those of the variable labels in Desc). If that label map is +// accessed for the first time, a new Summary is created. Implications of +// creating a Summary without using it and keeping the Summary for later use are +// the same as for GetMetricWithLabelValues. +// +// An error is returned if the number and names of the Labels are inconsistent +// with those of the variable labels in Desc (minus any curried labels). +// +// This method is used for the same purpose as +// GetMetricWithLabelValues(...string). See there for pros and cons of the two +// methods. 
+func (v *SummaryVec) GetMetricWith(labels Labels) (Observer, error) { + metric, err := v.MetricVec.GetMetricWith(labels) if metric != nil { - return metric.(Summary), err + return metric.(Observer), err } return nil, err } // WithLabelValues works as GetMetricWithLabelValues, but panics where -// GetMetricWithLabelValues would have returned an error. By not returning an -// error, WithLabelValues allows shortcuts like +// GetMetricWithLabelValues would have returned an error. Not returning an +// error allows shortcuts like // myVec.WithLabelValues("404", "GET").Observe(42.21) -func (m *SummaryVec) WithLabelValues(lvs ...string) Summary { - return m.MetricVec.WithLabelValues(lvs...).(Summary) +func (v *SummaryVec) WithLabelValues(lvs ...string) Observer { + s, err := v.GetMetricWithLabelValues(lvs...) + if err != nil { + panic(err) + } + return s } // With works as GetMetricWith, but panics where GetMetricWithLabels would have -// returned an error. By not returning an error, With allows shortcuts like -// myVec.With(Labels{"code": "404", "method": "GET"}).Observe(42.21) -func (m *SummaryVec) With(labels Labels) Summary { - return m.MetricVec.With(labels).(Summary) +// returned an error. Not returning an error allows shortcuts like +// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21) +func (v *SummaryVec) With(labels Labels) Observer { + s, err := v.GetMetricWith(labels) + if err != nil { + panic(err) + } + return s +} + +// CurryWith returns a vector curried with the provided labels, i.e. the +// returned vector has those labels pre-set for all labeled operations performed +// on it. The cardinality of the curried vector is reduced accordingly. The +// order of the remaining labels stays the same (just with the curried labels +// taken out of the sequence – which is relevant for the +// (GetMetric)WithLabelValues methods). It is possible to curry a curried +// vector, but only with labels not yet used for currying before. 
+// +// The metrics contained in the SummaryVec are shared between the curried and +// uncurried vectors. They are just accessed differently. Curried and uncurried +// vectors behave identically in terms of collection. Only one must be +// registered with a given registry (usually the uncurried version). The Reset +// method deletes all metrics, even if called on a curried vector. +func (v *SummaryVec) CurryWith(labels Labels) (ObserverVec, error) { + vec, err := v.MetricVec.CurryWith(labels) + if vec != nil { + return &SummaryVec{vec}, err + } + return nil, err +} + +// MustCurryWith works as CurryWith but panics where CurryWith would have +// returned an error. +func (v *SummaryVec) MustCurryWith(labels Labels) ObserverVec { + vec, err := v.CurryWith(labels) + if err != nil { + panic(err) + } + return vec } type constSummary struct { @@ -497,7 +704,7 @@ func (s *constSummary) Write(out *dto.Metric) error { // map[float64]float64{0.5: 0.23, 0.99: 0.56} // // NewConstSummary returns an error if the length of labelValues is not -// consistent with the variable labels in Desc. +// consistent with the variable labels in Desc or if Desc is invalid. 
func NewConstSummary( desc *Desc, count uint64, @@ -505,15 +712,18 @@ func NewConstSummary( quantiles map[float64]float64, labelValues ...string, ) (Metric, error) { - if len(desc.variableLabels) != len(labelValues) { - return nil, errInconsistentCardinality + if desc.err != nil { + return nil, desc.err + } + if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { + return nil, err } return &constSummary{ desc: desc, count: count, sum: sum, quantiles: quantiles, - labelPairs: makeLabelPairs(desc, labelValues), + labelPairs: MakeLabelPairs(desc, labelValues), }, nil } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/timer.go b/vendor/github.com/prometheus/client_golang/prometheus/timer.go new file mode 100644 index 000000000..8d5f10523 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/timer.go @@ -0,0 +1,54 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import "time" + +// Timer is a helper type to time functions. Use NewTimer to create new +// instances. +type Timer struct { + begin time.Time + observer Observer +} + +// NewTimer creates a new Timer. The provided Observer is used to observe a +// duration in seconds. Timer is usually used to time a function call in the +// following way: +// func TimeMe() { +// timer := NewTimer(myHistogram) +// defer timer.ObserveDuration() +// // Do actual work. 
+// } +func NewTimer(o Observer) *Timer { + return &Timer{ + begin: time.Now(), + observer: o, + } +} + +// ObserveDuration records the duration passed since the Timer was created with +// NewTimer. It calls the Observe method of the Observer provided during +// construction with the duration in seconds as an argument. The observed +// duration is also returned. ObserveDuration is usually called with a defer +// statement. +// +// Note that this method is only guaranteed to never observe negative durations +// if used with Go1.9+. +func (t *Timer) ObserveDuration() time.Duration { + d := time.Since(t.begin) + if t.observer != nil { + t.observer.Observe(d.Seconds()) + } + return d +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/untyped.go b/vendor/github.com/prometheus/client_golang/prometheus/untyped.go new file mode 100644 index 000000000..0f9ce63f4 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/untyped.go @@ -0,0 +1,42 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +// UntypedOpts is an alias for Opts. See there for doc comments. +type UntypedOpts Opts + +// UntypedFunc works like GaugeFunc but the collected metric is of type +// "Untyped". UntypedFunc is useful to mirror an external metric of unknown +// type. +// +// To create UntypedFunc instances, use NewUntypedFunc. 
+type UntypedFunc interface { + Metric + Collector +} + +// NewUntypedFunc creates a new UntypedFunc based on the provided +// UntypedOpts. The value reported is determined by calling the given function +// from within the Write method. Take into account that metric collection may +// happen concurrently. If that results in concurrent calls to Write, like in +// the case where an UntypedFunc is directly registered with Prometheus, the +// provided function must be concurrency-safe. +func NewUntypedFunc(opts UntypedOpts, function func() float64) UntypedFunc { + return newValueFunc(NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ), UntypedValue, function) +} diff --git a/vendor/github.com/m3db/prometheus_client_golang/prometheus/value.go b/vendor/github.com/prometheus/client_golang/prometheus/value.go similarity index 61% rename from vendor/github.com/m3db/prometheus_client_golang/prometheus/value.go rename to vendor/github.com/prometheus/client_golang/prometheus/value.go index c7839d1f0..c778711b8 100644 --- a/vendor/github.com/m3db/prometheus_client_golang/prometheus/value.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/value.go @@ -14,21 +14,23 @@ package prometheus import ( - "errors" "fmt" - "math" "sort" - "sync/atomic" - - dto "github.com/m3db/prometheus_client_model/go" + "time" + "unicode/utf8" + //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + + dto "github.com/prometheus/client_model/go" ) // ValueType is an enumeration of metric types that represent a simple value. type ValueType int -// Possible values for the ValueType enum. +// Possible values for the ValueType enum. Use UntypedValue to mark a metric +// with an unknown type. 
const ( _ ValueType = iota CounterValue @@ -36,77 +38,6 @@ const ( UntypedValue ) -var errInconsistentCardinality = errors.New("inconsistent label cardinality") - -// value is a generic metric for simple values. It implements Metric, Collector, -// Counter, Gauge, and Untyped. Its effective type is determined by -// ValueType. This is a low-level building block used by the library to back the -// implementations of Counter, Gauge, and Untyped. -type value struct { - // valBits containst the bits of the represented float64 value. It has - // to go first in the struct to guarantee alignment for atomic - // operations. http://golang.org/pkg/sync/atomic/#pkg-note-BUG - valBits uint64 - - selfCollector - - desc *Desc - valType ValueType - labelPairs []*dto.LabelPair -} - -// newValue returns a newly allocated value with the given Desc, ValueType, -// sample value and label values. It panics if the number of label -// values is different from the number of variable labels in Desc. -func newValue(desc *Desc, valueType ValueType, val float64, labelValues ...string) *value { - if len(labelValues) != len(desc.variableLabels) { - panic(errInconsistentCardinality) - } - result := &value{ - desc: desc, - valType: valueType, - valBits: math.Float64bits(val), - labelPairs: makeLabelPairs(desc, labelValues), - } - result.init(result) - return result -} - -func (v *value) Desc() *Desc { - return v.desc -} - -func (v *value) Set(val float64) { - atomic.StoreUint64(&v.valBits, math.Float64bits(val)) -} - -func (v *value) Inc() { - v.Add(1) -} - -func (v *value) Dec() { - v.Add(-1) -} - -func (v *value) Add(val float64) { - for { - oldBits := atomic.LoadUint64(&v.valBits) - newBits := math.Float64bits(math.Float64frombits(oldBits) + val) - if atomic.CompareAndSwapUint64(&v.valBits, oldBits, newBits) { - return - } - } -} - -func (v *value) Sub(val float64) { - v.Add(val * -1) -} - -func (v *value) Write(out *dto.Metric) error { - val := 
math.Float64frombits(atomic.LoadUint64(&v.valBits)) - return populateMetric(v.valType, val, v.labelPairs, out) -} - // valueFunc is a generic metric for simple values retrieved on collect time // from a function. It implements Metric and Collector. Its effective type is // determined by ValueType. This is a low-level building block used by the @@ -132,7 +63,7 @@ func newValueFunc(desc *Desc, valueType ValueType, function func() float64) *val desc: desc, valType: valueType, function: function, - labelPairs: makeLabelPairs(desc, nil), + labelPairs: MakeLabelPairs(desc, nil), } result.init(result) return result @@ -143,7 +74,7 @@ func (v *valueFunc) Desc() *Desc { } func (v *valueFunc) Write(out *dto.Metric) error { - return populateMetric(v.valType, v.function(), v.labelPairs, out) + return populateMetric(v.valType, v.function(), v.labelPairs, nil, out) } // NewConstMetric returns a metric with one fixed value that cannot be @@ -151,16 +82,20 @@ func (v *valueFunc) Write(out *dto.Metric) error { // operations. However, when implementing custom Collectors, it is useful as a // throw-away metric that is generated on the fly to send it to Prometheus in // the Collect method. NewConstMetric returns an error if the length of -// labelValues is not consistent with the variable labels in Desc. +// labelValues is not consistent with the variable labels in Desc or if Desc is +// invalid. 
func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) (Metric, error) { - if len(desc.variableLabels) != len(labelValues) { - return nil, errInconsistentCardinality + if desc.err != nil { + return nil, desc.err + } + if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { + return nil, err } return &constMetric{ desc: desc, valType: valueType, val: value, - labelPairs: makeLabelPairs(desc, labelValues), + labelPairs: MakeLabelPairs(desc, labelValues), }, nil } @@ -186,19 +121,20 @@ func (m *constMetric) Desc() *Desc { } func (m *constMetric) Write(out *dto.Metric) error { - return populateMetric(m.valType, m.val, m.labelPairs, out) + return populateMetric(m.valType, m.val, m.labelPairs, nil, out) } func populateMetric( t ValueType, v float64, labelPairs []*dto.LabelPair, + e *dto.Exemplar, m *dto.Metric, ) error { m.Label = labelPairs switch t { case CounterValue: - m.Counter = &dto.Counter{Value: proto.Float64(v)} + m.Counter = &dto.Counter{Value: proto.Float64(v), Exemplar: e} case GaugeValue: m.Gauge = &dto.Gauge{Value: proto.Float64(v)} case UntypedValue: @@ -209,7 +145,14 @@ func populateMetric( return nil } -func makeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair { +// MakeLabelPairs is a helper function to create protobuf LabelPairs from the +// variable and constant labels in the provided Desc. The values for the +// variable labels are defined by the labelValues slice, which must be in the +// same order as the corresponding variable labels in the Desc. +// +// This function is only needed for custom Metric implementations. See MetricVec +// example. +func MakeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair { totalLen := len(desc.variableLabels) + len(desc.constLabelPairs) if totalLen == 0 { // Super fast path. 
@@ -226,9 +169,44 @@ func makeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair { Value: proto.String(labelValues[i]), }) } - for _, lp := range desc.constLabelPairs { - labelPairs = append(labelPairs, lp) - } - sort.Sort(LabelPairSorter(labelPairs)) + labelPairs = append(labelPairs, desc.constLabelPairs...) + sort.Sort(labelPairSorter(labelPairs)) return labelPairs } + +// ExemplarMaxRunes is the max total number of runes allowed in exemplar labels. +const ExemplarMaxRunes = 64 + +// newExemplar creates a new dto.Exemplar from the provided values. An error is +// returned if any of the label names or values are invalid or if the total +// number of runes in the label names and values exceeds ExemplarMaxRunes. +func newExemplar(value float64, ts time.Time, l Labels) (*dto.Exemplar, error) { + e := &dto.Exemplar{} + e.Value = proto.Float64(value) + tsProto, err := ptypes.TimestampProto(ts) + if err != nil { + return nil, err + } + e.Timestamp = tsProto + labelPairs := make([]*dto.LabelPair, 0, len(l)) + var runes int + for name, value := range l { + if !checkLabelName(name) { + return nil, fmt.Errorf("exemplar label name %q is invalid", name) + } + runes += utf8.RuneCountInString(name) + if !utf8.ValidString(value) { + return nil, fmt.Errorf("exemplar label value %q is not valid UTF-8", value) + } + runes += utf8.RuneCountInString(value) + labelPairs = append(labelPairs, &dto.LabelPair{ + Name: proto.String(name), + Value: proto.String(value), + }) + } + if runes > ExemplarMaxRunes { + return nil, fmt.Errorf("exemplar labels have %d runes, exceeding the limit of %d", runes, ExemplarMaxRunes) + } + e.Label = labelPairs + return e, nil +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/vendor/github.com/prometheus/client_golang/prometheus/vec.go new file mode 100644 index 000000000..4ababe6c9 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/vec.go @@ -0,0 +1,556 @@ +// Copyright 2014 The Prometheus 
Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "fmt" + "sync" + + "github.com/prometheus/common/model" +) + +// MetricVec is a Collector to bundle metrics of the same name that differ in +// their label values. MetricVec is not used directly but as a building block +// for implementations of vectors of a given metric type, like GaugeVec, +// CounterVec, SummaryVec, and HistogramVec. It is exported so that it can be +// used for custom Metric implementations. +// +// To create a FooVec for custom Metric Foo, embed a pointer to MetricVec in +// FooVec and initialize it with NewMetricVec. Implement wrappers for +// GetMetricWithLabelValues and GetMetricWith that return (Foo, error) rather +// than (Metric, error). Similarly, create a wrapper for CurryWith that returns +// (*FooVec, error) rather than (*MetricVec, error). It is recommended to also +// add the convenience methods WithLabelValues, With, and MustCurryWith, which +// panic instead of returning errors. See also the MetricVec example. +type MetricVec struct { + *metricMap + + curry []curriedLabelValue + + // hashAdd and hashAddByte can be replaced for testing collision handling. + hashAdd func(h uint64, s string) uint64 + hashAddByte func(h uint64, b byte) uint64 +} + +// NewMetricVec returns an initialized metricVec. 
+func NewMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *MetricVec { + return &MetricVec{ + metricMap: &metricMap{ + metrics: map[uint64][]metricWithLabelValues{}, + desc: desc, + newMetric: newMetric, + }, + hashAdd: hashAdd, + hashAddByte: hashAddByte, + } +} + +// DeleteLabelValues removes the metric where the variable labels are the same +// as those passed in as labels (same order as the VariableLabels in Desc). It +// returns true if a metric was deleted. +// +// It is not an error if the number of label values is not the same as the +// number of VariableLabels in Desc. However, such inconsistent label count can +// never match an actual metric, so the method will always return false in that +// case. +// +// Note that for more than one label value, this method is prone to mistakes +// caused by an incorrect order of arguments. Consider Delete(Labels) as an +// alternative to avoid that type of mistake. For higher label numbers, the +// latter has a much more readable (albeit more verbose) syntax, but it comes +// with a performance overhead (for creating and processing the Labels map). +// See also the CounterVec example. +func (m *MetricVec) DeleteLabelValues(lvs ...string) bool { + h, err := m.hashLabelValues(lvs) + if err != nil { + return false + } + + return m.metricMap.deleteByHashWithLabelValues(h, lvs, m.curry) +} + +// Delete deletes the metric where the variable labels are the same as those +// passed in as labels. It returns true if a metric was deleted. +// +// It is not an error if the number and names of the Labels are inconsistent +// with those of the VariableLabels in Desc. However, such inconsistent Labels +// can never match an actual metric, so the method will always return false in +// that case. +// +// This method is used for the same purpose as DeleteLabelValues(...string). See +// there for pros and cons of the two methods. 
+func (m *MetricVec) Delete(labels Labels) bool { + h, err := m.hashLabels(labels) + if err != nil { + return false + } + + return m.metricMap.deleteByHashWithLabels(h, labels, m.curry) +} + +// Without explicit forwarding of Describe, Collect, Reset, those methods won't +// show up in GoDoc. + +// Describe implements Collector. +func (m *MetricVec) Describe(ch chan<- *Desc) { m.metricMap.Describe(ch) } + +// Collect implements Collector. +func (m *MetricVec) Collect(ch chan<- Metric) { m.metricMap.Collect(ch) } + +// Reset deletes all metrics in this vector. +func (m *MetricVec) Reset() { m.metricMap.Reset() } + +// CurryWith returns a vector curried with the provided labels, i.e. the +// returned vector has those labels pre-set for all labeled operations performed +// on it. The cardinality of the curried vector is reduced accordingly. The +// order of the remaining labels stays the same (just with the curried labels +// taken out of the sequence – which is relevant for the +// (GetMetric)WithLabelValues methods). It is possible to curry a curried +// vector, but only with labels not yet used for currying before. +// +// The metrics contained in the MetricVec are shared between the curried and +// uncurried vectors. They are just accessed differently. Curried and uncurried +// vectors behave identically in terms of collection. Only one must be +// registered with a given registry (usually the uncurried version). The Reset +// method deletes all metrics, even if called on a curried vector. +// +// Note that CurryWith is usually not called directly but through a wrapper +// around MetricVec, implementing a vector for a specific Metric +// implementation, for example GaugeVec. 
+func (m *MetricVec) CurryWith(labels Labels) (*MetricVec, error) { + var ( + newCurry []curriedLabelValue + oldCurry = m.curry + iCurry int + ) + for i, label := range m.desc.variableLabels { + val, ok := labels[label] + if iCurry < len(oldCurry) && oldCurry[iCurry].index == i { + if ok { + return nil, fmt.Errorf("label name %q is already curried", label) + } + newCurry = append(newCurry, oldCurry[iCurry]) + iCurry++ + } else { + if !ok { + continue // Label stays uncurried. + } + newCurry = append(newCurry, curriedLabelValue{i, val}) + } + } + if l := len(oldCurry) + len(labels) - len(newCurry); l > 0 { + return nil, fmt.Errorf("%d unknown label(s) found during currying", l) + } + + return &MetricVec{ + metricMap: m.metricMap, + curry: newCurry, + hashAdd: m.hashAdd, + hashAddByte: m.hashAddByte, + }, nil +} + +// GetMetricWithLabelValues returns the Metric for the given slice of label +// values (same order as the variable labels in Desc). If that combination of +// label values is accessed for the first time, a new Metric is created (by +// calling the newMetric function provided during construction of the +// MetricVec). +// +// It is possible to call this method without using the returned Metric to only +// create the new Metric but leave it in its initial state. +// +// Keeping the Metric for later use is possible (and should be considered if +// performance is critical), but keep in mind that Reset, DeleteLabelValues and +// Delete can be used to delete the Metric from the MetricVec. In that case, the +// Metric will still exist, but it will not be exported anymore, even if a +// Metric with the same label values is created later. +// +// An error is returned if the number of label values is not the same as the +// number of variable labels in Desc (minus any curried labels). +// +// Note that for more than one label value, this method is prone to mistakes +// caused by an incorrect order of arguments. 
Consider GetMetricWith(Labels) as +// an alternative to avoid that type of mistake. For higher label numbers, the +// latter has a much more readable (albeit more verbose) syntax, but it comes +// with a performance overhead (for creating and processing the Labels map). +// +// Note that GetMetricWithLabelValues is usually not called directly but through +// a wrapper around MetricVec, implementing a vector for a specific Metric +// implementation, for example GaugeVec. +func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) { + h, err := m.hashLabelValues(lvs) + if err != nil { + return nil, err + } + + return m.metricMap.getOrCreateMetricWithLabelValues(h, lvs, m.curry), nil +} + +// GetMetricWith returns the Metric for the given Labels map (the label names +// must match those of the variable labels in Desc). If that label map is +// accessed for the first time, a new Metric is created. Implications of +// creating a Metric without using it and keeping the Metric for later use +// are the same as for GetMetricWithLabelValues. +// +// An error is returned if the number and names of the Labels are inconsistent +// with those of the variable labels in Desc (minus any curried labels). +// +// This method is used for the same purpose as +// GetMetricWithLabelValues(...string). See there for pros and cons of the two +// methods. +// +// Note that GetMetricWith is usually not called directly but through a wrapper +// around MetricVec, implementing a vector for a specific Metric implementation, +// for example GaugeVec. 
+func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) { + h, err := m.hashLabels(labels) + if err != nil { + return nil, err + } + + return m.metricMap.getOrCreateMetricWithLabels(h, labels, m.curry), nil +} + +func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) { + if err := validateLabelValues(vals, len(m.desc.variableLabels)-len(m.curry)); err != nil { + return 0, err + } + + var ( + h = hashNew() + curry = m.curry + iVals, iCurry int + ) + for i := 0; i < len(m.desc.variableLabels); i++ { + if iCurry < len(curry) && curry[iCurry].index == i { + h = m.hashAdd(h, curry[iCurry].value) + iCurry++ + } else { + h = m.hashAdd(h, vals[iVals]) + iVals++ + } + h = m.hashAddByte(h, model.SeparatorByte) + } + return h, nil +} + +func (m *MetricVec) hashLabels(labels Labels) (uint64, error) { + if err := validateValuesInLabels(labels, len(m.desc.variableLabels)-len(m.curry)); err != nil { + return 0, err + } + + var ( + h = hashNew() + curry = m.curry + iCurry int + ) + for i, label := range m.desc.variableLabels { + val, ok := labels[label] + if iCurry < len(curry) && curry[iCurry].index == i { + if ok { + return 0, fmt.Errorf("label name %q is already curried", label) + } + h = m.hashAdd(h, curry[iCurry].value) + iCurry++ + } else { + if !ok { + return 0, fmt.Errorf("label name %q missing in label map", label) + } + h = m.hashAdd(h, val) + } + h = m.hashAddByte(h, model.SeparatorByte) + } + return h, nil +} + +// metricWithLabelValues provides the metric and its label values for +// disambiguation on hash collision. +type metricWithLabelValues struct { + values []string + metric Metric +} + +// curriedLabelValue sets the curried value for a label at the given index. +type curriedLabelValue struct { + index int + value string +} + +// metricMap is a helper for metricVec and shared between differently curried +// metricVecs. +type metricMap struct { + mtx sync.RWMutex // Protects metrics. 
+ metrics map[uint64][]metricWithLabelValues + desc *Desc + newMetric func(labelValues ...string) Metric +} + +// Describe implements Collector. It will send exactly one Desc to the provided +// channel. +func (m *metricMap) Describe(ch chan<- *Desc) { + ch <- m.desc +} + +// Collect implements Collector. +func (m *metricMap) Collect(ch chan<- Metric) { + m.mtx.RLock() + defer m.mtx.RUnlock() + + for _, metrics := range m.metrics { + for _, metric := range metrics { + ch <- metric.metric + } + } +} + +// Reset deletes all metrics in this vector. +func (m *metricMap) Reset() { + m.mtx.Lock() + defer m.mtx.Unlock() + + for h := range m.metrics { + delete(m.metrics, h) + } +} + +// deleteByHashWithLabelValues removes the metric from the hash bucket h. If +// there are multiple matches in the bucket, use lvs to select a metric and +// remove only that metric. +func (m *metricMap) deleteByHashWithLabelValues( + h uint64, lvs []string, curry []curriedLabelValue, +) bool { + m.mtx.Lock() + defer m.mtx.Unlock() + + metrics, ok := m.metrics[h] + if !ok { + return false + } + + i := findMetricWithLabelValues(metrics, lvs, curry) + if i >= len(metrics) { + return false + } + + if len(metrics) > 1 { + old := metrics + m.metrics[h] = append(metrics[:i], metrics[i+1:]...) + old[len(old)-1] = metricWithLabelValues{} + } else { + delete(m.metrics, h) + } + return true +} + +// deleteByHashWithLabels removes the metric from the hash bucket h. If there +// are multiple matches in the bucket, use lvs to select a metric and remove +// only that metric. +func (m *metricMap) deleteByHashWithLabels( + h uint64, labels Labels, curry []curriedLabelValue, +) bool { + m.mtx.Lock() + defer m.mtx.Unlock() + + metrics, ok := m.metrics[h] + if !ok { + return false + } + i := findMetricWithLabels(m.desc, metrics, labels, curry) + if i >= len(metrics) { + return false + } + + if len(metrics) > 1 { + old := metrics + m.metrics[h] = append(metrics[:i], metrics[i+1:]...) 
+ old[len(old)-1] = metricWithLabelValues{} + } else { + delete(m.metrics, h) + } + return true +} + +// getOrCreateMetricWithLabelValues retrieves the metric by hash and label value +// or creates it and returns the new one. +// +// This function holds the mutex. +func (m *metricMap) getOrCreateMetricWithLabelValues( + hash uint64, lvs []string, curry []curriedLabelValue, +) Metric { + m.mtx.RLock() + metric, ok := m.getMetricWithHashAndLabelValues(hash, lvs, curry) + m.mtx.RUnlock() + if ok { + return metric + } + + m.mtx.Lock() + defer m.mtx.Unlock() + metric, ok = m.getMetricWithHashAndLabelValues(hash, lvs, curry) + if !ok { + inlinedLVs := inlineLabelValues(lvs, curry) + metric = m.newMetric(inlinedLVs...) + m.metrics[hash] = append(m.metrics[hash], metricWithLabelValues{values: inlinedLVs, metric: metric}) + } + return metric +} + +// getOrCreateMetricWithLabelValues retrieves the metric by hash and label value +// or creates it and returns the new one. +// +// This function holds the mutex. +func (m *metricMap) getOrCreateMetricWithLabels( + hash uint64, labels Labels, curry []curriedLabelValue, +) Metric { + m.mtx.RLock() + metric, ok := m.getMetricWithHashAndLabels(hash, labels, curry) + m.mtx.RUnlock() + if ok { + return metric + } + + m.mtx.Lock() + defer m.mtx.Unlock() + metric, ok = m.getMetricWithHashAndLabels(hash, labels, curry) + if !ok { + lvs := extractLabelValues(m.desc, labels, curry) + metric = m.newMetric(lvs...) + m.metrics[hash] = append(m.metrics[hash], metricWithLabelValues{values: lvs, metric: metric}) + } + return metric +} + +// getMetricWithHashAndLabelValues gets a metric while handling possible +// collisions in the hash space. Must be called while holding the read mutex. 
+func (m *metricMap) getMetricWithHashAndLabelValues( + h uint64, lvs []string, curry []curriedLabelValue, +) (Metric, bool) { + metrics, ok := m.metrics[h] + if ok { + if i := findMetricWithLabelValues(metrics, lvs, curry); i < len(metrics) { + return metrics[i].metric, true + } + } + return nil, false +} + +// getMetricWithHashAndLabels gets a metric while handling possible collisions in +// the hash space. Must be called while holding read mutex. +func (m *metricMap) getMetricWithHashAndLabels( + h uint64, labels Labels, curry []curriedLabelValue, +) (Metric, bool) { + metrics, ok := m.metrics[h] + if ok { + if i := findMetricWithLabels(m.desc, metrics, labels, curry); i < len(metrics) { + return metrics[i].metric, true + } + } + return nil, false +} + +// findMetricWithLabelValues returns the index of the matching metric or +// len(metrics) if not found. +func findMetricWithLabelValues( + metrics []metricWithLabelValues, lvs []string, curry []curriedLabelValue, +) int { + for i, metric := range metrics { + if matchLabelValues(metric.values, lvs, curry) { + return i + } + } + return len(metrics) +} + +// findMetricWithLabels returns the index of the matching metric or len(metrics) +// if not found. 
+func findMetricWithLabels( + desc *Desc, metrics []metricWithLabelValues, labels Labels, curry []curriedLabelValue, +) int { + for i, metric := range metrics { + if matchLabels(desc, metric.values, labels, curry) { + return i + } + } + return len(metrics) +} + +func matchLabelValues(values []string, lvs []string, curry []curriedLabelValue) bool { + if len(values) != len(lvs)+len(curry) { + return false + } + var iLVs, iCurry int + for i, v := range values { + if iCurry < len(curry) && curry[iCurry].index == i { + if v != curry[iCurry].value { + return false + } + iCurry++ + continue + } + if v != lvs[iLVs] { + return false + } + iLVs++ + } + return true +} + +func matchLabels(desc *Desc, values []string, labels Labels, curry []curriedLabelValue) bool { + if len(values) != len(labels)+len(curry) { + return false + } + iCurry := 0 + for i, k := range desc.variableLabels { + if iCurry < len(curry) && curry[iCurry].index == i { + if values[i] != curry[iCurry].value { + return false + } + iCurry++ + continue + } + if values[i] != labels[k] { + return false + } + } + return true +} + +func extractLabelValues(desc *Desc, labels Labels, curry []curriedLabelValue) []string { + labelValues := make([]string, len(labels)+len(curry)) + iCurry := 0 + for i, k := range desc.variableLabels { + if iCurry < len(curry) && curry[iCurry].index == i { + labelValues[i] = curry[iCurry].value + iCurry++ + continue + } + labelValues[i] = labels[k] + } + return labelValues +} + +func inlineLabelValues(lvs []string, curry []curriedLabelValue) []string { + labelValues := make([]string, len(lvs)+len(curry)) + var iCurry, iLVs int + for i := range labelValues { + if iCurry < len(curry) && curry[iCurry].index == i { + labelValues[i] = curry[iCurry].value + iCurry++ + continue + } + labelValues[i] = lvs[iLVs] + iLVs++ + } + return labelValues +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/wrap.go b/vendor/github.com/prometheus/client_golang/prometheus/wrap.go new file mode 
100644 index 000000000..74ee93280 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/wrap.go @@ -0,0 +1,214 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "fmt" + "sort" + + //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. + "github.com/golang/protobuf/proto" + + dto "github.com/prometheus/client_model/go" +) + +// WrapRegistererWith returns a Registerer wrapping the provided +// Registerer. Collectors registered with the returned Registerer will be +// registered with the wrapped Registerer in a modified way. The modified +// Collector adds the provided Labels to all Metrics it collects (as +// ConstLabels). The Metrics collected by the unmodified Collector must not +// duplicate any of those labels. Wrapping a nil value is valid, resulting +// in a no-op Registerer. +// +// WrapRegistererWith provides a way to add fixed labels to a subset of +// Collectors. It should not be used to add fixed labels to all metrics +// exposed. See also +// https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels +// +// Conflicts between Collectors registered through the original Registerer with +// Collectors registered through the wrapping Registerer will still be +// detected. 
Any AlreadyRegisteredError returned by the Register method of +// either Registerer will contain the ExistingCollector in the form it was +// provided to the respective registry. +// +// The Collector example demonstrates a use of WrapRegistererWith. +func WrapRegistererWith(labels Labels, reg Registerer) Registerer { + return &wrappingRegisterer{ + wrappedRegisterer: reg, + labels: labels, + } +} + +// WrapRegistererWithPrefix returns a Registerer wrapping the provided +// Registerer. Collectors registered with the returned Registerer will be +// registered with the wrapped Registerer in a modified way. The modified +// Collector adds the provided prefix to the name of all Metrics it collects. +// Wrapping a nil value is valid, resulting in a no-op Registerer. +// +// WrapRegistererWithPrefix is useful to have one place to prefix all metrics of +// a sub-system. To make this work, register metrics of the sub-system with the +// wrapping Registerer returned by WrapRegistererWithPrefix. It is rarely useful +// to use the same prefix for all metrics exposed. In particular, do not prefix +// metric names that are standardized across applications, as that would break +// horizontal monitoring, for example the metrics provided by the Go collector +// (see NewGoCollector) and the process collector (see NewProcessCollector). (In +// fact, those metrics are already prefixed with “go_” or “process_”, +// respectively.) +// +// Conflicts between Collectors registered through the original Registerer with +// Collectors registered through the wrapping Registerer will still be +// detected. Any AlreadyRegisteredError returned by the Register method of +// either Registerer will contain the ExistingCollector in the form it was +// provided to the respective registry. 
+func WrapRegistererWithPrefix(prefix string, reg Registerer) Registerer { + return &wrappingRegisterer{ + wrappedRegisterer: reg, + prefix: prefix, + } +} + +type wrappingRegisterer struct { + wrappedRegisterer Registerer + prefix string + labels Labels +} + +func (r *wrappingRegisterer) Register(c Collector) error { + if r.wrappedRegisterer == nil { + return nil + } + return r.wrappedRegisterer.Register(&wrappingCollector{ + wrappedCollector: c, + prefix: r.prefix, + labels: r.labels, + }) +} + +func (r *wrappingRegisterer) MustRegister(cs ...Collector) { + if r.wrappedRegisterer == nil { + return + } + for _, c := range cs { + if err := r.Register(c); err != nil { + panic(err) + } + } +} + +func (r *wrappingRegisterer) Unregister(c Collector) bool { + if r.wrappedRegisterer == nil { + return false + } + return r.wrappedRegisterer.Unregister(&wrappingCollector{ + wrappedCollector: c, + prefix: r.prefix, + labels: r.labels, + }) +} + +type wrappingCollector struct { + wrappedCollector Collector + prefix string + labels Labels +} + +func (c *wrappingCollector) Collect(ch chan<- Metric) { + wrappedCh := make(chan Metric) + go func() { + c.wrappedCollector.Collect(wrappedCh) + close(wrappedCh) + }() + for m := range wrappedCh { + ch <- &wrappingMetric{ + wrappedMetric: m, + prefix: c.prefix, + labels: c.labels, + } + } +} + +func (c *wrappingCollector) Describe(ch chan<- *Desc) { + wrappedCh := make(chan *Desc) + go func() { + c.wrappedCollector.Describe(wrappedCh) + close(wrappedCh) + }() + for desc := range wrappedCh { + ch <- wrapDesc(desc, c.prefix, c.labels) + } +} + +func (c *wrappingCollector) unwrapRecursively() Collector { + switch wc := c.wrappedCollector.(type) { + case *wrappingCollector: + return wc.unwrapRecursively() + default: + return wc + } +} + +type wrappingMetric struct { + wrappedMetric Metric + prefix string + labels Labels +} + +func (m *wrappingMetric) Desc() *Desc { + return wrapDesc(m.wrappedMetric.Desc(), m.prefix, m.labels) +} + +func (m 
*wrappingMetric) Write(out *dto.Metric) error { + if err := m.wrappedMetric.Write(out); err != nil { + return err + } + if len(m.labels) == 0 { + // No wrapping labels. + return nil + } + for ln, lv := range m.labels { + out.Label = append(out.Label, &dto.LabelPair{ + Name: proto.String(ln), + Value: proto.String(lv), + }) + } + sort.Sort(labelPairSorter(out.Label)) + return nil +} + +func wrapDesc(desc *Desc, prefix string, labels Labels) *Desc { + constLabels := Labels{} + for _, lp := range desc.constLabelPairs { + constLabels[*lp.Name] = *lp.Value + } + for ln, lv := range labels { + if _, alreadyUsed := constLabels[ln]; alreadyUsed { + return &Desc{ + fqName: desc.fqName, + help: desc.help, + variableLabels: desc.variableLabels, + constLabelPairs: desc.constLabelPairs, + err: fmt.Errorf("attempted wrapping with already existing label name %q", ln), + } + } + constLabels[ln] = lv + } + // NewDesc will do remaining validations. + newDesc := NewDesc(prefix+desc.fqName, desc.help, desc.variableLabels, constLabels) + // Propagate errors if there was any. This will override any errer + // created by NewDesc above, i.e. earlier errors get precedence. 
+ if desc.err != nil { + newDesc.err = desc.err + } + return newDesc +} diff --git a/vendor/github.com/m3db/prometheus_client_model/LICENSE b/vendor/github.com/prometheus/client_model/LICENSE similarity index 100% rename from vendor/github.com/m3db/prometheus_client_model/LICENSE rename to vendor/github.com/prometheus/client_model/LICENSE diff --git a/vendor/github.com/m3db/prometheus_client_model/NOTICE b/vendor/github.com/prometheus/client_model/NOTICE similarity index 100% rename from vendor/github.com/m3db/prometheus_client_model/NOTICE rename to vendor/github.com/prometheus/client_model/NOTICE diff --git a/vendor/github.com/prometheus/client_model/go/metrics.pb.go b/vendor/github.com/prometheus/client_model/go/metrics.pb.go new file mode 100644 index 000000000..2f4930d9d --- /dev/null +++ b/vendor/github.com/prometheus/client_model/go/metrics.pb.go @@ -0,0 +1,723 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: metrics.proto + +package io_prometheus_client + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + timestamp "github.com/golang/protobuf/ptypes/timestamp" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type MetricType int32 + +const ( + MetricType_COUNTER MetricType = 0 + MetricType_GAUGE MetricType = 1 + MetricType_SUMMARY MetricType = 2 + MetricType_UNTYPED MetricType = 3 + MetricType_HISTOGRAM MetricType = 4 +) + +var MetricType_name = map[int32]string{ + 0: "COUNTER", + 1: "GAUGE", + 2: "SUMMARY", + 3: "UNTYPED", + 4: "HISTOGRAM", +} + +var MetricType_value = map[string]int32{ + "COUNTER": 0, + "GAUGE": 1, + "SUMMARY": 2, + "UNTYPED": 3, + "HISTOGRAM": 4, +} + +func (x MetricType) Enum() *MetricType { + p := new(MetricType) + *p = x + return p +} + +func (x MetricType) String() string { + return proto.EnumName(MetricType_name, int32(x)) +} + +func (x *MetricType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(MetricType_value, data, "MetricType") + if err != nil { + return err + } + *x = MetricType(value) + return nil +} + +func (MetricType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_6039342a2ba47b72, []int{0} +} + +type LabelPair struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LabelPair) Reset() { *m = LabelPair{} } +func (m *LabelPair) String() string { return proto.CompactTextString(m) } +func (*LabelPair) ProtoMessage() {} +func (*LabelPair) Descriptor() ([]byte, []int) { + return fileDescriptor_6039342a2ba47b72, []int{0} +} + +func (m *LabelPair) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LabelPair.Unmarshal(m, b) +} +func (m *LabelPair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_LabelPair.Marshal(b, m, deterministic) +} +func (m *LabelPair) XXX_Merge(src proto.Message) { + xxx_messageInfo_LabelPair.Merge(m, src) +} +func (m *LabelPair) XXX_Size() 
int { + return xxx_messageInfo_LabelPair.Size(m) +} +func (m *LabelPair) XXX_DiscardUnknown() { + xxx_messageInfo_LabelPair.DiscardUnknown(m) +} + +var xxx_messageInfo_LabelPair proto.InternalMessageInfo + +func (m *LabelPair) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *LabelPair) GetValue() string { + if m != nil && m.Value != nil { + return *m.Value + } + return "" +} + +type Gauge struct { + Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Gauge) Reset() { *m = Gauge{} } +func (m *Gauge) String() string { return proto.CompactTextString(m) } +func (*Gauge) ProtoMessage() {} +func (*Gauge) Descriptor() ([]byte, []int) { + return fileDescriptor_6039342a2ba47b72, []int{1} +} + +func (m *Gauge) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Gauge.Unmarshal(m, b) +} +func (m *Gauge) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Gauge.Marshal(b, m, deterministic) +} +func (m *Gauge) XXX_Merge(src proto.Message) { + xxx_messageInfo_Gauge.Merge(m, src) +} +func (m *Gauge) XXX_Size() int { + return xxx_messageInfo_Gauge.Size(m) +} +func (m *Gauge) XXX_DiscardUnknown() { + xxx_messageInfo_Gauge.DiscardUnknown(m) +} + +var xxx_messageInfo_Gauge proto.InternalMessageInfo + +func (m *Gauge) GetValue() float64 { + if m != nil && m.Value != nil { + return *m.Value + } + return 0 +} + +type Counter struct { + Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` + Exemplar *Exemplar `protobuf:"bytes,2,opt,name=exemplar" json:"exemplar,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Counter) Reset() { *m = Counter{} } +func (m *Counter) String() string { return proto.CompactTextString(m) } +func (*Counter) 
ProtoMessage() {} +func (*Counter) Descriptor() ([]byte, []int) { + return fileDescriptor_6039342a2ba47b72, []int{2} +} + +func (m *Counter) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Counter.Unmarshal(m, b) +} +func (m *Counter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Counter.Marshal(b, m, deterministic) +} +func (m *Counter) XXX_Merge(src proto.Message) { + xxx_messageInfo_Counter.Merge(m, src) +} +func (m *Counter) XXX_Size() int { + return xxx_messageInfo_Counter.Size(m) +} +func (m *Counter) XXX_DiscardUnknown() { + xxx_messageInfo_Counter.DiscardUnknown(m) +} + +var xxx_messageInfo_Counter proto.InternalMessageInfo + +func (m *Counter) GetValue() float64 { + if m != nil && m.Value != nil { + return *m.Value + } + return 0 +} + +func (m *Counter) GetExemplar() *Exemplar { + if m != nil { + return m.Exemplar + } + return nil +} + +type Quantile struct { + Quantile *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"` + Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Quantile) Reset() { *m = Quantile{} } +func (m *Quantile) String() string { return proto.CompactTextString(m) } +func (*Quantile) ProtoMessage() {} +func (*Quantile) Descriptor() ([]byte, []int) { + return fileDescriptor_6039342a2ba47b72, []int{3} +} + +func (m *Quantile) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Quantile.Unmarshal(m, b) +} +func (m *Quantile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Quantile.Marshal(b, m, deterministic) +} +func (m *Quantile) XXX_Merge(src proto.Message) { + xxx_messageInfo_Quantile.Merge(m, src) +} +func (m *Quantile) XXX_Size() int { + return xxx_messageInfo_Quantile.Size(m) +} +func (m *Quantile) XXX_DiscardUnknown() { + xxx_messageInfo_Quantile.DiscardUnknown(m) +} + 
+var xxx_messageInfo_Quantile proto.InternalMessageInfo + +func (m *Quantile) GetQuantile() float64 { + if m != nil && m.Quantile != nil { + return *m.Quantile + } + return 0 +} + +func (m *Quantile) GetValue() float64 { + if m != nil && m.Value != nil { + return *m.Value + } + return 0 +} + +type Summary struct { + SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"` + SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"` + Quantile []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Summary) Reset() { *m = Summary{} } +func (m *Summary) String() string { return proto.CompactTextString(m) } +func (*Summary) ProtoMessage() {} +func (*Summary) Descriptor() ([]byte, []int) { + return fileDescriptor_6039342a2ba47b72, []int{4} +} + +func (m *Summary) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Summary.Unmarshal(m, b) +} +func (m *Summary) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Summary.Marshal(b, m, deterministic) +} +func (m *Summary) XXX_Merge(src proto.Message) { + xxx_messageInfo_Summary.Merge(m, src) +} +func (m *Summary) XXX_Size() int { + return xxx_messageInfo_Summary.Size(m) +} +func (m *Summary) XXX_DiscardUnknown() { + xxx_messageInfo_Summary.DiscardUnknown(m) +} + +var xxx_messageInfo_Summary proto.InternalMessageInfo + +func (m *Summary) GetSampleCount() uint64 { + if m != nil && m.SampleCount != nil { + return *m.SampleCount + } + return 0 +} + +func (m *Summary) GetSampleSum() float64 { + if m != nil && m.SampleSum != nil { + return *m.SampleSum + } + return 0 +} + +func (m *Summary) GetQuantile() []*Quantile { + if m != nil { + return m.Quantile + } + return nil +} + +type Untyped struct { + Value *float64 
`protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Untyped) Reset() { *m = Untyped{} } +func (m *Untyped) String() string { return proto.CompactTextString(m) } +func (*Untyped) ProtoMessage() {} +func (*Untyped) Descriptor() ([]byte, []int) { + return fileDescriptor_6039342a2ba47b72, []int{5} +} + +func (m *Untyped) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Untyped.Unmarshal(m, b) +} +func (m *Untyped) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Untyped.Marshal(b, m, deterministic) +} +func (m *Untyped) XXX_Merge(src proto.Message) { + xxx_messageInfo_Untyped.Merge(m, src) +} +func (m *Untyped) XXX_Size() int { + return xxx_messageInfo_Untyped.Size(m) +} +func (m *Untyped) XXX_DiscardUnknown() { + xxx_messageInfo_Untyped.DiscardUnknown(m) +} + +var xxx_messageInfo_Untyped proto.InternalMessageInfo + +func (m *Untyped) GetValue() float64 { + if m != nil && m.Value != nil { + return *m.Value + } + return 0 +} + +type Histogram struct { + SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"` + SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"` + Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Histogram) Reset() { *m = Histogram{} } +func (m *Histogram) String() string { return proto.CompactTextString(m) } +func (*Histogram) ProtoMessage() {} +func (*Histogram) Descriptor() ([]byte, []int) { + return fileDescriptor_6039342a2ba47b72, []int{6} +} + +func (m *Histogram) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Histogram.Unmarshal(m, b) +} +func (m *Histogram) XXX_Marshal(b []byte, deterministic bool) 
([]byte, error) { + return xxx_messageInfo_Histogram.Marshal(b, m, deterministic) +} +func (m *Histogram) XXX_Merge(src proto.Message) { + xxx_messageInfo_Histogram.Merge(m, src) +} +func (m *Histogram) XXX_Size() int { + return xxx_messageInfo_Histogram.Size(m) +} +func (m *Histogram) XXX_DiscardUnknown() { + xxx_messageInfo_Histogram.DiscardUnknown(m) +} + +var xxx_messageInfo_Histogram proto.InternalMessageInfo + +func (m *Histogram) GetSampleCount() uint64 { + if m != nil && m.SampleCount != nil { + return *m.SampleCount + } + return 0 +} + +func (m *Histogram) GetSampleSum() float64 { + if m != nil && m.SampleSum != nil { + return *m.SampleSum + } + return 0 +} + +func (m *Histogram) GetBucket() []*Bucket { + if m != nil { + return m.Bucket + } + return nil +} + +type Bucket struct { + CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count,json=cumulativeCount" json:"cumulative_count,omitempty"` + UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound,json=upperBound" json:"upper_bound,omitempty"` + Exemplar *Exemplar `protobuf:"bytes,3,opt,name=exemplar" json:"exemplar,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Bucket) Reset() { *m = Bucket{} } +func (m *Bucket) String() string { return proto.CompactTextString(m) } +func (*Bucket) ProtoMessage() {} +func (*Bucket) Descriptor() ([]byte, []int) { + return fileDescriptor_6039342a2ba47b72, []int{7} +} + +func (m *Bucket) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Bucket.Unmarshal(m, b) +} +func (m *Bucket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Bucket.Marshal(b, m, deterministic) +} +func (m *Bucket) XXX_Merge(src proto.Message) { + xxx_messageInfo_Bucket.Merge(m, src) +} +func (m *Bucket) XXX_Size() int { + return xxx_messageInfo_Bucket.Size(m) +} +func (m *Bucket) XXX_DiscardUnknown() { + xxx_messageInfo_Bucket.DiscardUnknown(m) +} + 
+var xxx_messageInfo_Bucket proto.InternalMessageInfo + +func (m *Bucket) GetCumulativeCount() uint64 { + if m != nil && m.CumulativeCount != nil { + return *m.CumulativeCount + } + return 0 +} + +func (m *Bucket) GetUpperBound() float64 { + if m != nil && m.UpperBound != nil { + return *m.UpperBound + } + return 0 +} + +func (m *Bucket) GetExemplar() *Exemplar { + if m != nil { + return m.Exemplar + } + return nil +} + +type Exemplar struct { + Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"` + Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"` + Timestamp *timestamp.Timestamp `protobuf:"bytes,3,opt,name=timestamp" json:"timestamp,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Exemplar) Reset() { *m = Exemplar{} } +func (m *Exemplar) String() string { return proto.CompactTextString(m) } +func (*Exemplar) ProtoMessage() {} +func (*Exemplar) Descriptor() ([]byte, []int) { + return fileDescriptor_6039342a2ba47b72, []int{8} +} + +func (m *Exemplar) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Exemplar.Unmarshal(m, b) +} +func (m *Exemplar) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Exemplar.Marshal(b, m, deterministic) +} +func (m *Exemplar) XXX_Merge(src proto.Message) { + xxx_messageInfo_Exemplar.Merge(m, src) +} +func (m *Exemplar) XXX_Size() int { + return xxx_messageInfo_Exemplar.Size(m) +} +func (m *Exemplar) XXX_DiscardUnknown() { + xxx_messageInfo_Exemplar.DiscardUnknown(m) +} + +var xxx_messageInfo_Exemplar proto.InternalMessageInfo + +func (m *Exemplar) GetLabel() []*LabelPair { + if m != nil { + return m.Label + } + return nil +} + +func (m *Exemplar) GetValue() float64 { + if m != nil && m.Value != nil { + return *m.Value + } + return 0 +} + +func (m *Exemplar) GetTimestamp() *timestamp.Timestamp { + if m != nil { + return m.Timestamp + } + return nil +} + 
+type Metric struct { + Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"` + Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"` + Counter *Counter `protobuf:"bytes,3,opt,name=counter" json:"counter,omitempty"` + Summary *Summary `protobuf:"bytes,4,opt,name=summary" json:"summary,omitempty"` + Untyped *Untyped `protobuf:"bytes,5,opt,name=untyped" json:"untyped,omitempty"` + Histogram *Histogram `protobuf:"bytes,7,opt,name=histogram" json:"histogram,omitempty"` + TimestampMs *int64 `protobuf:"varint,6,opt,name=timestamp_ms,json=timestampMs" json:"timestamp_ms,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Metric) Reset() { *m = Metric{} } +func (m *Metric) String() string { return proto.CompactTextString(m) } +func (*Metric) ProtoMessage() {} +func (*Metric) Descriptor() ([]byte, []int) { + return fileDescriptor_6039342a2ba47b72, []int{9} +} + +func (m *Metric) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Metric.Unmarshal(m, b) +} +func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Metric.Marshal(b, m, deterministic) +} +func (m *Metric) XXX_Merge(src proto.Message) { + xxx_messageInfo_Metric.Merge(m, src) +} +func (m *Metric) XXX_Size() int { + return xxx_messageInfo_Metric.Size(m) +} +func (m *Metric) XXX_DiscardUnknown() { + xxx_messageInfo_Metric.DiscardUnknown(m) +} + +var xxx_messageInfo_Metric proto.InternalMessageInfo + +func (m *Metric) GetLabel() []*LabelPair { + if m != nil { + return m.Label + } + return nil +} + +func (m *Metric) GetGauge() *Gauge { + if m != nil { + return m.Gauge + } + return nil +} + +func (m *Metric) GetCounter() *Counter { + if m != nil { + return m.Counter + } + return nil +} + +func (m *Metric) GetSummary() *Summary { + if m != nil { + return m.Summary + } + return nil +} + +func (m *Metric) GetUntyped() *Untyped { + if m != nil { 
+ return m.Untyped + } + return nil +} + +func (m *Metric) GetHistogram() *Histogram { + if m != nil { + return m.Histogram + } + return nil +} + +func (m *Metric) GetTimestampMs() int64 { + if m != nil && m.TimestampMs != nil { + return *m.TimestampMs + } + return 0 +} + +type MetricFamily struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Help *string `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"` + Type *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"` + Metric []*Metric `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MetricFamily) Reset() { *m = MetricFamily{} } +func (m *MetricFamily) String() string { return proto.CompactTextString(m) } +func (*MetricFamily) ProtoMessage() {} +func (*MetricFamily) Descriptor() ([]byte, []int) { + return fileDescriptor_6039342a2ba47b72, []int{10} +} + +func (m *MetricFamily) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MetricFamily.Unmarshal(m, b) +} +func (m *MetricFamily) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MetricFamily.Marshal(b, m, deterministic) +} +func (m *MetricFamily) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricFamily.Merge(m, src) +} +func (m *MetricFamily) XXX_Size() int { + return xxx_messageInfo_MetricFamily.Size(m) +} +func (m *MetricFamily) XXX_DiscardUnknown() { + xxx_messageInfo_MetricFamily.DiscardUnknown(m) +} + +var xxx_messageInfo_MetricFamily proto.InternalMessageInfo + +func (m *MetricFamily) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *MetricFamily) GetHelp() string { + if m != nil && m.Help != nil { + return *m.Help + } + return "" +} + +func (m *MetricFamily) GetType() MetricType { + if m != nil && m.Type != nil { + return *m.Type + } + 
return MetricType_COUNTER +} + +func (m *MetricFamily) GetMetric() []*Metric { + if m != nil { + return m.Metric + } + return nil +} + +func init() { + proto.RegisterEnum("io.prometheus.client.MetricType", MetricType_name, MetricType_value) + proto.RegisterType((*LabelPair)(nil), "io.prometheus.client.LabelPair") + proto.RegisterType((*Gauge)(nil), "io.prometheus.client.Gauge") + proto.RegisterType((*Counter)(nil), "io.prometheus.client.Counter") + proto.RegisterType((*Quantile)(nil), "io.prometheus.client.Quantile") + proto.RegisterType((*Summary)(nil), "io.prometheus.client.Summary") + proto.RegisterType((*Untyped)(nil), "io.prometheus.client.Untyped") + proto.RegisterType((*Histogram)(nil), "io.prometheus.client.Histogram") + proto.RegisterType((*Bucket)(nil), "io.prometheus.client.Bucket") + proto.RegisterType((*Exemplar)(nil), "io.prometheus.client.Exemplar") + proto.RegisterType((*Metric)(nil), "io.prometheus.client.Metric") + proto.RegisterType((*MetricFamily)(nil), "io.prometheus.client.MetricFamily") +} + +func init() { proto.RegisterFile("metrics.proto", fileDescriptor_6039342a2ba47b72) } + +var fileDescriptor_6039342a2ba47b72 = []byte{ + // 665 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x54, 0xcd, 0x6e, 0xd3, 0x4c, + 0x14, 0xfd, 0xdc, 0x38, 0x3f, 0xbe, 0x69, 0x3f, 0xa2, 0x51, 0x17, 0x56, 0xa1, 0x24, 0x78, 0x55, + 0x58, 0x38, 0xa2, 0x6a, 0x05, 0x2a, 0xb0, 0x68, 0x4b, 0x48, 0x91, 0x48, 0x5b, 0x26, 0xc9, 0xa2, + 0xb0, 0x88, 0x1c, 0x77, 0x70, 0x2c, 0x3c, 0xb1, 0xb1, 0x67, 0x2a, 0xb2, 0x66, 0xc1, 0x16, 0x5e, + 0x81, 0x17, 0x05, 0xcd, 0x8f, 0x6d, 0x2a, 0xb9, 0x95, 0x40, 0xec, 0x66, 0xee, 0x3d, 0xe7, 0xfa, + 0xcc, 0xf8, 0x9c, 0x81, 0x0d, 0x4a, 0x58, 0x1a, 0xfa, 0x99, 0x9b, 0xa4, 0x31, 0x8b, 0xd1, 0x66, + 0x18, 0x8b, 0x15, 0x25, 0x6c, 0x41, 0x78, 0xe6, 0xfa, 0x51, 0x48, 0x96, 0x6c, 0xab, 0x1b, 0xc4, + 0x71, 0x10, 0x91, 0xbe, 0xc4, 0xcc, 0xf9, 0x87, 0x3e, 0x0b, 0x29, 0xc9, 0x98, 0x47, 0x13, 0x45, + 
0x73, 0xf6, 0xc1, 0x7a, 0xe3, 0xcd, 0x49, 0x74, 0xee, 0x85, 0x29, 0x42, 0x60, 0x2e, 0x3d, 0x4a, + 0x6c, 0xa3, 0x67, 0xec, 0x58, 0x58, 0xae, 0xd1, 0x26, 0xd4, 0xaf, 0xbc, 0x88, 0x13, 0x7b, 0x4d, + 0x16, 0xd5, 0xc6, 0xd9, 0x86, 0xfa, 0xd0, 0xe3, 0xc1, 0x6f, 0x6d, 0xc1, 0x31, 0xf2, 0xf6, 0x7b, + 0x68, 0x1e, 0xc7, 0x7c, 0xc9, 0x48, 0x5a, 0x0d, 0x40, 0x07, 0xd0, 0x22, 0x9f, 0x09, 0x4d, 0x22, + 0x2f, 0x95, 0x83, 0xdb, 0xbb, 0xf7, 0xdd, 0xaa, 0x03, 0xb8, 0x03, 0x8d, 0xc2, 0x05, 0xde, 0x79, + 0x0e, 0xad, 0xb7, 0xdc, 0x5b, 0xb2, 0x30, 0x22, 0x68, 0x0b, 0x5a, 0x9f, 0xf4, 0x5a, 0x7f, 0xa0, + 0xd8, 0x5f, 0x57, 0x5e, 0x48, 0xfb, 0x6a, 0x40, 0x73, 0xcc, 0x29, 0xf5, 0xd2, 0x15, 0x7a, 0x00, + 0xeb, 0x99, 0x47, 0x93, 0x88, 0xcc, 0x7c, 0xa1, 0x56, 0x4e, 0x30, 0x71, 0x5b, 0xd5, 0xe4, 0x01, + 0xd0, 0x36, 0x80, 0x86, 0x64, 0x9c, 0xea, 0x49, 0x96, 0xaa, 0x8c, 0x39, 0x15, 0xe7, 0x28, 0xbe, + 0x5f, 0xeb, 0xd5, 0x6e, 0x3e, 0x47, 0xae, 0xb8, 0xd4, 0xe7, 0x74, 0xa1, 0x39, 0x5d, 0xb2, 0x55, + 0x42, 0x2e, 0x6f, 0xb8, 0xc5, 0x2f, 0x06, 0x58, 0x27, 0x61, 0xc6, 0xe2, 0x20, 0xf5, 0xe8, 0x3f, + 0x10, 0xbb, 0x07, 0x8d, 0x39, 0xf7, 0x3f, 0x12, 0xa6, 0xa5, 0xde, 0xab, 0x96, 0x7a, 0x24, 0x31, + 0x58, 0x63, 0x9d, 0x6f, 0x06, 0x34, 0x54, 0x09, 0x3d, 0x84, 0x8e, 0xcf, 0x29, 0x8f, 0x3c, 0x16, + 0x5e, 0x5d, 0x97, 0x71, 0xa7, 0xac, 0x2b, 0x29, 0x5d, 0x68, 0xf3, 0x24, 0x21, 0xe9, 0x6c, 0x1e, + 0xf3, 0xe5, 0xa5, 0xd6, 0x02, 0xb2, 0x74, 0x24, 0x2a, 0xd7, 0x1c, 0x50, 0xfb, 0x43, 0x07, 0x7c, + 0x37, 0xa0, 0x95, 0x97, 0xd1, 0x3e, 0xd4, 0x23, 0xe1, 0x60, 0xdb, 0x90, 0x87, 0xea, 0x56, 0x4f, + 0x29, 0x4c, 0x8e, 0x15, 0xba, 0xda, 0x1d, 0xe8, 0x29, 0x58, 0x45, 0x42, 0xb4, 0xac, 0x2d, 0x57, + 0x65, 0xc8, 0xcd, 0x33, 0xe4, 0x4e, 0x72, 0x04, 0x2e, 0xc1, 0xce, 0xcf, 0x35, 0x68, 0x8c, 0x64, + 0x22, 0xff, 0x56, 0xd1, 0x63, 0xa8, 0x07, 0x22, 0x53, 0x3a, 0x10, 0x77, 0xab, 0x69, 0x32, 0x76, + 0x58, 0x21, 0xd1, 0x13, 0x68, 0xfa, 0x2a, 0x67, 0x5a, 0xec, 0x76, 0x35, 0x49, 0x87, 0x11, 0xe7, + 0x68, 0x41, 0xcc, 0x54, 0x08, 0x6c, 
0xf3, 0x36, 0xa2, 0x4e, 0x0a, 0xce, 0xd1, 0x82, 0xc8, 0x95, + 0x69, 0xed, 0xfa, 0x6d, 0x44, 0xed, 0x6c, 0x9c, 0xa3, 0xd1, 0x0b, 0xb0, 0x16, 0xb9, 0x97, 0xed, + 0xa6, 0xa4, 0xde, 0x70, 0x31, 0x85, 0xe5, 0x71, 0xc9, 0x10, 0xee, 0x2f, 0xee, 0x7a, 0x46, 0x33, + 0xbb, 0xd1, 0x33, 0x76, 0x6a, 0xb8, 0x5d, 0xd4, 0x46, 0x99, 0xf3, 0xc3, 0x80, 0x75, 0xf5, 0x07, + 0x5e, 0x79, 0x34, 0x8c, 0x56, 0x95, 0xcf, 0x19, 0x02, 0x73, 0x41, 0xa2, 0x44, 0xbf, 0x66, 0x72, + 0x8d, 0xf6, 0xc0, 0x14, 0x1a, 0xe5, 0x15, 0xfe, 0xbf, 0xdb, 0xab, 0x56, 0xa5, 0x26, 0x4f, 0x56, + 0x09, 0xc1, 0x12, 0x2d, 0xd2, 0xa4, 0x5e, 0x60, 0xdb, 0xbc, 0x2d, 0x4d, 0x8a, 0x87, 0x35, 0xf6, + 0xd1, 0x08, 0xa0, 0x9c, 0x84, 0xda, 0xd0, 0x3c, 0x3e, 0x9b, 0x9e, 0x4e, 0x06, 0xb8, 0xf3, 0x1f, + 0xb2, 0xa0, 0x3e, 0x3c, 0x9c, 0x0e, 0x07, 0x1d, 0x43, 0xd4, 0xc7, 0xd3, 0xd1, 0xe8, 0x10, 0x5f, + 0x74, 0xd6, 0xc4, 0x66, 0x7a, 0x3a, 0xb9, 0x38, 0x1f, 0xbc, 0xec, 0xd4, 0xd0, 0x06, 0x58, 0x27, + 0xaf, 0xc7, 0x93, 0xb3, 0x21, 0x3e, 0x1c, 0x75, 0xcc, 0x23, 0x0c, 0x95, 0xef, 0xfe, 0xbb, 0x83, + 0x20, 0x64, 0x0b, 0x3e, 0x77, 0xfd, 0x98, 0xf6, 0xcb, 0x6e, 0x5f, 0x75, 0x67, 0x34, 0xbe, 0x24, + 0x51, 0x3f, 0x88, 0x9f, 0x85, 0xf1, 0xac, 0xec, 0xce, 0x54, 0xf7, 0x57, 0x00, 0x00, 0x00, 0xff, + 0xff, 0xd0, 0x84, 0x91, 0x73, 0x59, 0x06, 0x00, 0x00, +} diff --git a/vendor/github.com/m3db/prometheus_common/LICENSE b/vendor/github.com/prometheus/common/LICENSE similarity index 100% rename from vendor/github.com/m3db/prometheus_common/LICENSE rename to vendor/github.com/prometheus/common/LICENSE diff --git a/vendor/github.com/m3db/prometheus_common/NOTICE b/vendor/github.com/prometheus/common/NOTICE similarity index 100% rename from vendor/github.com/m3db/prometheus_common/NOTICE rename to vendor/github.com/prometheus/common/NOTICE diff --git a/vendor/github.com/m3db/prometheus_common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go similarity index 98% rename from vendor/github.com/m3db/prometheus_common/expfmt/decode.go 
rename to vendor/github.com/prometheus/common/expfmt/decode.go index c877df030..7657f841d 100644 --- a/vendor/github.com/m3db/prometheus_common/expfmt/decode.go +++ b/vendor/github.com/prometheus/common/expfmt/decode.go @@ -20,10 +20,10 @@ import ( "mime" "net/http" - dto "github.com/m3db/prometheus_client_model/go" + dto "github.com/prometheus/client_model/go" - "github.com/m3db/prometheus_common/model" "github.com/matttproud/golang_protobuf_extensions/pbutil" + "github.com/prometheus/common/model" ) // Decoder types decode an input stream into metric families. @@ -164,7 +164,7 @@ func (sd *SampleDecoder) Decode(s *model.Vector) error { } // ExtractSamples builds a slice of samples from the provided metric -// families. If an error occurrs during sample extraction, it continues to +// families. If an error occurs during sample extraction, it continues to // extract from the remaining metric families. The returned error is the last // error that has occurred. func ExtractSamples(o *DecodeOptions, fams ...*dto.MetricFamily) (model.Vector, error) { diff --git a/vendor/github.com/prometheus/common/expfmt/encode.go b/vendor/github.com/prometheus/common/expfmt/encode.go new file mode 100644 index 000000000..bd4e34745 --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/encode.go @@ -0,0 +1,162 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package expfmt + +import ( + "fmt" + "io" + "net/http" + + "github.com/golang/protobuf/proto" + "github.com/matttproud/golang_protobuf_extensions/pbutil" + "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg" + + dto "github.com/prometheus/client_model/go" +) + +// Encoder types encode metric families into an underlying wire protocol. +type Encoder interface { + Encode(*dto.MetricFamily) error +} + +// Closer is implemented by Encoders that need to be closed to finalize +// encoding. (For example, OpenMetrics needs a final `# EOF` line.) +// +// Note that all Encoder implementations returned from this package implement +// Closer, too, even if the Close call is a no-op. This happens in preparation +// for adding a Close method to the Encoder interface directly in a (mildly +// breaking) release in the future. +type Closer interface { + Close() error +} + +type encoderCloser struct { + encode func(*dto.MetricFamily) error + close func() error +} + +func (ec encoderCloser) Encode(v *dto.MetricFamily) error { + return ec.encode(v) +} + +func (ec encoderCloser) Close() error { + return ec.close() +} + +// Negotiate returns the Content-Type based on the given Accept header. If no +// appropriate accepted type is found, FmtText is returned (which is the +// Prometheus text format). This function will never negotiate FmtOpenMetrics, +// as the support is still experimental. To include the option to negotiate +// FmtOpenMetrics, use NegotiateOpenMetrics. 
+func Negotiate(h http.Header) Format { + for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) { + ver := ac.Params["version"] + if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol { + switch ac.Params["encoding"] { + case "delimited": + return FmtProtoDelim + case "text": + return FmtProtoText + case "compact-text": + return FmtProtoCompact + } + } + if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") { + return FmtText + } + } + return FmtText +} + +// NegotiateIncludingOpenMetrics works like Negotiate but includes +// FmtOpenMetrics as an option for the result. Note that this function is +// temporary and will disappear once FmtOpenMetrics is fully supported and as +// such may be negotiated by the normal Negotiate function. +func NegotiateIncludingOpenMetrics(h http.Header) Format { + for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) { + ver := ac.Params["version"] + if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol { + switch ac.Params["encoding"] { + case "delimited": + return FmtProtoDelim + case "text": + return FmtProtoText + case "compact-text": + return FmtProtoCompact + } + } + if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") { + return FmtText + } + if ac.Type+"/"+ac.SubType == OpenMetricsType && (ver == OpenMetricsVersion || ver == "") { + return FmtOpenMetrics + } + } + return FmtText +} + +// NewEncoder returns a new encoder based on content type negotiation. All +// Encoder implementations returned by NewEncoder also implement Closer, and +// callers should always call the Close method. It is currently only required +// for FmtOpenMetrics, but a future (breaking) release will add the Close method +// to the Encoder interface directly. The current version of the Encoder +// interface is kept for backwards compatibility. 
+func NewEncoder(w io.Writer, format Format) Encoder { + switch format { + case FmtProtoDelim: + return encoderCloser{ + encode: func(v *dto.MetricFamily) error { + _, err := pbutil.WriteDelimited(w, v) + return err + }, + close: func() error { return nil }, + } + case FmtProtoCompact: + return encoderCloser{ + encode: func(v *dto.MetricFamily) error { + _, err := fmt.Fprintln(w, v.String()) + return err + }, + close: func() error { return nil }, + } + case FmtProtoText: + return encoderCloser{ + encode: func(v *dto.MetricFamily) error { + _, err := fmt.Fprintln(w, proto.MarshalTextString(v)) + return err + }, + close: func() error { return nil }, + } + case FmtText: + return encoderCloser{ + encode: func(v *dto.MetricFamily) error { + _, err := MetricFamilyToText(w, v) + return err + }, + close: func() error { return nil }, + } + case FmtOpenMetrics: + return encoderCloser{ + encode: func(v *dto.MetricFamily) error { + _, err := MetricFamilyToOpenMetrics(w, v) + return err + }, + close: func() error { + _, err := FinalizeOpenMetrics(w) + return err + }, + } + } + panic(fmt.Errorf("expfmt.NewEncoder: unknown format %q", format)) +} diff --git a/vendor/github.com/m3db/prometheus_common/expfmt/expfmt.go b/vendor/github.com/prometheus/common/expfmt/expfmt.go similarity index 76% rename from vendor/github.com/m3db/prometheus_common/expfmt/expfmt.go rename to vendor/github.com/prometheus/common/expfmt/expfmt.go index c71bcb981..0f176fa64 100644 --- a/vendor/github.com/m3db/prometheus_common/expfmt/expfmt.go +++ b/vendor/github.com/prometheus/common/expfmt/expfmt.go @@ -19,10 +19,12 @@ type Format string // Constants to assemble the Content-Type values for the different wire protocols. 
const ( - TextVersion = "0.0.4" - ProtoType = `application/vnd.google.protobuf` - ProtoProtocol = `io.prometheus.client.MetricFamily` - ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";" + TextVersion = "0.0.4" + ProtoType = `application/vnd.google.protobuf` + ProtoProtocol = `io.prometheus.client.MetricFamily` + ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";" + OpenMetricsType = `application/openmetrics-text` + OpenMetricsVersion = "0.0.1" // The Content-Type values for the different wire protocols. FmtUnknown Format = `` @@ -30,6 +32,7 @@ const ( FmtProtoDelim Format = ProtoFmt + ` encoding=delimited` FmtProtoText Format = ProtoFmt + ` encoding=text` FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text` + FmtOpenMetrics Format = OpenMetricsType + `; version=` + OpenMetricsVersion + `; charset=utf-8` ) const ( diff --git a/vendor/github.com/m3db/prometheus_common/expfmt/fuzz.go b/vendor/github.com/prometheus/common/expfmt/fuzz.go similarity index 100% rename from vendor/github.com/m3db/prometheus_common/expfmt/fuzz.go rename to vendor/github.com/prometheus/common/expfmt/fuzz.go diff --git a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go new file mode 100644 index 000000000..8a9313a3b --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go @@ -0,0 +1,527 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package expfmt + +import ( + "bufio" + "bytes" + "fmt" + "io" + "math" + "strconv" + "strings" + + "github.com/golang/protobuf/ptypes" + "github.com/prometheus/common/model" + + dto "github.com/prometheus/client_model/go" +) + +// MetricFamilyToOpenMetrics converts a MetricFamily proto message into the +// OpenMetrics text format and writes the resulting lines to 'out'. It returns +// the number of bytes written and any error encountered. The output will have +// the same order as the input, no further sorting is performed. Furthermore, +// this function assumes the input is already sanitized and does not perform any +// sanity checks. If the input contains duplicate metrics or invalid metric or +// label names, the conversion will result in invalid text format output. +// +// This function fulfills the type 'expfmt.encoder'. +// +// Note that OpenMetrics requires a final `# EOF` line. Since this function acts +// on individual metric families, it is the responsibility of the caller to +// append this line to 'out' once all metric families have been written. +// Conveniently, this can be done by calling FinalizeOpenMetrics. +// +// The output should be fully OpenMetrics compliant. However, there are a few +// missing features and peculiarities to avoid complications when switching from +// Prometheus to OpenMetrics or vice versa: +// +// - Counters are expected to have the `_total` suffix in their metric name. In +// the output, the suffix will be truncated from the `# TYPE` and `# HELP` +// line. A counter with a missing `_total` suffix is not an error. However, +// its type will be set to `unknown` in that case to avoid invalid OpenMetrics +// output. +// +// - No support for the following (optional) features: `# UNIT` line, `_created` +// line, info type, stateset type, gaugehistogram type. +// +// - The size of exemplar labels is not checked (i.e. 
it's possible to create +// exemplars that are larger than allowed by the OpenMetrics specification). +// +// - The value of Counters is not checked. (OpenMetrics doesn't allow counters +// with a `NaN` value.) +func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int, err error) { + name := in.GetName() + if name == "" { + return 0, fmt.Errorf("MetricFamily has no name: %s", in) + } + + // Try the interface upgrade. If it doesn't work, we'll use a + // bufio.Writer from the sync.Pool. + w, ok := out.(enhancedWriter) + if !ok { + b := bufPool.Get().(*bufio.Writer) + b.Reset(out) + w = b + defer func() { + bErr := b.Flush() + if err == nil { + err = bErr + } + bufPool.Put(b) + }() + } + + var ( + n int + metricType = in.GetType() + shortName = name + ) + if metricType == dto.MetricType_COUNTER && strings.HasSuffix(shortName, "_total") { + shortName = name[:len(name)-6] + } + + // Comments, first HELP, then TYPE. + if in.Help != nil { + n, err = w.WriteString("# HELP ") + written += n + if err != nil { + return + } + n, err = w.WriteString(shortName) + written += n + if err != nil { + return + } + err = w.WriteByte(' ') + written++ + if err != nil { + return + } + n, err = writeEscapedString(w, *in.Help, true) + written += n + if err != nil { + return + } + err = w.WriteByte('\n') + written++ + if err != nil { + return + } + } + n, err = w.WriteString("# TYPE ") + written += n + if err != nil { + return + } + n, err = w.WriteString(shortName) + written += n + if err != nil { + return + } + switch metricType { + case dto.MetricType_COUNTER: + if strings.HasSuffix(name, "_total") { + n, err = w.WriteString(" counter\n") + } else { + n, err = w.WriteString(" unknown\n") + } + case dto.MetricType_GAUGE: + n, err = w.WriteString(" gauge\n") + case dto.MetricType_SUMMARY: + n, err = w.WriteString(" summary\n") + case dto.MetricType_UNTYPED: + n, err = w.WriteString(" unknown\n") + case dto.MetricType_HISTOGRAM: + n, err = w.WriteString(" 
histogram\n") + default: + return written, fmt.Errorf("unknown metric type %s", metricType.String()) + } + written += n + if err != nil { + return + } + + // Finally the samples, one line for each. + for _, metric := range in.Metric { + switch metricType { + case dto.MetricType_COUNTER: + if metric.Counter == nil { + return written, fmt.Errorf( + "expected counter in metric %s %s", name, metric, + ) + } + // Note that we have ensured above that either the name + // ends on `_total` or that the rendered type is + // `unknown`. Therefore, no `_total` must be added here. + n, err = writeOpenMetricsSample( + w, name, "", metric, "", 0, + metric.Counter.GetValue(), 0, false, + metric.Counter.Exemplar, + ) + case dto.MetricType_GAUGE: + if metric.Gauge == nil { + return written, fmt.Errorf( + "expected gauge in metric %s %s", name, metric, + ) + } + n, err = writeOpenMetricsSample( + w, name, "", metric, "", 0, + metric.Gauge.GetValue(), 0, false, + nil, + ) + case dto.MetricType_UNTYPED: + if metric.Untyped == nil { + return written, fmt.Errorf( + "expected untyped in metric %s %s", name, metric, + ) + } + n, err = writeOpenMetricsSample( + w, name, "", metric, "", 0, + metric.Untyped.GetValue(), 0, false, + nil, + ) + case dto.MetricType_SUMMARY: + if metric.Summary == nil { + return written, fmt.Errorf( + "expected summary in metric %s %s", name, metric, + ) + } + for _, q := range metric.Summary.Quantile { + n, err = writeOpenMetricsSample( + w, name, "", metric, + model.QuantileLabel, q.GetQuantile(), + q.GetValue(), 0, false, + nil, + ) + written += n + if err != nil { + return + } + } + n, err = writeOpenMetricsSample( + w, name, "_sum", metric, "", 0, + metric.Summary.GetSampleSum(), 0, false, + nil, + ) + written += n + if err != nil { + return + } + n, err = writeOpenMetricsSample( + w, name, "_count", metric, "", 0, + 0, metric.Summary.GetSampleCount(), true, + nil, + ) + case dto.MetricType_HISTOGRAM: + if metric.Histogram == nil { + return written, 
fmt.Errorf( + "expected histogram in metric %s %s", name, metric, + ) + } + infSeen := false + for _, b := range metric.Histogram.Bucket { + n, err = writeOpenMetricsSample( + w, name, "_bucket", metric, + model.BucketLabel, b.GetUpperBound(), + 0, b.GetCumulativeCount(), true, + b.Exemplar, + ) + written += n + if err != nil { + return + } + if math.IsInf(b.GetUpperBound(), +1) { + infSeen = true + } + } + if !infSeen { + n, err = writeOpenMetricsSample( + w, name, "_bucket", metric, + model.BucketLabel, math.Inf(+1), + 0, metric.Histogram.GetSampleCount(), true, + nil, + ) + written += n + if err != nil { + return + } + } + n, err = writeOpenMetricsSample( + w, name, "_sum", metric, "", 0, + metric.Histogram.GetSampleSum(), 0, false, + nil, + ) + written += n + if err != nil { + return + } + n, err = writeOpenMetricsSample( + w, name, "_count", metric, "", 0, + 0, metric.Histogram.GetSampleCount(), true, + nil, + ) + default: + return written, fmt.Errorf( + "unexpected type in metric %s %s", name, metric, + ) + } + written += n + if err != nil { + return + } + } + return +} + +// FinalizeOpenMetrics writes the final `# EOF\n` line required by OpenMetrics. +func FinalizeOpenMetrics(w io.Writer) (written int, err error) { + return w.Write([]byte("# EOF\n")) +} + +// writeOpenMetricsSample writes a single sample in OpenMetrics text format to +// w, given the metric name, the metric proto message itself, optionally an +// additional label name with a float64 value (use empty string as label name if +// not required), the value (optionally as float64 or uint64, determined by +// useIntValue), and optionally an exemplar (use nil if not required). The +// function returns the number of bytes written and any error encountered. 
+func writeOpenMetricsSample( + w enhancedWriter, + name, suffix string, + metric *dto.Metric, + additionalLabelName string, additionalLabelValue float64, + floatValue float64, intValue uint64, useIntValue bool, + exemplar *dto.Exemplar, +) (int, error) { + var written int + n, err := w.WriteString(name) + written += n + if err != nil { + return written, err + } + if suffix != "" { + n, err = w.WriteString(suffix) + written += n + if err != nil { + return written, err + } + } + n, err = writeOpenMetricsLabelPairs( + w, metric.Label, additionalLabelName, additionalLabelValue, + ) + written += n + if err != nil { + return written, err + } + err = w.WriteByte(' ') + written++ + if err != nil { + return written, err + } + if useIntValue { + n, err = writeUint(w, intValue) + } else { + n, err = writeOpenMetricsFloat(w, floatValue) + } + written += n + if err != nil { + return written, err + } + if metric.TimestampMs != nil { + err = w.WriteByte(' ') + written++ + if err != nil { + return written, err + } + // TODO(beorn7): Format this directly without converting to a float first. + n, err = writeOpenMetricsFloat(w, float64(*metric.TimestampMs)/1000) + written += n + if err != nil { + return written, err + } + } + if exemplar != nil { + n, err = writeExemplar(w, exemplar) + written += n + if err != nil { + return written, err + } + } + err = w.WriteByte('\n') + written++ + if err != nil { + return written, err + } + return written, nil +} + +// writeOpenMetricsLabelPairs works like writeOpenMetrics but formats the float +// in OpenMetrics style. 
+func writeOpenMetricsLabelPairs( + w enhancedWriter, + in []*dto.LabelPair, + additionalLabelName string, additionalLabelValue float64, +) (int, error) { + if len(in) == 0 && additionalLabelName == "" { + return 0, nil + } + var ( + written int + separator byte = '{' + ) + for _, lp := range in { + err := w.WriteByte(separator) + written++ + if err != nil { + return written, err + } + n, err := w.WriteString(lp.GetName()) + written += n + if err != nil { + return written, err + } + n, err = w.WriteString(`="`) + written += n + if err != nil { + return written, err + } + n, err = writeEscapedString(w, lp.GetValue(), true) + written += n + if err != nil { + return written, err + } + err = w.WriteByte('"') + written++ + if err != nil { + return written, err + } + separator = ',' + } + if additionalLabelName != "" { + err := w.WriteByte(separator) + written++ + if err != nil { + return written, err + } + n, err := w.WriteString(additionalLabelName) + written += n + if err != nil { + return written, err + } + n, err = w.WriteString(`="`) + written += n + if err != nil { + return written, err + } + n, err = writeOpenMetricsFloat(w, additionalLabelValue) + written += n + if err != nil { + return written, err + } + err = w.WriteByte('"') + written++ + if err != nil { + return written, err + } + } + err := w.WriteByte('}') + written++ + if err != nil { + return written, err + } + return written, nil +} + +// writeExemplar writes the provided exemplar in OpenMetrics format to w. The +// function returns the number of bytes written and any error encountered. 
+func writeExemplar(w enhancedWriter, e *dto.Exemplar) (int, error) { + written := 0 + n, err := w.WriteString(" # ") + written += n + if err != nil { + return written, err + } + n, err = writeOpenMetricsLabelPairs(w, e.Label, "", 0) + written += n + if err != nil { + return written, err + } + err = w.WriteByte(' ') + written++ + if err != nil { + return written, err + } + n, err = writeOpenMetricsFloat(w, e.GetValue()) + written += n + if err != nil { + return written, err + } + if e.Timestamp != nil { + err = w.WriteByte(' ') + written++ + if err != nil { + return written, err + } + ts, err := ptypes.Timestamp((*e).Timestamp) + if err != nil { + return written, err + } + // TODO(beorn7): Format this directly from components of ts to + // avoid overflow/underflow and precision issues of the float + // conversion. + n, err = writeOpenMetricsFloat(w, float64(ts.UnixNano())/1e9) + written += n + if err != nil { + return written, err + } + } + return written, nil +} + +// writeOpenMetricsFloat works like writeFloat but appends ".0" if the resulting +// number would otherwise contain neither a "." nor an "e". +func writeOpenMetricsFloat(w enhancedWriter, f float64) (int, error) { + switch { + case f == 1: + return w.WriteString("1.0") + case f == 0: + return w.WriteString("0.0") + case f == -1: + return w.WriteString("-1.0") + case math.IsNaN(f): + return w.WriteString("NaN") + case math.IsInf(f, +1): + return w.WriteString("+Inf") + case math.IsInf(f, -1): + return w.WriteString("-Inf") + default: + bp := numBufPool.Get().(*[]byte) + *bp = strconv.AppendFloat((*bp)[:0], f, 'g', -1, 64) + if !bytes.ContainsAny(*bp, "e.") { + *bp = append(*bp, '.', '0') + } + written, err := w.Write(*bp) + numBufPool.Put(bp) + return written, err + } +} + +// writeUint is like writeInt just for uint64. 
+func writeUint(w enhancedWriter, u uint64) (int, error) { + bp := numBufPool.Get().(*[]byte) + *bp = strconv.AppendUint((*bp)[:0], u, 10) + written, err := w.Write(*bp) + numBufPool.Put(bp) + return written, err +} diff --git a/vendor/github.com/prometheus/common/expfmt/text_create.go b/vendor/github.com/prometheus/common/expfmt/text_create.go new file mode 100644 index 000000000..5ba503b06 --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/text_create.go @@ -0,0 +1,465 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package expfmt + +import ( + "bufio" + "fmt" + "io" + "io/ioutil" + "math" + "strconv" + "strings" + "sync" + + "github.com/prometheus/common/model" + + dto "github.com/prometheus/client_model/go" +) + +// enhancedWriter has all the enhanced write functions needed here. bufio.Writer +// implements it. +type enhancedWriter interface { + io.Writer + WriteRune(r rune) (n int, err error) + WriteString(s string) (n int, err error) + WriteByte(c byte) error +} + +const ( + initialNumBufSize = 24 +) + +var ( + bufPool = sync.Pool{ + New: func() interface{} { + return bufio.NewWriter(ioutil.Discard) + }, + } + numBufPool = sync.Pool{ + New: func() interface{} { + b := make([]byte, 0, initialNumBufSize) + return &b + }, + } +) + +// MetricFamilyToText converts a MetricFamily proto message into text format and +// writes the resulting lines to 'out'. 
It returns the number of bytes written +// and any error encountered. The output will have the same order as the input, +// no further sorting is performed. Furthermore, this function assumes the input +// is already sanitized and does not perform any sanity checks. If the input +// contains duplicate metrics or invalid metric or label names, the conversion +// will result in invalid text format output. +// +// This method fulfills the type 'prometheus.encoder'. +func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err error) { + // Fail-fast checks. + if len(in.Metric) == 0 { + return 0, fmt.Errorf("MetricFamily has no metrics: %s", in) + } + name := in.GetName() + if name == "" { + return 0, fmt.Errorf("MetricFamily has no name: %s", in) + } + + // Try the interface upgrade. If it doesn't work, we'll use a + // bufio.Writer from the sync.Pool. + w, ok := out.(enhancedWriter) + if !ok { + b := bufPool.Get().(*bufio.Writer) + b.Reset(out) + w = b + defer func() { + bErr := b.Flush() + if err == nil { + err = bErr + } + bufPool.Put(b) + }() + } + + var n int + + // Comments, first HELP, then TYPE. 
+ if in.Help != nil { + n, err = w.WriteString("# HELP ") + written += n + if err != nil { + return + } + n, err = w.WriteString(name) + written += n + if err != nil { + return + } + err = w.WriteByte(' ') + written++ + if err != nil { + return + } + n, err = writeEscapedString(w, *in.Help, false) + written += n + if err != nil { + return + } + err = w.WriteByte('\n') + written++ + if err != nil { + return + } + } + n, err = w.WriteString("# TYPE ") + written += n + if err != nil { + return + } + n, err = w.WriteString(name) + written += n + if err != nil { + return + } + metricType := in.GetType() + switch metricType { + case dto.MetricType_COUNTER: + n, err = w.WriteString(" counter\n") + case dto.MetricType_GAUGE: + n, err = w.WriteString(" gauge\n") + case dto.MetricType_SUMMARY: + n, err = w.WriteString(" summary\n") + case dto.MetricType_UNTYPED: + n, err = w.WriteString(" untyped\n") + case dto.MetricType_HISTOGRAM: + n, err = w.WriteString(" histogram\n") + default: + return written, fmt.Errorf("unknown metric type %s", metricType.String()) + } + written += n + if err != nil { + return + } + + // Finally the samples, one line for each. 
+ for _, metric := range in.Metric { + switch metricType { + case dto.MetricType_COUNTER: + if metric.Counter == nil { + return written, fmt.Errorf( + "expected counter in metric %s %s", name, metric, + ) + } + n, err = writeSample( + w, name, "", metric, "", 0, + metric.Counter.GetValue(), + ) + case dto.MetricType_GAUGE: + if metric.Gauge == nil { + return written, fmt.Errorf( + "expected gauge in metric %s %s", name, metric, + ) + } + n, err = writeSample( + w, name, "", metric, "", 0, + metric.Gauge.GetValue(), + ) + case dto.MetricType_UNTYPED: + if metric.Untyped == nil { + return written, fmt.Errorf( + "expected untyped in metric %s %s", name, metric, + ) + } + n, err = writeSample( + w, name, "", metric, "", 0, + metric.Untyped.GetValue(), + ) + case dto.MetricType_SUMMARY: + if metric.Summary == nil { + return written, fmt.Errorf( + "expected summary in metric %s %s", name, metric, + ) + } + for _, q := range metric.Summary.Quantile { + n, err = writeSample( + w, name, "", metric, + model.QuantileLabel, q.GetQuantile(), + q.GetValue(), + ) + written += n + if err != nil { + return + } + } + n, err = writeSample( + w, name, "_sum", metric, "", 0, + metric.Summary.GetSampleSum(), + ) + written += n + if err != nil { + return + } + n, err = writeSample( + w, name, "_count", metric, "", 0, + float64(metric.Summary.GetSampleCount()), + ) + case dto.MetricType_HISTOGRAM: + if metric.Histogram == nil { + return written, fmt.Errorf( + "expected histogram in metric %s %s", name, metric, + ) + } + infSeen := false + for _, b := range metric.Histogram.Bucket { + n, err = writeSample( + w, name, "_bucket", metric, + model.BucketLabel, b.GetUpperBound(), + float64(b.GetCumulativeCount()), + ) + written += n + if err != nil { + return + } + if math.IsInf(b.GetUpperBound(), +1) { + infSeen = true + } + } + if !infSeen { + n, err = writeSample( + w, name, "_bucket", metric, + model.BucketLabel, math.Inf(+1), + float64(metric.Histogram.GetSampleCount()), + ) + written += n 
+ if err != nil { + return + } + } + n, err = writeSample( + w, name, "_sum", metric, "", 0, + metric.Histogram.GetSampleSum(), + ) + written += n + if err != nil { + return + } + n, err = writeSample( + w, name, "_count", metric, "", 0, + float64(metric.Histogram.GetSampleCount()), + ) + default: + return written, fmt.Errorf( + "unexpected type in metric %s %s", name, metric, + ) + } + written += n + if err != nil { + return + } + } + return +} + +// writeSample writes a single sample in text format to w, given the metric +// name, the metric proto message itself, optionally an additional label name +// with a float64 value (use empty string as label name if not required), and +// the value. The function returns the number of bytes written and any error +// encountered. +func writeSample( + w enhancedWriter, + name, suffix string, + metric *dto.Metric, + additionalLabelName string, additionalLabelValue float64, + value float64, +) (int, error) { + var written int + n, err := w.WriteString(name) + written += n + if err != nil { + return written, err + } + if suffix != "" { + n, err = w.WriteString(suffix) + written += n + if err != nil { + return written, err + } + } + n, err = writeLabelPairs( + w, metric.Label, additionalLabelName, additionalLabelValue, + ) + written += n + if err != nil { + return written, err + } + err = w.WriteByte(' ') + written++ + if err != nil { + return written, err + } + n, err = writeFloat(w, value) + written += n + if err != nil { + return written, err + } + if metric.TimestampMs != nil { + err = w.WriteByte(' ') + written++ + if err != nil { + return written, err + } + n, err = writeInt(w, *metric.TimestampMs) + written += n + if err != nil { + return written, err + } + } + err = w.WriteByte('\n') + written++ + if err != nil { + return written, err + } + return written, nil +} + +// writeLabelPairs converts a slice of LabelPair proto messages plus the +// explicitly given additional label pair into text formatted as required by the 
+// text format and writes it to 'w'. An empty slice in combination with an empty +// string 'additionalLabelName' results in nothing being written. Otherwise, the +// label pairs are written, escaped as required by the text format, and enclosed +// in '{...}'. The function returns the number of bytes written and any error +// encountered. +func writeLabelPairs( + w enhancedWriter, + in []*dto.LabelPair, + additionalLabelName string, additionalLabelValue float64, +) (int, error) { + if len(in) == 0 && additionalLabelName == "" { + return 0, nil + } + var ( + written int + separator byte = '{' + ) + for _, lp := range in { + err := w.WriteByte(separator) + written++ + if err != nil { + return written, err + } + n, err := w.WriteString(lp.GetName()) + written += n + if err != nil { + return written, err + } + n, err = w.WriteString(`="`) + written += n + if err != nil { + return written, err + } + n, err = writeEscapedString(w, lp.GetValue(), true) + written += n + if err != nil { + return written, err + } + err = w.WriteByte('"') + written++ + if err != nil { + return written, err + } + separator = ',' + } + if additionalLabelName != "" { + err := w.WriteByte(separator) + written++ + if err != nil { + return written, err + } + n, err := w.WriteString(additionalLabelName) + written += n + if err != nil { + return written, err + } + n, err = w.WriteString(`="`) + written += n + if err != nil { + return written, err + } + n, err = writeFloat(w, additionalLabelValue) + written += n + if err != nil { + return written, err + } + err = w.WriteByte('"') + written++ + if err != nil { + return written, err + } + } + err := w.WriteByte('}') + written++ + if err != nil { + return written, err + } + return written, nil +} + +// writeEscapedString replaces '\' by '\\', new line character by '\n', and - if +// includeDoubleQuote is true - '"' by '\"'. 
+var ( + escaper = strings.NewReplacer("\\", `\\`, "\n", `\n`) + quotedEscaper = strings.NewReplacer("\\", `\\`, "\n", `\n`, "\"", `\"`) +) + +func writeEscapedString(w enhancedWriter, v string, includeDoubleQuote bool) (int, error) { + if includeDoubleQuote { + return quotedEscaper.WriteString(w, v) + } + return escaper.WriteString(w, v) +} + +// writeFloat is equivalent to fmt.Fprint with a float64 argument but hardcodes +// a few common cases for increased efficiency. For non-hardcoded cases, it uses +// strconv.AppendFloat to avoid allocations, similar to writeInt. +func writeFloat(w enhancedWriter, f float64) (int, error) { + switch { + case f == 1: + return 1, w.WriteByte('1') + case f == 0: + return 1, w.WriteByte('0') + case f == -1: + return w.WriteString("-1") + case math.IsNaN(f): + return w.WriteString("NaN") + case math.IsInf(f, +1): + return w.WriteString("+Inf") + case math.IsInf(f, -1): + return w.WriteString("-Inf") + default: + bp := numBufPool.Get().(*[]byte) + *bp = strconv.AppendFloat((*bp)[:0], f, 'g', -1, 64) + written, err := w.Write(*bp) + numBufPool.Put(bp) + return written, err + } +} + +// writeInt is equivalent to fmt.Fprint with an int64 argument but uses +// strconv.AppendInt with a byte slice taken from a sync.Pool to avoid +// allocations. 
+func writeInt(w enhancedWriter, i int64) (int, error) { + bp := numBufPool.Get().(*[]byte) + *bp = strconv.AppendInt((*bp)[:0], i, 10) + written, err := w.Write(*bp) + numBufPool.Put(bp) + return written, err +} diff --git a/vendor/github.com/m3db/prometheus_common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go similarity index 96% rename from vendor/github.com/m3db/prometheus_common/expfmt/text_parse.go rename to vendor/github.com/prometheus/common/expfmt/text_parse.go index 6f34c20a4..b6079b31e 100644 --- a/vendor/github.com/m3db/prometheus_common/expfmt/text_parse.go +++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go @@ -22,10 +22,10 @@ import ( "strconv" "strings" - dto "github.com/m3db/prometheus_client_model/go" + dto "github.com/prometheus/client_model/go" "github.com/golang/protobuf/proto" - "github.com/m3db/prometheus_common/model" + "github.com/prometheus/common/model" ) // A stateFn is a function that represents a state in a state machine. By @@ -299,6 +299,17 @@ func (p *TextParser) startLabelName() stateFn { p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte)) return nil } + // Check for duplicate label names. + labels := make(map[string]struct{}) + for _, l := range p.currentMetric.Label { + lName := l.GetName() + if _, exists := labels[lName]; !exists { + labels[lName] = struct{}{} + } else { + p.parseError(fmt.Sprintf("duplicate label names for metric %q", p.currentMF.GetName())) + return nil + } + } return p.startLabelValue } @@ -325,7 +336,7 @@ func (p *TextParser) startLabelValue() stateFn { // - Other labels have to be added to currentLabels for signature calculation. 
if p.currentMF.GetType() == dto.MetricType_SUMMARY { if p.currentLabelPair.GetName() == model.QuantileLabel { - if p.currentQuantile, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil { + if p.currentQuantile, p.err = parseFloat(p.currentLabelPair.GetValue()); p.err != nil { // Create a more helpful error message. p.parseError(fmt.Sprintf("expected float as value for 'quantile' label, got %q", p.currentLabelPair.GetValue())) return nil @@ -337,7 +348,7 @@ func (p *TextParser) startLabelValue() stateFn { // Similar special treatment of histograms. if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { if p.currentLabelPair.GetName() == model.BucketLabel { - if p.currentBucket, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil { + if p.currentBucket, p.err = parseFloat(p.currentLabelPair.GetValue()); p.err != nil { // Create a more helpful error message. p.parseError(fmt.Sprintf("expected float as value for 'le' label, got %q", p.currentLabelPair.GetValue())) return nil @@ -359,7 +370,7 @@ func (p *TextParser) startLabelValue() stateFn { } return p.readingValue default: - p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.Value)) + p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.GetValue())) return nil } } @@ -392,7 +403,7 @@ func (p *TextParser) readingValue() stateFn { if p.readTokenUntilWhitespace(); p.err != nil { return nil // Unexpected end of input. } - value, err := strconv.ParseFloat(p.currentToken.String(), 64) + value, err := parseFloat(p.currentToken.String()) if err != nil { // Create a more helpful error message. 
p.parseError(fmt.Sprintf("expected float as value, got %q", p.currentToken.String())) @@ -755,3 +766,10 @@ func histogramMetricName(name string) string { return name } } + +func parseFloat(s string) (float64, error) { + if strings.ContainsAny(s, "pP_") { + return 0, fmt.Errorf("unsupported character in float") + } + return strconv.ParseFloat(s, 64) +} diff --git a/vendor/github.com/m3db/prometheus_common/internal/bitbucket.org/ww/goautoneg/README.txt b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt similarity index 100% rename from vendor/github.com/m3db/prometheus_common/internal/bitbucket.org/ww/goautoneg/README.txt rename to vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt diff --git a/vendor/github.com/m3db/prometheus_common/internal/bitbucket.org/ww/goautoneg/autoneg.go b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go similarity index 100% rename from vendor/github.com/m3db/prometheus_common/internal/bitbucket.org/ww/goautoneg/autoneg.go rename to vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go index 648b38cb6..26e92288c 100644 --- a/vendor/github.com/m3db/prometheus_common/internal/bitbucket.org/ww/goautoneg/autoneg.go +++ b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go @@ -1,12 +1,12 @@ /* +Copyright (c) 2011, Open Knowledge Foundation Ltd. +All rights reserved. + HTTP Content-Type Autonegotiation. The functions in this package implement the behaviour specified in http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html -Copyright (c) 2011, Open Knowledge Foundation Ltd. -All rights reserved. 
- Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: diff --git a/vendor/github.com/m3db/prometheus_common/model/alert.go b/vendor/github.com/prometheus/common/model/alert.go similarity index 100% rename from vendor/github.com/m3db/prometheus_common/model/alert.go rename to vendor/github.com/prometheus/common/model/alert.go diff --git a/vendor/github.com/m3db/prometheus_common/model/fingerprinting.go b/vendor/github.com/prometheus/common/model/fingerprinting.go similarity index 100% rename from vendor/github.com/m3db/prometheus_common/model/fingerprinting.go rename to vendor/github.com/prometheus/common/model/fingerprinting.go diff --git a/vendor/github.com/m3db/prometheus_common/model/fnv.go b/vendor/github.com/prometheus/common/model/fnv.go similarity index 95% rename from vendor/github.com/m3db/prometheus_common/model/fnv.go rename to vendor/github.com/prometheus/common/model/fnv.go index 038fc1c90..367afecd3 100644 --- a/vendor/github.com/m3db/prometheus_common/model/fnv.go +++ b/vendor/github.com/prometheus/common/model/fnv.go @@ -20,7 +20,7 @@ const ( prime64 = 1099511628211 ) -// hashNew initializies a new fnv64a hash value. +// hashNew initializes a new fnv64a hash value. func hashNew() uint64 { return offset64 } diff --git a/vendor/github.com/m3db/prometheus_common/model/labels.go b/vendor/github.com/prometheus/common/model/labels.go similarity index 94% rename from vendor/github.com/m3db/prometheus_common/model/labels.go rename to vendor/github.com/prometheus/common/model/labels.go index 41051a01a..ef8956335 100644 --- a/vendor/github.com/m3db/prometheus_common/model/labels.go +++ b/vendor/github.com/prometheus/common/model/labels.go @@ -45,6 +45,14 @@ const ( // scrape a target. MetricsPathLabel = "__metrics_path__" + // ScrapeIntervalLabel is the name of the label that holds the scrape interval + // used to scrape a target. 
+ ScrapeIntervalLabel = "__scrape_interval__" + + // ScrapeTimeoutLabel is the name of the label that holds the scrape + // timeout used to scrape a target. + ScrapeTimeoutLabel = "__scrape_timeout__" + // ReservedLabelPrefix is a prefix which is not legal in user-supplied // label names. ReservedLabelPrefix = "__" diff --git a/vendor/github.com/m3db/prometheus_common/model/labelset.go b/vendor/github.com/prometheus/common/model/labelset.go similarity index 100% rename from vendor/github.com/m3db/prometheus_common/model/labelset.go rename to vendor/github.com/prometheus/common/model/labelset.go diff --git a/vendor/github.com/m3db/prometheus_common/model/metric.go b/vendor/github.com/prometheus/common/model/metric.go similarity index 99% rename from vendor/github.com/m3db/prometheus_common/model/metric.go rename to vendor/github.com/prometheus/common/model/metric.go index f7250909b..00804b7fe 100644 --- a/vendor/github.com/m3db/prometheus_common/model/metric.go +++ b/vendor/github.com/prometheus/common/model/metric.go @@ -21,7 +21,6 @@ import ( ) var ( - separator = []byte{0} // MetricNameRE is a regular expression matching valid metric // names. Note that the IsValidMetricName function performs the same // check but faster than a match with this regular expression. 
diff --git a/vendor/github.com/m3db/prometheus_common/model/model.go b/vendor/github.com/prometheus/common/model/model.go similarity index 100% rename from vendor/github.com/m3db/prometheus_common/model/model.go rename to vendor/github.com/prometheus/common/model/model.go diff --git a/vendor/github.com/m3db/prometheus_common/model/signature.go b/vendor/github.com/prometheus/common/model/signature.go similarity index 100% rename from vendor/github.com/m3db/prometheus_common/model/signature.go rename to vendor/github.com/prometheus/common/model/signature.go diff --git a/vendor/github.com/m3db/prometheus_common/model/silence.go b/vendor/github.com/prometheus/common/model/silence.go similarity index 100% rename from vendor/github.com/m3db/prometheus_common/model/silence.go rename to vendor/github.com/prometheus/common/model/silence.go diff --git a/vendor/github.com/m3db/prometheus_common/model/time.go b/vendor/github.com/prometheus/common/model/time.go similarity index 65% rename from vendor/github.com/m3db/prometheus_common/model/time.go rename to vendor/github.com/prometheus/common/model/time.go index 74ed5a9f7..7f67b16e4 100644 --- a/vendor/github.com/m3db/prometheus_common/model/time.go +++ b/vendor/github.com/prometheus/common/model/time.go @@ -14,6 +14,8 @@ package model import ( + "encoding/json" + "errors" "fmt" "math" "regexp" @@ -43,7 +45,7 @@ const ( // (1970-01-01 00:00 UTC) excluding leap seconds. type Time int64 -// Interval describes and interval between two timestamps. +// Interval describes an interval between two timestamps. type Interval struct { Start, End Time } @@ -150,7 +152,13 @@ func (t *Time) UnmarshalJSON(b []byte) error { return err } - *t = Time(v + va) + // If the value was something like -0.1 the negative is lost in the + // parsing because of the leading zero, this ensures that we capture it. 
+ if len(p[0]) > 0 && p[0][0] == '-' && v+va > 0 { + *t = Time(v+va) * -1 + } else { + *t = Time(v + va) + } default: return fmt.Errorf("invalid time %q", string(b)) @@ -175,73 +183,118 @@ func (d *Duration) Type() string { return "duration" } -var durationRE = regexp.MustCompile("^([0-9]+)(y|w|d|h|m|s|ms)$") +var durationRE = regexp.MustCompile("^(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?$") // ParseDuration parses a string into a time.Duration, assuming that a year // always has 365d, a week always has 7d, and a day always has 24h. func ParseDuration(durationStr string) (Duration, error) { + switch durationStr { + case "0": + // Allow 0 without a unit. + return 0, nil + case "": + return 0, fmt.Errorf("empty duration string") + } matches := durationRE.FindStringSubmatch(durationStr) - if len(matches) != 3 { + if matches == nil { return 0, fmt.Errorf("not a valid duration string: %q", durationStr) } - var ( - n, _ = strconv.Atoi(matches[1]) - dur = time.Duration(n) * time.Millisecond - ) - switch unit := matches[2]; unit { - case "y": - dur *= 1000 * 60 * 60 * 24 * 365 - case "w": - dur *= 1000 * 60 * 60 * 24 * 7 - case "d": - dur *= 1000 * 60 * 60 * 24 - case "h": - dur *= 1000 * 60 * 60 - case "m": - dur *= 1000 * 60 - case "s": - dur *= 1000 - case "ms": - // Value already correct - default: - return 0, fmt.Errorf("invalid time unit in duration string: %q", unit) + var dur time.Duration + + // Parse the match at pos `pos` in the regex and use `mult` to turn that + // into ms, then add that value to the total parsed duration. + var overflowErr error + m := func(pos int, mult time.Duration) { + if matches[pos] == "" { + return + } + n, _ := strconv.Atoi(matches[pos]) + + // Check if the provided duration overflows time.Duration (> ~ 290years). 
+ if n > int((1<<63-1)/mult/time.Millisecond) { + overflowErr = errors.New("duration out of range") + } + d := time.Duration(n) * time.Millisecond + dur += d * mult + + if dur < 0 { + overflowErr = errors.New("duration out of range") + } } - return Duration(dur), nil + + m(2, 1000*60*60*24*365) // y + m(4, 1000*60*60*24*7) // w + m(6, 1000*60*60*24) // d + m(8, 1000*60*60) // h + m(10, 1000*60) // m + m(12, 1000) // s + m(14, 1) // ms + + return Duration(dur), overflowErr } func (d Duration) String() string { var ( - ms = int64(time.Duration(d) / time.Millisecond) - unit = "ms" + ms = int64(time.Duration(d) / time.Millisecond) + r = "" ) if ms == 0 { return "0s" } - factors := map[string]int64{ - "y": 1000 * 60 * 60 * 24 * 365, - "w": 1000 * 60 * 60 * 24 * 7, - "d": 1000 * 60 * 60 * 24, - "h": 1000 * 60 * 60, - "m": 1000 * 60, - "s": 1000, - "ms": 1, + + f := func(unit string, mult int64, exact bool) { + if exact && ms%mult != 0 { + return + } + if v := ms / mult; v > 0 { + r += fmt.Sprintf("%d%s", v, unit) + ms -= v * mult + } } - switch int64(0) { - case ms % factors["y"]: - unit = "y" - case ms % factors["w"]: - unit = "w" - case ms % factors["d"]: - unit = "d" - case ms % factors["h"]: - unit = "h" - case ms % factors["m"]: - unit = "m" - case ms % factors["s"]: - unit = "s" + // Only format years and weeks if the remainder is zero, as it is often + // easier to read 90d than 12w6d. + f("y", 1000*60*60*24*365, true) + f("w", 1000*60*60*24*7, true) + + f("d", 1000*60*60*24, false) + f("h", 1000*60*60, false) + f("m", 1000*60, false) + f("s", 1000, false) + f("ms", 1, false) + + return r +} + +// MarshalJSON implements the json.Marshaler interface. +func (d Duration) MarshalJSON() ([]byte, error) { + return json.Marshal(d.String()) +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
+func (d *Duration) UnmarshalJSON(bytes []byte) error { + var s string + if err := json.Unmarshal(bytes, &s); err != nil { + return err + } + dur, err := ParseDuration(s) + if err != nil { + return err } - return fmt.Sprintf("%v%v", ms/factors[unit], unit) + *d = dur + return nil +} + +// MarshalText implements the encoding.TextMarshaler interface. +func (d *Duration) MarshalText() ([]byte, error) { + return []byte(d.String()), nil +} + +// UnmarshalText implements the encoding.TextUnmarshaler interface. +func (d *Duration) UnmarshalText(text []byte) error { + var err error + *d, err = ParseDuration(string(text)) + return err } // MarshalYAML implements the yaml.Marshaler interface. diff --git a/vendor/github.com/m3db/prometheus_common/model/value.go b/vendor/github.com/prometheus/common/model/value.go similarity index 100% rename from vendor/github.com/m3db/prometheus_common/model/value.go rename to vendor/github.com/prometheus/common/model/value.go diff --git a/vendor/github.com/prometheus/procfs/.gitignore b/vendor/github.com/prometheus/procfs/.gitignore new file mode 100644 index 000000000..25e3659ab --- /dev/null +++ b/vendor/github.com/prometheus/procfs/.gitignore @@ -0,0 +1 @@ +/fixtures/ diff --git a/vendor/github.com/prometheus/procfs/.golangci.yml b/vendor/github.com/prometheus/procfs/.golangci.yml new file mode 100644 index 000000000..0aa09edac --- /dev/null +++ b/vendor/github.com/prometheus/procfs/.golangci.yml @@ -0,0 +1,4 @@ +--- +linters: + enable: + - golint diff --git a/vendor/github.com/prometheus/procfs/CODE_OF_CONDUCT.md b/vendor/github.com/prometheus/procfs/CODE_OF_CONDUCT.md new file mode 100644 index 000000000..9a1aff412 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/CODE_OF_CONDUCT.md @@ -0,0 +1,3 @@ +## Prometheus Community Code of Conduct + +Prometheus follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md). 
diff --git a/vendor/github.com/prometheus/procfs/CONTRIBUTING.md b/vendor/github.com/prometheus/procfs/CONTRIBUTING.md new file mode 100644 index 000000000..943de7615 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/CONTRIBUTING.md @@ -0,0 +1,121 @@ +# Contributing + +Prometheus uses GitHub to manage reviews of pull requests. + +* If you are a new contributor see: [Steps to Contribute](#steps-to-contribute) + +* If you have a trivial fix or improvement, go ahead and create a pull request, + addressing (with `@...`) a suitable maintainer of this repository (see + [MAINTAINERS.md](MAINTAINERS.md)) in the description of the pull request. + +* If you plan to do something more involved, first discuss your ideas + on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers). + This will avoid unnecessary work and surely give you and us a good deal + of inspiration. Also please see our [non-goals issue](https://github.com/prometheus/docs/issues/149) on areas that the Prometheus community doesn't plan to work on. + +* Relevant coding style guidelines are the [Go Code Review + Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments) + and the _Formatting and style_ section of Peter Bourgon's [Go: Best + Practices for Production + Environments](https://peter.bourgon.org/go-in-production/#formatting-and-style). + +* Be sure to sign off on the [DCO](https://github.com/probot/dco#how-it-works) + +## Steps to Contribute + +Should you wish to work on an issue, please claim it first by commenting on the GitHub issue that you want to work on it. This is to prevent duplicated efforts from contributors on the same issue. + +Please check the [`help-wanted`](https://github.com/prometheus/procfs/issues?q=is%3Aissue+is%3Aopen+label%3A%22help+wanted%22) label to find issues that are good for getting started. 
If you have questions about one of the issues, with or without the tag, please comment on them and one of the maintainers will clarify it. For a quicker response, contact us over [IRC](https://prometheus.io/community). + +For quickly compiling and testing your changes do: +``` +make test # Make sure all the tests pass before you commit and push :) +``` + +We use [`golangci-lint`](https://github.com/golangci/golangci-lint) for linting the code. If it reports an issue and you think that the warning needs to be disregarded or is a false-positive, you can add a special comment `//nolint:linter1[,linter2,...]` before the offending line. Use this sparingly though, fixing the code to comply with the linter's recommendation is in general the preferred course of action. + +## Pull Request Checklist + +* Branch from the master branch and, if needed, rebase to the current master branch before submitting your pull request. If it doesn't merge cleanly with master you may be asked to rebase your changes. + +* Commits should be as small as possible, while ensuring that each commit is correct independently (i.e., each commit should compile and pass tests). + +* If your patch is not getting reviewed or you need a specific person to review it, you can @-reply a reviewer asking for a review in the pull request or a comment, or you can ask for a review on IRC channel [#prometheus](https://webchat.freenode.net/?channels=#prometheus) on irc.freenode.net (for the easiest start, [join via Riot](https://riot.im/app/#/room/#prometheus:matrix.org)). + +* Add tests relevant to the fixed bug or new feature. + +## Dependency management + +The Prometheus project uses [Go modules](https://golang.org/cmd/go/#hdr-Modules__module_versions__and_more) to manage dependencies on external packages. This requires a working Go environment with version 1.12 or greater installed. + +All dependencies are vendored in the `vendor/` directory. 
+ +To add or update a new dependency, use the `go get` command: + +```bash +# Pick the latest tagged release. +go get example.com/some/module/pkg + +# Pick a specific version. +go get example.com/some/module/pkg@vX.Y.Z +``` + +Tidy up the `go.mod` and `go.sum` files and copy the new/updated dependency to the `vendor/` directory: + + +```bash +# The GO111MODULE variable can be omitted when the code isn't located in GOPATH. +GO111MODULE=on go mod tidy + +GO111MODULE=on go mod vendor +``` + +You have to commit the changes to `go.mod`, `go.sum` and the `vendor/` directory before submitting the pull request. + + +## API Implementation Guidelines + +### Naming and Documentation + +Public functions and structs should normally be named according to the file(s) being read and parsed. For example, +the `fs.BuddyInfo()` function reads the file `/proc/buddyinfo`. In addition, the godoc for each public function +should contain the path to the file(s) being read and a URL of the linux kernel documentation describing the file(s). + +### Reading vs. Parsing + +Most functionality in this library consists of reading files and then parsing the text into structured data. In most +cases reading and parsing should be separated into different functions/methods with a public `fs.Thing()` method and +a private `parseThing(r Reader)` function. This provides a logical separation and allows parsing to be tested +directly without the need to read from the filesystem. Using a `Reader` argument is preferred over other data types +such as `string` or `*File` because it provides the most flexibility regarding the data source. When a set of files +in a directory needs to be parsed, then a `path` string parameter to the parse function can be used instead. + +### /proc and /sys filesystem I/O + +The `proc` and `sys` filesystems are pseudo file systems and work a bit differently from standard disk I/O. 
+Many of the files are changing continuously and the data being read can in some cases change between subsequent +reads in the same file. Also, most of the files are relatively small (less than a few KBs), and system calls +to the `stat` function will often return the wrong size. Therefore, for most files it's recommended to read the +full file in a single operation using an internal utility function called `util.ReadFileNoStat`. +This function is similar to `ioutil.ReadFile`, but it avoids the system call to `stat` to get the current size of +the file. + +Note that parsing the file's contents can still be performed one line at a time. This is done by first reading +the full file, and then using a scanner on the `[]byte` or `string` containing the data. + +``` + data, err := util.ReadFileNoStat("/proc/cpuinfo") + if err != nil { + return err + } + reader := bytes.NewReader(data) + scanner := bufio.NewScanner(reader) +``` + +The `/sys` filesystem contains many very small files which contain only a single numeric or text value. These files +can be read using an internal function called `util.SysReadFile` which is similar to `ioutil.ReadFile` but does +not bother to check the size of the file before reading. 
+``` + data, err := util.SysReadFile("/sys/class/power_supply/BAT0/capacity") +``` + diff --git a/vendor/github.com/m3db/prometheus_procfs/LICENSE b/vendor/github.com/prometheus/procfs/LICENSE similarity index 100% rename from vendor/github.com/m3db/prometheus_procfs/LICENSE rename to vendor/github.com/prometheus/procfs/LICENSE diff --git a/vendor/github.com/prometheus/procfs/MAINTAINERS.md b/vendor/github.com/prometheus/procfs/MAINTAINERS.md new file mode 100644 index 000000000..56ba67d3e --- /dev/null +++ b/vendor/github.com/prometheus/procfs/MAINTAINERS.md @@ -0,0 +1,2 @@ +* Johannes 'fish' Ziemke @discordianfish +* Paul Gier @pgier diff --git a/vendor/github.com/prometheus/procfs/Makefile b/vendor/github.com/prometheus/procfs/Makefile new file mode 100644 index 000000000..616a0d25e --- /dev/null +++ b/vendor/github.com/prometheus/procfs/Makefile @@ -0,0 +1,29 @@ +# Copyright 2018 The Prometheus Authors +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +include Makefile.common + +%/.unpacked: %.ttar + @echo ">> extracting fixtures" + ./ttar -C $(dir $*) -x -f $*.ttar + touch $@ + +update_fixtures: + rm -vf fixtures/.unpacked + ./ttar -c -f fixtures.ttar fixtures/ + +.PHONY: build +build: + +.PHONY: test +test: fixtures/.unpacked common-test diff --git a/vendor/github.com/prometheus/procfs/Makefile.common b/vendor/github.com/prometheus/procfs/Makefile.common new file mode 100644 index 000000000..3ac29c636 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/Makefile.common @@ -0,0 +1,302 @@ +# Copyright 2018 The Prometheus Authors +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# A common Makefile that includes rules to be reused in different prometheus projects. +# !!! Open PRs only against the prometheus/prometheus/Makefile.common repository! + +# Example usage : +# Create the main Makefile in the root project directory. 
+# include Makefile.common +# customTarget: +# @echo ">> Running customTarget" +# + +# Ensure GOBIN is not set during build so that promu is installed to the correct path +unexport GOBIN + +GO ?= go +GOFMT ?= $(GO)fmt +FIRST_GOPATH := $(firstword $(subst :, ,$(shell $(GO) env GOPATH))) +GOOPTS ?= +GOHOSTOS ?= $(shell $(GO) env GOHOSTOS) +GOHOSTARCH ?= $(shell $(GO) env GOHOSTARCH) + +GO_VERSION ?= $(shell $(GO) version) +GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION)) +PRE_GO_111 ?= $(shell echo $(GO_VERSION_NUMBER) | grep -E 'go1\.(10|[0-9])\.') + +GOVENDOR := +GO111MODULE := +ifeq (, $(PRE_GO_111)) + ifneq (,$(wildcard go.mod)) + # Enforce Go modules support just in case the directory is inside GOPATH (and for Travis CI). + GO111MODULE := on + + ifneq (,$(wildcard vendor)) + # Always use the local vendor/ directory to satisfy the dependencies. + GOOPTS := $(GOOPTS) -mod=vendor + endif + endif +else + ifneq (,$(wildcard go.mod)) + ifneq (,$(wildcard vendor)) +$(warning This repository requires Go >= 1.11 because of Go modules) +$(warning Some recipes may not work as expected as the current Go runtime is '$(GO_VERSION_NUMBER)') + endif + else + # This repository isn't using Go modules (yet). + GOVENDOR := $(FIRST_GOPATH)/bin/govendor + endif +endif +PROMU := $(FIRST_GOPATH)/bin/promu +pkgs = ./... 
+ +ifeq (arm, $(GOHOSTARCH)) + GOHOSTARM ?= $(shell GOARM= $(GO) env GOARM) + GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH)v$(GOHOSTARM) +else + GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH) +endif + +GOTEST := $(GO) test +GOTEST_DIR := +ifneq ($(CIRCLE_JOB),) +ifneq ($(shell which gotestsum),) + GOTEST_DIR := test-results + GOTEST := gotestsum --junitfile $(GOTEST_DIR)/unit-tests.xml -- +endif +endif + +PROMU_VERSION ?= 0.7.0 +PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz + +GOLANGCI_LINT := +GOLANGCI_LINT_OPTS ?= +GOLANGCI_LINT_VERSION ?= v1.18.0 +# golangci-lint only supports linux, darwin and windows platforms on i386/amd64. +# windows isn't included here because of the path separator being different. +ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin)) + ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386)) + GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint + endif +endif + +PREFIX ?= $(shell pwd) +BIN_DIR ?= $(shell pwd) +DOCKER_IMAGE_TAG ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD)) +DOCKERFILE_PATH ?= ./Dockerfile +DOCKERBUILD_CONTEXT ?= ./ +DOCKER_REPO ?= prom + +DOCKER_ARCHS ?= amd64 + +BUILD_DOCKER_ARCHS = $(addprefix common-docker-,$(DOCKER_ARCHS)) +PUBLISH_DOCKER_ARCHS = $(addprefix common-docker-publish-,$(DOCKER_ARCHS)) +TAG_DOCKER_ARCHS = $(addprefix common-docker-tag-latest-,$(DOCKER_ARCHS)) + +ifeq ($(GOHOSTARCH),amd64) + ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux freebsd darwin windows)) + # Only supported on amd64 + test-flags := -race + endif +endif + +# This rule is used to forward a target like "build" to "common-build". This +# allows a new "build" target to be defined in a Makefile which includes this +# one and override "common-build" without override warnings. 
+%: common-% ; + +.PHONY: common-all +common-all: precheck style check_license lint unused build test + +.PHONY: common-style +common-style: + @echo ">> checking code style" + @fmtRes=$$($(GOFMT) -d $$(find . -path ./vendor -prune -o -name '*.go' -print)); \ + if [ -n "$${fmtRes}" ]; then \ + echo "gofmt checking failed!"; echo "$${fmtRes}"; echo; \ + echo "Please ensure you are using $$($(GO) version) for formatting code."; \ + exit 1; \ + fi + +.PHONY: common-check_license +common-check_license: + @echo ">> checking license header" + @licRes=$$(for file in $$(find . -type f -iname '*.go' ! -path './vendor/*') ; do \ + awk 'NR<=3' $$file | grep -Eq "(Copyright|generated|GENERATED)" || echo $$file; \ + done); \ + if [ -n "$${licRes}" ]; then \ + echo "license header checking failed:"; echo "$${licRes}"; \ + exit 1; \ + fi + +.PHONY: common-deps +common-deps: + @echo ">> getting dependencies" +ifdef GO111MODULE + GO111MODULE=$(GO111MODULE) $(GO) mod download +else + $(GO) get $(GOOPTS) -t ./... 
+endif + +.PHONY: update-go-deps +update-go-deps: + @echo ">> updating Go dependencies" + @for m in $$($(GO) list -mod=readonly -m -f '{{ if and (not .Indirect) (not .Main)}}{{.Path}}{{end}}' all); do \ + $(GO) get $$m; \ + done + GO111MODULE=$(GO111MODULE) $(GO) mod tidy +ifneq (,$(wildcard vendor)) + GO111MODULE=$(GO111MODULE) $(GO) mod vendor +endif + +.PHONY: common-test-short +common-test-short: $(GOTEST_DIR) + @echo ">> running short tests" + GO111MODULE=$(GO111MODULE) $(GOTEST) -short $(GOOPTS) $(pkgs) + +.PHONY: common-test +common-test: $(GOTEST_DIR) + @echo ">> running all tests" + GO111MODULE=$(GO111MODULE) $(GOTEST) $(test-flags) $(GOOPTS) $(pkgs) + +$(GOTEST_DIR): + @mkdir -p $@ + +.PHONY: common-format +common-format: + @echo ">> formatting code" + GO111MODULE=$(GO111MODULE) $(GO) fmt $(pkgs) + +.PHONY: common-vet +common-vet: + @echo ">> vetting code" + GO111MODULE=$(GO111MODULE) $(GO) vet $(GOOPTS) $(pkgs) + +.PHONY: common-lint +common-lint: $(GOLANGCI_LINT) +ifdef GOLANGCI_LINT + @echo ">> running golangci-lint" +ifdef GO111MODULE +# 'go list' needs to be executed before staticcheck to prepopulate the modules cache. +# Otherwise staticcheck might fail randomly for some reason not yet explained. + GO111MODULE=$(GO111MODULE) $(GO) list -e -compiled -test=true -export=false -deps=true -find=false -tags= -- ./... > /dev/null + GO111MODULE=$(GO111MODULE) $(GOLANGCI_LINT) run $(GOLANGCI_LINT_OPTS) $(pkgs) +else + $(GOLANGCI_LINT) run $(pkgs) +endif +endif + +# For backward-compatibility. +.PHONY: common-staticcheck +common-staticcheck: lint + +.PHONY: common-unused +common-unused: $(GOVENDOR) +ifdef GOVENDOR + @echo ">> running check for unused packages" + @$(GOVENDOR) list +unused | grep . 
&& exit 1 || echo 'No unused packages' +else +ifdef GO111MODULE + @echo ">> running check for unused/missing packages in go.mod" + GO111MODULE=$(GO111MODULE) $(GO) mod tidy +ifeq (,$(wildcard vendor)) + @git diff --exit-code -- go.sum go.mod +else + @echo ">> running check for unused packages in vendor/" + GO111MODULE=$(GO111MODULE) $(GO) mod vendor + @git diff --exit-code -- go.sum go.mod vendor/ +endif +endif +endif + +.PHONY: common-build +common-build: promu + @echo ">> building binaries" + GO111MODULE=$(GO111MODULE) $(PROMU) build --prefix $(PREFIX) $(PROMU_BINARIES) + +.PHONY: common-tarball +common-tarball: promu + @echo ">> building release tarball" + $(PROMU) tarball --prefix $(PREFIX) $(BIN_DIR) + +.PHONY: common-docker $(BUILD_DOCKER_ARCHS) +common-docker: $(BUILD_DOCKER_ARCHS) +$(BUILD_DOCKER_ARCHS): common-docker-%: + docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" \ + -f $(DOCKERFILE_PATH) \ + --build-arg ARCH="$*" \ + --build-arg OS="linux" \ + $(DOCKERBUILD_CONTEXT) + +.PHONY: common-docker-publish $(PUBLISH_DOCKER_ARCHS) +common-docker-publish: $(PUBLISH_DOCKER_ARCHS) +$(PUBLISH_DOCKER_ARCHS): common-docker-publish-%: + docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" + +DOCKER_MAJOR_VERSION_TAG = $(firstword $(subst ., ,$(shell cat VERSION))) +.PHONY: common-docker-tag-latest $(TAG_DOCKER_ARCHS) +common-docker-tag-latest: $(TAG_DOCKER_ARCHS) +$(TAG_DOCKER_ARCHS): common-docker-tag-latest-%: + docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest" + docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:v$(DOCKER_MAJOR_VERSION_TAG)" + +.PHONY: common-docker-manifest +common-docker-manifest: + DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" $(foreach 
ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(DOCKER_IMAGE_TAG)) + DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" + +.PHONY: promu +promu: $(PROMU) + +$(PROMU): + $(eval PROMU_TMP := $(shell mktemp -d)) + curl -s -L $(PROMU_URL) | tar -xvzf - -C $(PROMU_TMP) + mkdir -p $(FIRST_GOPATH)/bin + cp $(PROMU_TMP)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM)/promu $(FIRST_GOPATH)/bin/promu + rm -r $(PROMU_TMP) + +.PHONY: proto +proto: + @echo ">> generating code from proto files" + @./scripts/genproto.sh + +ifdef GOLANGCI_LINT +$(GOLANGCI_LINT): + mkdir -p $(FIRST_GOPATH)/bin + curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/$(GOLANGCI_LINT_VERSION)/install.sh \ + | sed -e '/install -d/d' \ + | sh -s -- -b $(FIRST_GOPATH)/bin $(GOLANGCI_LINT_VERSION) +endif + +ifdef GOVENDOR +.PHONY: $(GOVENDOR) +$(GOVENDOR): + GOOS= GOARCH= $(GO) get -u github.com/kardianos/govendor +endif + +.PHONY: precheck +precheck:: + +define PRECHECK_COMMAND_template = +precheck:: $(1)_precheck + +PRECHECK_COMMAND_$(1) ?= $(1) $$(strip $$(PRECHECK_OPTIONS_$(1))) +.PHONY: $(1)_precheck +$(1)_precheck: + @if ! $$(PRECHECK_COMMAND_$(1)) 1>/dev/null 2>&1; then \ + echo "Execution of '$$(PRECHECK_COMMAND_$(1))' command failed. Is $(1) installed?"; \ + exit 1; \ + fi +endef diff --git a/vendor/github.com/m3db/prometheus_procfs/NOTICE b/vendor/github.com/prometheus/procfs/NOTICE similarity index 100% rename from vendor/github.com/m3db/prometheus_procfs/NOTICE rename to vendor/github.com/prometheus/procfs/NOTICE diff --git a/vendor/github.com/prometheus/procfs/README.md b/vendor/github.com/prometheus/procfs/README.md new file mode 100644 index 000000000..55d1e3261 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/README.md @@ -0,0 +1,61 @@ +# procfs + +This package provides functions to retrieve system, kernel, and process +metrics from the pseudo-filesystems /proc and /sys. 
+ +*WARNING*: This package is a work in progress. Its API may still break in +backwards-incompatible ways without warnings. Use it at your own risk. + +[![GoDoc](https://godoc.org/github.com/prometheus/procfs?status.png)](https://godoc.org/github.com/prometheus/procfs) +[![Build Status](https://travis-ci.org/prometheus/procfs.svg?branch=master)](https://travis-ci.org/prometheus/procfs) +[![Go Report Card](https://goreportcard.com/badge/github.com/prometheus/procfs)](https://goreportcard.com/report/github.com/prometheus/procfs) + +## Usage + +The procfs library is organized by packages based on whether the gathered data is coming from +/proc, /sys, or both. Each package contains an `FS` type which represents the path to either /proc, +/sys, or both. For example, cpu statistics are gathered from +`/proc/stat` and are available via the root procfs package. First, the proc filesystem mount +point is initialized, and then the stat information is read. + +```go +fs, err := procfs.NewFS("/proc") +stats, err := fs.Stat() +``` + +Some sub-packages such as `blockdevice`, require access to both the proc and sys filesystems. + +```go + fs, err := blockdevice.NewFS("/proc", "/sys") + stats, err := fs.ProcDiskstats() +``` + +## Package Organization + +The packages in this project are organized according to (1) whether the data comes from the `/proc` or +`/sys` filesystem and (2) the type of information being retrieved. For example, most process information +can be gathered from the functions in the root `procfs` package. Information about block devices such as disk drives +is available in the `blockdevices` sub-package. + +## Building and Testing + +The procfs library is intended to be built as part of another application, so there are no distributable binaries. +However, most of the API includes unit tests which can be run with `make test`. 
+ +### Updating Test Fixtures + +The procfs library includes a set of test fixtures which include many example files from +the `/proc` and `/sys` filesystems. These fixtures are included as a [ttar](https://github.com/ideaship/ttar) file +which is extracted automatically during testing. To add/update the test fixtures, first +ensure the `fixtures` directory is up to date by removing the existing directory and then +extracting the ttar file using `make fixtures/.unpacked` or just `make test`. + +```bash +rm -rf fixtures +make test +``` + +Next, make the required changes to the extracted files in the `fixtures` directory. When +the changes are complete, run `make update_fixtures` to create a new `fixtures.ttar` file +based on the updated `fixtures` directory. And finally, verify the changes using +`git diff fixtures.ttar`. diff --git a/vendor/github.com/prometheus/procfs/SECURITY.md b/vendor/github.com/prometheus/procfs/SECURITY.md new file mode 100644 index 000000000..67741f015 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/SECURITY.md @@ -0,0 +1,6 @@ +# Reporting a security issue + +The Prometheus security policy, including how to report vulnerabilities, can be +found here: + +https://prometheus.io/docs/operating/security/ diff --git a/vendor/github.com/prometheus/procfs/arp.go b/vendor/github.com/prometheus/procfs/arp.go new file mode 100644 index 000000000..4e47e6172 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/arp.go @@ -0,0 +1,85 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "fmt" + "io/ioutil" + "net" + "strings" +) + +// ARPEntry contains a single row of the columnar data represented in +// /proc/net/arp. +type ARPEntry struct { + // IP address + IPAddr net.IP + // MAC address + HWAddr net.HardwareAddr + // Name of the device + Device string +} + +// GatherARPEntries retrieves all the ARP entries, parse the relevant columns, +// and then return a slice of ARPEntry's. +func (fs FS) GatherARPEntries() ([]ARPEntry, error) { + data, err := ioutil.ReadFile(fs.proc.Path("net/arp")) + if err != nil { + return nil, fmt.Errorf("error reading arp %q: %w", fs.proc.Path("net/arp"), err) + } + + return parseARPEntries(data) +} + +func parseARPEntries(data []byte) ([]ARPEntry, error) { + lines := strings.Split(string(data), "\n") + entries := make([]ARPEntry, 0) + var err error + const ( + expectedDataWidth = 6 + expectedHeaderWidth = 9 + ) + for _, line := range lines { + columns := strings.Fields(line) + width := len(columns) + + if width == expectedHeaderWidth || width == 0 { + continue + } else if width == expectedDataWidth { + entry, err := parseARPEntry(columns) + if err != nil { + return []ARPEntry{}, fmt.Errorf("failed to parse ARP entry: %w", err) + } + entries = append(entries, entry) + } else { + return []ARPEntry{}, fmt.Errorf("%d columns were detected, but %d were expected", width, expectedDataWidth) + } + + } + + return entries, err +} + +func parseARPEntry(columns []string) (ARPEntry, error) { + ip := net.ParseIP(columns[0]) + mac := net.HardwareAddr(columns[3]) + + entry := ARPEntry{ + IPAddr: ip, + HWAddr: mac, + Device: columns[5], + } + + return entry, nil +} diff --git a/vendor/github.com/prometheus/procfs/buddyinfo.go b/vendor/github.com/prometheus/procfs/buddyinfo.go new file mode 100644 index 000000000..f5b7939b2 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/buddyinfo.go @@ -0,0 
+1,85 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "fmt" + "io" + "os" + "strconv" + "strings" +) + +// A BuddyInfo is the details parsed from /proc/buddyinfo. +// The data is comprised of an array of free fragments of each size. +// The sizes are 2^n*PAGE_SIZE, where n is the array index. +type BuddyInfo struct { + Node string + Zone string + Sizes []float64 +} + +// BuddyInfo reads the buddyinfo statistics from the specified `proc` filesystem. 
+func (fs FS) BuddyInfo() ([]BuddyInfo, error) { + file, err := os.Open(fs.proc.Path("buddyinfo")) + if err != nil { + return nil, err + } + defer file.Close() + + return parseBuddyInfo(file) +} + +func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) { + var ( + buddyInfo = []BuddyInfo{} + scanner = bufio.NewScanner(r) + bucketCount = -1 + ) + + for scanner.Scan() { + var err error + line := scanner.Text() + parts := strings.Fields(line) + + if len(parts) < 4 { + return nil, fmt.Errorf("invalid number of fields when parsing buddyinfo") + } + + node := strings.TrimRight(parts[1], ",") + zone := strings.TrimRight(parts[3], ",") + arraySize := len(parts[4:]) + + if bucketCount == -1 { + bucketCount = arraySize + } else { + if bucketCount != arraySize { + return nil, fmt.Errorf("mismatch in number of buddyinfo buckets, previous count %d, new count %d", bucketCount, arraySize) + } + } + + sizes := make([]float64, arraySize) + for i := 0; i < arraySize; i++ { + sizes[i], err = strconv.ParseFloat(parts[i+4], 64) + if err != nil { + return nil, fmt.Errorf("invalid value in buddyinfo: %w", err) + } + } + + buddyInfo = append(buddyInfo, BuddyInfo{node, zone, sizes}) + } + + return buddyInfo, scanner.Err() +} diff --git a/vendor/github.com/prometheus/procfs/cpuinfo.go b/vendor/github.com/prometheus/procfs/cpuinfo.go new file mode 100644 index 000000000..5623b24a1 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/cpuinfo.go @@ -0,0 +1,481 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// +build linux + +package procfs + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "regexp" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// CPUInfo contains general information about a system CPU found in /proc/cpuinfo +type CPUInfo struct { + Processor uint + VendorID string + CPUFamily string + Model string + ModelName string + Stepping string + Microcode string + CPUMHz float64 + CacheSize string + PhysicalID string + Siblings uint + CoreID string + CPUCores uint + APICID string + InitialAPICID string + FPU string + FPUException string + CPUIDLevel uint + WP string + Flags []string + Bugs []string + BogoMips float64 + CLFlushSize uint + CacheAlignment uint + AddressSizes string + PowerManagement string +} + +var ( + cpuinfoClockRegexp = regexp.MustCompile(`([\d.]+)`) + cpuinfoS390XProcessorRegexp = regexp.MustCompile(`^processor\s+(\d+):.*`) +) + +// CPUInfo returns information about current system CPUs. 
+// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt +func (fs FS) CPUInfo() ([]CPUInfo, error) { + data, err := util.ReadFileNoStat(fs.proc.Path("cpuinfo")) + if err != nil { + return nil, err + } + return parseCPUInfo(data) +} + +func parseCPUInfoX86(info []byte) ([]CPUInfo, error) { + scanner := bufio.NewScanner(bytes.NewReader(info)) + + // find the first "processor" line + firstLine := firstNonEmptyLine(scanner) + if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") { + return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine) + } + field := strings.SplitN(firstLine, ": ", 2) + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + firstcpu := CPUInfo{Processor: uint(v)} + cpuinfo := []CPUInfo{firstcpu} + i := 0 + + for scanner.Scan() { + line := scanner.Text() + if !strings.Contains(line, ":") { + continue + } + field := strings.SplitN(line, ": ", 2) + switch strings.TrimSpace(field[0]) { + case "processor": + cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor + i++ + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + cpuinfo[i].Processor = uint(v) + case "vendor", "vendor_id": + cpuinfo[i].VendorID = field[1] + case "cpu family": + cpuinfo[i].CPUFamily = field[1] + case "model": + cpuinfo[i].Model = field[1] + case "model name": + cpuinfo[i].ModelName = field[1] + case "stepping": + cpuinfo[i].Stepping = field[1] + case "microcode": + cpuinfo[i].Microcode = field[1] + case "cpu MHz": + v, err := strconv.ParseFloat(field[1], 64) + if err != nil { + return nil, err + } + cpuinfo[i].CPUMHz = v + case "cache size": + cpuinfo[i].CacheSize = field[1] + case "physical id": + cpuinfo[i].PhysicalID = field[1] + case "siblings": + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + cpuinfo[i].Siblings = uint(v) + case "core id": + cpuinfo[i].CoreID = field[1] + case "cpu cores": + v, err := 
strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + cpuinfo[i].CPUCores = uint(v) + case "apicid": + cpuinfo[i].APICID = field[1] + case "initial apicid": + cpuinfo[i].InitialAPICID = field[1] + case "fpu": + cpuinfo[i].FPU = field[1] + case "fpu_exception": + cpuinfo[i].FPUException = field[1] + case "cpuid level": + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + cpuinfo[i].CPUIDLevel = uint(v) + case "wp": + cpuinfo[i].WP = field[1] + case "flags": + cpuinfo[i].Flags = strings.Fields(field[1]) + case "bugs": + cpuinfo[i].Bugs = strings.Fields(field[1]) + case "bogomips": + v, err := strconv.ParseFloat(field[1], 64) + if err != nil { + return nil, err + } + cpuinfo[i].BogoMips = v + case "clflush size": + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + cpuinfo[i].CLFlushSize = uint(v) + case "cache_alignment": + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + cpuinfo[i].CacheAlignment = uint(v) + case "address sizes": + cpuinfo[i].AddressSizes = field[1] + case "power management": + cpuinfo[i].PowerManagement = field[1] + } + } + return cpuinfo, nil +} + +func parseCPUInfoARM(info []byte) ([]CPUInfo, error) { + scanner := bufio.NewScanner(bytes.NewReader(info)) + + firstLine := firstNonEmptyLine(scanner) + match, _ := regexp.MatchString("^[Pp]rocessor", firstLine) + if !match || !strings.Contains(firstLine, ":") { + return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine) + } + field := strings.SplitN(firstLine, ": ", 2) + cpuinfo := []CPUInfo{} + featuresLine := "" + commonCPUInfo := CPUInfo{} + i := 0 + if strings.TrimSpace(field[0]) == "Processor" { + commonCPUInfo = CPUInfo{ModelName: field[1]} + i = -1 + } else { + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + firstcpu := CPUInfo{Processor: uint(v)} + cpuinfo = []CPUInfo{firstcpu} + } + + for scanner.Scan() { + line := 
scanner.Text() + if !strings.Contains(line, ":") { + continue + } + field := strings.SplitN(line, ": ", 2) + switch strings.TrimSpace(field[0]) { + case "processor": + cpuinfo = append(cpuinfo, commonCPUInfo) // start of the next processor + i++ + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + cpuinfo[i].Processor = uint(v) + case "BogoMIPS": + if i == -1 { + cpuinfo = append(cpuinfo, commonCPUInfo) // There is only one processor + i++ + cpuinfo[i].Processor = 0 + } + v, err := strconv.ParseFloat(field[1], 64) + if err != nil { + return nil, err + } + cpuinfo[i].BogoMips = v + case "Features": + featuresLine = line + case "model name": + cpuinfo[i].ModelName = field[1] + } + } + fields := strings.SplitN(featuresLine, ": ", 2) + for i := range cpuinfo { + cpuinfo[i].Flags = strings.Fields(fields[1]) + } + return cpuinfo, nil + +} + +func parseCPUInfoS390X(info []byte) ([]CPUInfo, error) { + scanner := bufio.NewScanner(bytes.NewReader(info)) + + firstLine := firstNonEmptyLine(scanner) + if !strings.HasPrefix(firstLine, "vendor_id") || !strings.Contains(firstLine, ":") { + return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine) + } + field := strings.SplitN(firstLine, ": ", 2) + cpuinfo := []CPUInfo{} + commonCPUInfo := CPUInfo{VendorID: field[1]} + + for scanner.Scan() { + line := scanner.Text() + if !strings.Contains(line, ":") { + continue + } + field := strings.SplitN(line, ": ", 2) + switch strings.TrimSpace(field[0]) { + case "bogomips per cpu": + v, err := strconv.ParseFloat(field[1], 64) + if err != nil { + return nil, err + } + commonCPUInfo.BogoMips = v + case "features": + commonCPUInfo.Flags = strings.Fields(field[1]) + } + if strings.HasPrefix(line, "processor") { + match := cpuinfoS390XProcessorRegexp.FindStringSubmatch(line) + if len(match) < 2 { + return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine) + } + cpu := commonCPUInfo + v, err := strconv.ParseUint(match[1], 0, 32) + if err != nil { + return 
nil, err + } + cpu.Processor = uint(v) + cpuinfo = append(cpuinfo, cpu) + } + if strings.HasPrefix(line, "cpu number") { + break + } + } + + i := 0 + for scanner.Scan() { + line := scanner.Text() + if !strings.Contains(line, ":") { + continue + } + field := strings.SplitN(line, ": ", 2) + switch strings.TrimSpace(field[0]) { + case "cpu number": + i++ + case "cpu MHz dynamic": + clock := cpuinfoClockRegexp.FindString(strings.TrimSpace(field[1])) + v, err := strconv.ParseFloat(clock, 64) + if err != nil { + return nil, err + } + cpuinfo[i].CPUMHz = v + case "physical id": + cpuinfo[i].PhysicalID = field[1] + case "core id": + cpuinfo[i].CoreID = field[1] + case "cpu cores": + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + cpuinfo[i].CPUCores = uint(v) + case "siblings": + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + cpuinfo[i].Siblings = uint(v) + } + } + + return cpuinfo, nil +} + +func parseCPUInfoMips(info []byte) ([]CPUInfo, error) { + scanner := bufio.NewScanner(bytes.NewReader(info)) + + // find the first "processor" line + firstLine := firstNonEmptyLine(scanner) + if !strings.HasPrefix(firstLine, "system type") || !strings.Contains(firstLine, ":") { + return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine) + } + field := strings.SplitN(firstLine, ": ", 2) + cpuinfo := []CPUInfo{} + systemType := field[1] + + i := 0 + + for scanner.Scan() { + line := scanner.Text() + if !strings.Contains(line, ":") { + continue + } + field := strings.SplitN(line, ": ", 2) + switch strings.TrimSpace(field[0]) { + case "processor": + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + i = int(v) + cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor + cpuinfo[i].Processor = uint(v) + cpuinfo[i].VendorID = systemType + case "cpu model": + cpuinfo[i].ModelName = field[1] + case "BogoMIPS": + v, err := strconv.ParseFloat(field[1], 64) + if err != 
nil { + return nil, err + } + cpuinfo[i].BogoMips = v + } + } + return cpuinfo, nil +} + +func parseCPUInfoPPC(info []byte) ([]CPUInfo, error) { + scanner := bufio.NewScanner(bytes.NewReader(info)) + + firstLine := firstNonEmptyLine(scanner) + if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") { + return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine) + } + field := strings.SplitN(firstLine, ": ", 2) + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + firstcpu := CPUInfo{Processor: uint(v)} + cpuinfo := []CPUInfo{firstcpu} + i := 0 + + for scanner.Scan() { + line := scanner.Text() + if !strings.Contains(line, ":") { + continue + } + field := strings.SplitN(line, ": ", 2) + switch strings.TrimSpace(field[0]) { + case "processor": + cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor + i++ + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + cpuinfo[i].Processor = uint(v) + case "cpu": + cpuinfo[i].VendorID = field[1] + case "clock": + clock := cpuinfoClockRegexp.FindString(strings.TrimSpace(field[1])) + v, err := strconv.ParseFloat(clock, 64) + if err != nil { + return nil, err + } + cpuinfo[i].CPUMHz = v + } + } + return cpuinfo, nil +} + +func parseCPUInfoRISCV(info []byte) ([]CPUInfo, error) { + scanner := bufio.NewScanner(bytes.NewReader(info)) + + firstLine := firstNonEmptyLine(scanner) + if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") { + return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine) + } + field := strings.SplitN(firstLine, ": ", 2) + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + firstcpu := CPUInfo{Processor: uint(v)} + cpuinfo := []CPUInfo{firstcpu} + i := 0 + + for scanner.Scan() { + line := scanner.Text() + if !strings.Contains(line, ":") { + continue + } + field := strings.SplitN(line, ": ", 2) + switch strings.TrimSpace(field[0]) { + case 
"processor": + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + i = int(v) + cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor + cpuinfo[i].Processor = uint(v) + case "hart": + cpuinfo[i].CoreID = field[1] + case "isa": + cpuinfo[i].ModelName = field[1] + } + } + return cpuinfo, nil +} + +func parseCPUInfoDummy(_ []byte) ([]CPUInfo, error) { // nolint:unused,deadcode + return nil, errors.New("not implemented") +} + +// firstNonEmptyLine advances the scanner to the first non-empty line +// and returns the contents of that line +func firstNonEmptyLine(scanner *bufio.Scanner) string { + for scanner.Scan() { + line := scanner.Text() + if strings.TrimSpace(line) != "" { + return line + } + } + return "" +} diff --git a/vendor/github.com/blevesearch/bleve/v2/config_app.go b/vendor/github.com/prometheus/procfs/cpuinfo_armx.go similarity index 64% rename from vendor/github.com/blevesearch/bleve/v2/config_app.go rename to vendor/github.com/prometheus/procfs/cpuinfo_armx.go index 112d0b600..44b590ed3 100644 --- a/vendor/github.com/blevesearch/bleve/v2/config_app.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo_armx.go @@ -1,10 +1,9 @@ -// Copyright (c) 2014 Couchbase, Inc. -// +// Copyright 2020 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -12,12 +11,9 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-// +build appengine appenginevm - -package bleve +// +build linux +// +build arm arm64 -// in the appengine environment we cannot support disk based indexes -// so we do no extra configuration in this method -func initDisk() { +package procfs -} +var parseCPUInfo = parseCPUInfoARM diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go b/vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go new file mode 100644 index 000000000..91e272573 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go @@ -0,0 +1,19 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build linux +// +build mips mipsle mips64 mips64le + +package procfs + +var parseCPUInfo = parseCPUInfoMips diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_others.go b/vendor/github.com/prometheus/procfs/cpuinfo_others.go new file mode 100644 index 000000000..95b5b4ec4 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/cpuinfo_others.go @@ -0,0 +1,19 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build linux +// +build !386,!amd64,!arm,!arm64,!mips,!mips64,!mips64le,!mipsle,!ppc64,!ppc64le,!riscv64,!s390x + +package procfs + +var parseCPUInfo = parseCPUInfoDummy diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go b/vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go new file mode 100644 index 000000000..6068bd571 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go @@ -0,0 +1,19 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// +build linux +// +build ppc64 ppc64le + +package procfs + +var parseCPUInfo = parseCPUInfoPPC diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go b/vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go new file mode 100644 index 000000000..e83c2e207 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go @@ -0,0 +1,19 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build linux +// +build riscv riscv64 + +package procfs + +var parseCPUInfo = parseCPUInfoRISCV diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go b/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go new file mode 100644 index 000000000..26814eeba --- /dev/null +++ b/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go @@ -0,0 +1,18 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// +build linux + +package procfs + +var parseCPUInfo = parseCPUInfoS390X diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_x86.go b/vendor/github.com/prometheus/procfs/cpuinfo_x86.go new file mode 100644 index 000000000..d5bedf97f --- /dev/null +++ b/vendor/github.com/prometheus/procfs/cpuinfo_x86.go @@ -0,0 +1,19 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build linux +// +build 386 amd64 + +package procfs + +var parseCPUInfo = parseCPUInfoX86 diff --git a/vendor/github.com/prometheus/procfs/crypto.go b/vendor/github.com/prometheus/procfs/crypto.go new file mode 100644 index 000000000..5048ad1f2 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/crypto.go @@ -0,0 +1,153 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// Crypto holds info parsed from /proc/crypto. +type Crypto struct { + Alignmask *uint64 + Async bool + Blocksize *uint64 + Chunksize *uint64 + Ctxsize *uint64 + Digestsize *uint64 + Driver string + Geniv string + Internal string + Ivsize *uint64 + Maxauthsize *uint64 + MaxKeysize *uint64 + MinKeysize *uint64 + Module string + Name string + Priority *int64 + Refcnt *int64 + Seedsize *uint64 + Selftest string + Type string + Walksize *uint64 +} + +// Crypto parses an crypto-file (/proc/crypto) and returns a slice of +// structs containing the relevant info. More information available here: +// https://kernel.readthedocs.io/en/sphinx-samples/crypto-API.html +func (fs FS) Crypto() ([]Crypto, error) { + path := fs.proc.Path("crypto") + b, err := util.ReadFileNoStat(path) + if err != nil { + return nil, fmt.Errorf("error reading crypto %q: %w", path, err) + } + + crypto, err := parseCrypto(bytes.NewReader(b)) + if err != nil { + return nil, fmt.Errorf("error parsing crypto %q: %w", path, err) + } + + return crypto, nil +} + +// parseCrypto parses a /proc/crypto stream into Crypto elements. +func parseCrypto(r io.Reader) ([]Crypto, error) { + var out []Crypto + + s := bufio.NewScanner(r) + for s.Scan() { + text := s.Text() + switch { + case strings.HasPrefix(text, "name"): + // Each crypto element begins with its name. + out = append(out, Crypto{}) + case text == "": + continue + } + + kv := strings.Split(text, ":") + if len(kv) != 2 { + return nil, fmt.Errorf("malformed crypto line: %q", text) + } + + k := strings.TrimSpace(kv[0]) + v := strings.TrimSpace(kv[1]) + + // Parse the key/value pair into the currently focused element. 
+ c := &out[len(out)-1] + if err := c.parseKV(k, v); err != nil { + return nil, err + } + } + + if err := s.Err(); err != nil { + return nil, err + } + + return out, nil +} + +// parseKV parses a key/value pair into the appropriate field of c. +func (c *Crypto) parseKV(k, v string) error { + vp := util.NewValueParser(v) + + switch k { + case "async": + // Interpret literal yes as true. + c.Async = v == "yes" + case "blocksize": + c.Blocksize = vp.PUInt64() + case "chunksize": + c.Chunksize = vp.PUInt64() + case "digestsize": + c.Digestsize = vp.PUInt64() + case "driver": + c.Driver = v + case "geniv": + c.Geniv = v + case "internal": + c.Internal = v + case "ivsize": + c.Ivsize = vp.PUInt64() + case "maxauthsize": + c.Maxauthsize = vp.PUInt64() + case "max keysize": + c.MaxKeysize = vp.PUInt64() + case "min keysize": + c.MinKeysize = vp.PUInt64() + case "module": + c.Module = v + case "name": + c.Name = v + case "priority": + c.Priority = vp.PInt64() + case "refcnt": + c.Refcnt = vp.PInt64() + case "seedsize": + c.Seedsize = vp.PUInt64() + case "selftest": + c.Selftest = v + case "type": + c.Type = v + case "walksize": + c.Walksize = vp.PUInt64() + } + + return vp.Err() +} diff --git a/vendor/github.com/m3db/prometheus_procfs/doc.go b/vendor/github.com/prometheus/procfs/doc.go similarity index 100% rename from vendor/github.com/m3db/prometheus_procfs/doc.go rename to vendor/github.com/prometheus/procfs/doc.go diff --git a/vendor/github.com/prometheus/procfs/fixtures.ttar b/vendor/github.com/prometheus/procfs/fixtures.ttar new file mode 100644 index 000000000..1e76173da --- /dev/null +++ b/vendor/github.com/prometheus/procfs/fixtures.ttar @@ -0,0 +1,6553 @@ +# Archive created by ttar -c -f fixtures.ttar fixtures/ +Directory: fixtures +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/proc +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: 
fixtures/proc/26231 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26231/cmdline +Lines: 1 +vimNULLBYTEtest.goNULLBYTE+10NULLBYTEEOF +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26231/comm +Lines: 1 +vim +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26231/cwd +SymlinkTo: /usr/bin +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26231/environ +Lines: 1 +PATH=/go/bin:/usr/local/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/binNULLBYTEHOSTNAME=cd24e11f73a5NULLBYTETERM=xtermNULLBYTEGOLANG_VERSION=1.12.5NULLBYTEGOPATH=/goNULLBYTEHOME=/rootNULLBYTEEOF +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26231/exe +SymlinkTo: /usr/bin/vim +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/proc/26231/fd +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26231/fd/0 +SymlinkTo: ../../symlinktargets/abc +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26231/fd/1 +SymlinkTo: ../../symlinktargets/def +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26231/fd/10 +SymlinkTo: ../../symlinktargets/xyz +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26231/fd/2 +SymlinkTo: ../../symlinktargets/ghi +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26231/fd/3 +SymlinkTo: ../../symlinktargets/uvw +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/proc/26231/fdinfo +Mode: 755 +# ttar - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26231/fdinfo/0 +Lines: 6 +pos: 0 +flags: 02004000 +mnt_id: 13 +inotify wd:3 ino:1 sdev:34 mask:fce ignored_mask:0 fhandle-bytes:c fhandle-type:81 f_handle:000000000100000000000000 +inotify wd:2 ino:1300016 sdev:fd00002 mask:fce ignored_mask:0 fhandle-bytes:8 fhandle-type:1 f_handle:16003001ed3f022a +inotify wd:1 ino:2e0001 sdev:fd00000 mask:fce ignored_mask:0 fhandle-bytes:8 fhandle-type:1 f_handle:01002e00138e7c65 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26231/fdinfo/1 +Lines: 4 +pos: 0 +flags: 02004002 +mnt_id: 13 +eventfd-count: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26231/fdinfo/10 +Lines: 3 +pos: 0 +flags: 02004002 +mnt_id: 9 +Mode: 400 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26231/fdinfo/2 +Lines: 3 +pos: 0 +flags: 02004002 +mnt_id: 9 +Mode: 400 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26231/fdinfo/3 +Lines: 3 +pos: 0 +flags: 02004002 +mnt_id: 9 +Mode: 400 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26231/io +Lines: 7 +rchar: 750339 +wchar: 818609 +syscr: 7405 +syscw: 5245 +read_bytes: 1024 +write_bytes: 2048 +cancelled_write_bytes: -1024 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26231/limits +Lines: 17 +Limit Soft Limit Hard Limit Units +Max cpu time unlimited unlimited seconds +Max file size unlimited unlimited bytes +Max data size unlimited unlimited bytes +Max stack size 8388608 unlimited bytes +Max core file size 0 unlimited bytes +Max resident set unlimited unlimited bytes +Max processes 62898 62898 processes +Max open files 2048 4096 files +Max locked memory 18446744073708503040 
18446744073708503040 bytes +Max address space 8589934592 unlimited bytes +Max file locks unlimited unlimited locks +Max pending signals 62898 62898 signals +Max msgqueue size 819200 819200 bytes +Max nice priority 0 0 +Max realtime priority 0 0 +Max realtime timeout unlimited unlimited us +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26231/mountstats +Lines: 20 +device rootfs mounted on / with fstype rootfs +device sysfs mounted on /sys with fstype sysfs +device proc mounted on /proc with fstype proc +device /dev/sda1 mounted on / with fstype ext4 +device 192.168.1.1:/srv/test mounted on /mnt/nfs/test with fstype nfs4 statvers=1.1 + opts: rw,vers=4.0,rsize=1048576,wsize=1048576,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=tcp,port=0,timeo=600,retrans=2,sec=sys,mountaddr=192.168.1.1,clientaddr=192.168.1.5,local_lock=none + age: 13968 + caps: caps=0xfff7,wtmult=512,dtsize=32768,bsize=0,namlen=255 + nfsv4: bm0=0xfdffafff,bm1=0xf9be3e,bm2=0x0,acl=0x0,pnfs=not configured + sec: flavor=1,pseudoflavor=1 + events: 52 226 0 0 1 13 398 0 0 331 0 47 0 0 77 0 0 77 0 0 0 0 0 0 0 0 0 + bytes: 1207640230 0 0 0 1210214218 0 295483 0 + RPC iostats version: 1.0 p/v: 100003/4 (nfs) + xprt: tcp 832 0 1 0 11 6428 6428 0 12154 0 24 26 5726 + per-op statistics + NULL: 0 0 0 0 0 0 0 0 + READ: 1298 1298 0 207680 1210292152 6 79386 79407 + WRITE: 0 0 0 0 0 0 0 0 + ACCESS: 2927395007 2927394995 0 526931094212 362996810236 18446743919241604546 1667369447 1953587717 + +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/proc/26231/net +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26231/net/dev +Lines: 4 +Inter-| Receive | Transmit + face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed + lo: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
0 + eth0: 438 5 0 0 0 0 0 0 648 8 0 0 0 0 0 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/proc/26231/ns +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26231/ns/mnt +SymlinkTo: mnt:[4026531840] +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26231/ns/net +SymlinkTo: net:[4026531993] +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26231/root +SymlinkTo: / +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26231/schedstat +Lines: 1 +411605849 93680043 79 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26231/smaps +Lines: 252 +00400000-00cb1000 r-xp 00000000 fd:01 952273 /bin/alertmanager +Size: 8900 kB +KernelPageSize: 4 kB +MMUPageSize: 4 kB +Rss: 2952 kB +Pss: 2952 kB +Shared_Clean: 0 kB +Shared_Dirty: 0 kB +Private_Clean: 2952 kB +Private_Dirty: 0 kB +Referenced: 2864 kB +Anonymous: 0 kB +LazyFree: 0 kB +AnonHugePages: 0 kB +ShmemPmdMapped: 0 kB +Shared_Hugetlb: 0 kB +Private_Hugetlb: 0 kB +Swap: 0 kB +SwapPss: 0 kB +Locked: 0 kB +VmFlags: rd ex mr mw me dw sd +00cb1000-016b0000 r--p 008b1000 fd:01 952273 /bin/alertmanager +Size: 10236 kB +KernelPageSize: 4 kB +MMUPageSize: 4 kB +Rss: 6152 kB +Pss: 6152 kB +Shared_Clean: 0 kB +Shared_Dirty: 0 kB +Private_Clean: 6152 kB +Private_Dirty: 0 kB +Referenced: 5308 kB +Anonymous: 0 kB +LazyFree: 0 kB +AnonHugePages: 0 kB +ShmemPmdMapped: 0 kB +Shared_Hugetlb: 0 kB +Private_Hugetlb: 0 kB +Swap: 0 kB +SwapPss: 0 kB +Locked: 0 kB +VmFlags: rd mr mw me dw sd +016b0000-0171a000 rw-p 012b0000 fd:01 952273 /bin/alertmanager +Size: 424 kB +KernelPageSize: 4 kB +MMUPageSize: 4 kB +Rss: 176 kB +Pss: 176 kB +Shared_Clean: 0 kB +Shared_Dirty: 0 kB +Private_Clean: 84 kB +Private_Dirty: 92 
kB +Referenced: 176 kB +Anonymous: 92 kB +LazyFree: 0 kB +AnonHugePages: 0 kB +ShmemPmdMapped: 0 kB +Shared_Hugetlb: 0 kB +Private_Hugetlb: 0 kB +Swap: 12 kB +SwapPss: 12 kB +Locked: 0 kB +VmFlags: rd wr mr mw me dw ac sd +0171a000-0173f000 rw-p 00000000 00:00 0 +Size: 148 kB +KernelPageSize: 4 kB +MMUPageSize: 4 kB +Rss: 76 kB +Pss: 76 kB +Shared_Clean: 0 kB +Shared_Dirty: 0 kB +Private_Clean: 0 kB +Private_Dirty: 76 kB +Referenced: 76 kB +Anonymous: 76 kB +LazyFree: 0 kB +AnonHugePages: 0 kB +ShmemPmdMapped: 0 kB +Shared_Hugetlb: 0 kB +Private_Hugetlb: 0 kB +Swap: 0 kB +SwapPss: 0 kB +Locked: 0 kB +VmFlags: rd wr mr mw me ac sd +c000000000-c000400000 rw-p 00000000 00:00 0 +Size: 4096 kB +KernelPageSize: 4 kB +MMUPageSize: 4 kB +Rss: 2564 kB +Pss: 2564 kB +Shared_Clean: 0 kB +Shared_Dirty: 0 kB +Private_Clean: 20 kB +Private_Dirty: 2544 kB +Referenced: 2544 kB +Anonymous: 2564 kB +LazyFree: 0 kB +AnonHugePages: 0 kB +ShmemPmdMapped: 0 kB +Shared_Hugetlb: 0 kB +Private_Hugetlb: 0 kB +Swap: 1100 kB +SwapPss: 1100 kB +Locked: 0 kB +VmFlags: rd wr mr mw me ac sd +c000400000-c001600000 rw-p 00000000 00:00 0 +Size: 18432 kB +KernelPageSize: 4 kB +MMUPageSize: 4 kB +Rss: 16024 kB +Pss: 16024 kB +Shared_Clean: 0 kB +Shared_Dirty: 0 kB +Private_Clean: 5864 kB +Private_Dirty: 10160 kB +Referenced: 11944 kB +Anonymous: 16024 kB +LazyFree: 5848 kB +AnonHugePages: 0 kB +ShmemPmdMapped: 0 kB +Shared_Hugetlb: 0 kB +Private_Hugetlb: 0 kB +Swap: 440 kB +SwapPss: 440 kB +Locked: 0 kB +VmFlags: rd wr mr mw me ac sd nh +c001600000-c004000000 rw-p 00000000 00:00 0 +Size: 43008 kB +KernelPageSize: 4 kB +MMUPageSize: 4 kB +Rss: 0 kB +Pss: 0 kB +Shared_Clean: 0 kB +Shared_Dirty: 0 kB +Private_Clean: 0 kB +Private_Dirty: 0 kB +Referenced: 0 kB +Anonymous: 0 kB +LazyFree: 0 kB +AnonHugePages: 0 kB +ShmemPmdMapped: 0 kB +Shared_Hugetlb: 0 kB +Private_Hugetlb: 0 kB +Swap: 0 kB +SwapPss: 0 kB +Locked: 0 kB +VmFlags: rd wr mr mw me ac sd +7f0ab95ca000-7f0abbb7b000 rw-p 00000000 00:00 0 +Size: 
38596 kB +KernelPageSize: 4 kB +MMUPageSize: 4 kB +Rss: 1992 kB +Pss: 1992 kB +Shared_Clean: 0 kB +Shared_Dirty: 0 kB +Private_Clean: 476 kB +Private_Dirty: 1516 kB +Referenced: 1828 kB +Anonymous: 1992 kB +LazyFree: 0 kB +AnonHugePages: 0 kB +ShmemPmdMapped: 0 kB +Shared_Hugetlb: 0 kB +Private_Hugetlb: 0 kB +Swap: 384 kB +SwapPss: 384 kB +Locked: 0 kB +VmFlags: rd wr mr mw me ac sd +7ffc07ecf000-7ffc07ef0000 rw-p 00000000 00:00 0 [stack] +Size: 132 kB +KernelPageSize: 4 kB +MMUPageSize: 4 kB +Rss: 8 kB +Pss: 8 kB +Shared_Clean: 0 kB +Shared_Dirty: 0 kB +Private_Clean: 0 kB +Private_Dirty: 8 kB +Referenced: 8 kB +Anonymous: 8 kB +LazyFree: 0 kB +AnonHugePages: 0 kB +ShmemPmdMapped: 0 kB +Shared_Hugetlb: 0 kB +Private_Hugetlb: 0 kB +Swap: 4 kB +SwapPss: 4 kB +Locked: 0 kB +VmFlags: rd wr mr mw me gd ac +7ffc07f9e000-7ffc07fa1000 r--p 00000000 00:00 0 [vvar] +Size: 12 kB +KernelPageSize: 4 kB +MMUPageSize: 4 kB +Rss: 0 kB +Pss: 0 kB +Shared_Clean: 0 kB +Shared_Dirty: 0 kB +Private_Clean: 0 kB +Private_Dirty: 0 kB +Referenced: 0 kB +Anonymous: 0 kB +LazyFree: 0 kB +AnonHugePages: 0 kB +ShmemPmdMapped: 0 kB +Shared_Hugetlb: 0 kB +Private_Hugetlb: 0 kB +Swap: 0 kB +SwapPss: 0 kB +Locked: 0 kB +VmFlags: rd mr pf io de dd sd +7ffc07fa1000-7ffc07fa3000 r-xp 00000000 00:00 0 [vdso] +Size: 8 kB +KernelPageSize: 4 kB +MMUPageSize: 4 kB +Rss: 4 kB +Pss: 0 kB +Shared_Clean: 4 kB +Shared_Dirty: 0 kB +Private_Clean: 0 kB +Private_Dirty: 0 kB +Referenced: 4 kB +Anonymous: 0 kB +LazyFree: 0 kB +AnonHugePages: 0 kB +ShmemPmdMapped: 0 kB +Shared_Hugetlb: 0 kB +Private_Hugetlb: 0 kB +Swap: 0 kB +SwapPss: 0 kB +Locked: 0 kB +VmFlags: rd ex mr mw me de sd +ffffffffff600000-ffffffffff601000 r-xp 00000000 00:00 0 [vsyscall] +Size: 4 kB +KernelPageSize: 4 kB +MMUPageSize: 4 kB +Rss: 0 kB +Pss: 0 kB +Shared_Clean: 0 kB +Shared_Dirty: 0 kB +Private_Clean: 0 kB +Private_Dirty: 0 kB +Referenced: 0 kB +Anonymous: 0 kB +LazyFree: 0 kB +AnonHugePages: 0 kB +ShmemPmdMapped: 0 kB +Shared_Hugetlb: 0 
kB +Private_Hugetlb: 0 kB +Swap: 0 kB +SwapPss: 0 kB +Locked: 0 kB +VmFlags: rd ex +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26231/smaps_rollup +Lines: 17 +00400000-ffffffffff601000 ---p 00000000 00:00 0 [rollup] +Rss: 29948 kB +Pss: 29944 kB +Shared_Clean: 4 kB +Shared_Dirty: 0 kB +Private_Clean: 15548 kB +Private_Dirty: 14396 kB +Referenced: 24752 kB +Anonymous: 20756 kB +LazyFree: 5848 kB +AnonHugePages: 0 kB +ShmemPmdMapped: 0 kB +Shared_Hugetlb: 0 kB +Private_Hugetlb: 0 kB +Swap: 1940 kB +SwapPss: 1940 kB +Locked: 0 kB +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26231/stat +Lines: 1 +26231 (vim) R 5392 7446 5392 34835 7446 4218880 32533 309516 26 82 1677 44 158 99 20 0 1 0 82375 56274944 1981 18446744073709551615 4194304 6294284 140736914091744 140736914087944 139965136429984 0 0 12288 1870679807 0 0 0 17 0 0 0 31 0 0 8391624 8481048 16420864 140736914093252 140736914093279 140736914093279 140736914096107 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26231/status +Lines: 53 + +Name: prometheus +Umask: 0022 +State: S (sleeping) +Tgid: 26231 +Ngid: 0 +Pid: 26231 +PPid: 1 +TracerPid: 0 +Uid: 1000 1000 1000 0 +Gid: 1001 1001 1001 0 +FDSize: 128 +Groups: +NStgid: 1 +NSpid: 1 +NSpgid: 1 +NSsid: 1 +VmPeak: 58472 kB +VmSize: 58440 kB +VmLck: 0 kB +VmPin: 0 kB +VmHWM: 8028 kB +VmRSS: 6716 kB +RssAnon: 2092 kB +RssFile: 4624 kB +RssShmem: 0 kB +VmData: 2580 kB +VmStk: 136 kB +VmExe: 948 kB +VmLib: 6816 kB +VmPTE: 128 kB +VmPMD: 12 kB +VmSwap: 660 kB +HugetlbPages: 0 kB +Threads: 1 +SigQ: 8/63965 +SigPnd: 0000000000000000 +ShdPnd: 0000000000000000 +SigBlk: 7be3c0fe28014a03 +SigIgn: 0000000000001000 +SigCgt: 00000001800004ec +CapInh: 0000000000000000 +CapPrm: 0000003fffffffff +CapEff: 0000003fffffffff +CapBnd: 0000003fffffffff +CapAmb: 0000000000000000 +Seccomp: 0 
+Cpus_allowed: ff +Cpus_allowed_list: 0-7 +Mems_allowed: 00000000,00000001 +Mems_allowed_list: 0 +voluntary_ctxt_switches: 4742839 +nonvoluntary_ctxt_switches: 1727500 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26231/wchan +Lines: 1 +poll_schedule_timeoutEOF +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/proc/26232 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26232/cmdline +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26232/comm +Lines: 1 +ata_sff +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26232/cwd +SymlinkTo: /does/not/exist +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/proc/26232/fd +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26232/fd/0 +SymlinkTo: ../../symlinktargets/abc +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26232/fd/1 +SymlinkTo: ../../symlinktargets/def +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26232/fd/2 +SymlinkTo: ../../symlinktargets/ghi +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26232/fd/3 +SymlinkTo: ../../symlinktargets/uvw +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26232/fd/4 +SymlinkTo: ../../symlinktargets/xyz +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26232/limits +Lines: 17 +Limit Soft Limit Hard Limit Units +Max cpu time unlimited unlimited seconds +Max file size unlimited unlimited bytes +Max 
data size unlimited unlimited bytes +Max stack size 8388608 unlimited bytes +Max core file size 0 unlimited bytes +Max resident set unlimited unlimited bytes +Max processes 29436 29436 processes +Max open files 1024 4096 files +Max locked memory 65536 65536 bytes +Max address space unlimited unlimited bytes +Max file locks unlimited unlimited locks +Max pending signals 29436 29436 signals +Max msgqueue size 819200 819200 bytes +Max nice priority 0 0 +Max realtime priority 0 0 +Max realtime timeout unlimited unlimited us +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26232/maps +Lines: 9 +55680ae1e000-55680ae20000 r--p 00000000 fd:01 47316994 /bin/cat +55680ae29000-55680ae2a000 rwxs 0000a000 fd:01 47316994 /bin/cat +55680bed6000-55680bef7000 rw-p 00000000 00:00 0 [heap] +7fdf964fc000-7fdf973f2000 r--p 00000000 fd:01 17432624 /usr/lib/locale/locale-archive +7fdf973f2000-7fdf97417000 r--p 00000000 fd:01 60571062 /lib/x86_64-linux-gnu/libc-2.29.so +7ffe9215c000-7ffe9217f000 rw-p 00000000 00:00 0 [stack] +7ffe921da000-7ffe921dd000 r--p 00000000 00:00 0 [vvar] +7ffe921dd000-7ffe921de000 r-xp 00000000 00:00 0 [vdso] +ffffffffff600000-ffffffffff601000 --xp 00000000 00:00 0 [vsyscall] +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26232/root +SymlinkTo: /does/not/exist +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26232/stat +Lines: 1 +33 (ata_sff) S 2 0 0 0 -1 69238880 0 0 0 0 0 0 0 0 0 -20 1 0 5 0 0 18446744073709551615 0 0 0 0 0 0 0 2147483647 0 18446744073709551615 0 0 17 1 0 0 0 0 0 0 0 0 0 0 0 0 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26232/wchan +Lines: 1 +0EOF +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/proc/26233 +Mode: 755 +# ttar - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26233/cmdline +Lines: 1 +com.github.uiautomatorNULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTEEOF +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26233/schedstat +Lines: 8 + ____________________________________ +< this is a malformed schedstat file > + ------------------------------------ + \ ^__^ + \ (oo)\_______ + (__)\ )\/\ + ||----w | + || || +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/proc/26234 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26234/maps +Lines: 4 +08048000-08089000 r-xp 00000000 03:01 104219 /bin/tcsh +08089000-0808c000 rw-p 00041000 03:01 104219 /bin/tcsh +0808c000-08146000 rwxp 00000000 00:00 0 +40000000-40015000 r-xp 00000000 03:01 61874 /lib/ld-2.3.2.so +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/proc/584 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/584/stat +Lines: 2 +1020 ((a b ) ( c d) ) R 28378 1020 28378 34842 1020 4218880 286 0 0 0 0 0 0 0 20 0 1 0 10839175 10395648 155 18446744073709551615 4194304 4238788 140736466511168 140736466511168 140609271124624 0 0 0 0 0 0 0 17 5 
0 0 0 0 0 6336016 6337300 25579520 140736466515030 140736466515061 140736466515061 140736466518002 0 +#!/bin/cat /proc/self/stat +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/buddyinfo +Lines: 3 +Node 0, zone DMA 1 0 1 0 2 1 1 0 1 1 3 +Node 0, zone DMA32 759 572 791 475 194 45 12 0 0 0 0 +Node 0, zone Normal 4381 1093 185 1530 567 102 4 0 0 0 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/cpuinfo +Lines: 216 +processor : 0 +vendor_id : GenuineIntel +cpu family : 6 +model : 142 +model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz +stepping : 10 +microcode : 0xb4 +cpu MHz : 799.998 +cache size : 8192 KB +physical id : 0 +siblings : 8 +core id : 0 +cpu cores : 4 +apicid : 0 +initial apicid : 0 +fpu : yes +fpu_exception : yes +cpuid level : 22 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d +bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs +bogomips : 4224.00 +clflush size : 64 +cache_alignment : 64 +address sizes : 39 bits physical, 48 bits virtual +power management: + +processor : 1 +vendor_id : GenuineIntel +cpu family : 6 +model : 142 +model name : Intel(R) Core(TM) i7-8650U CPU @ 
1.90GHz +stepping : 10 +microcode : 0xb4 +cpu MHz : 800.037 +cache size : 8192 KB +physical id : 0 +siblings : 8 +core id : 1 +cpu cores : 4 +apicid : 2 +initial apicid : 2 +fpu : yes +fpu_exception : yes +cpuid level : 22 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d +bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs +bogomips : 4224.00 +clflush size : 64 +cache_alignment : 64 +address sizes : 39 bits physical, 48 bits virtual +power management: + +processor : 2 +vendor_id : GenuineIntel +cpu family : 6 +model : 142 +model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz +stepping : 10 +microcode : 0xb4 +cpu MHz : 800.010 +cache size : 8192 KB +physical id : 0 +siblings : 8 +core id : 2 +cpu cores : 4 +apicid : 4 +initial apicid : 4 +fpu : yes +fpu_exception : yes +cpuid level : 22 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand 
lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d +bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs +bogomips : 4224.00 +clflush size : 64 +cache_alignment : 64 +address sizes : 39 bits physical, 48 bits virtual +power management: + +processor : 3 +vendor_id : GenuineIntel +cpu family : 6 +model : 142 +model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz +stepping : 10 +microcode : 0xb4 +cpu MHz : 800.028 +cache size : 8192 KB +physical id : 0 +siblings : 8 +core id : 3 +cpu cores : 4 +apicid : 6 +initial apicid : 6 +fpu : yes +fpu_exception : yes +cpuid level : 22 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d +bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs +bogomips : 4224.00 +clflush size : 64 +cache_alignment : 64 +address sizes : 39 bits physical, 48 bits virtual +power management: + +processor : 4 +vendor_id : GenuineIntel +cpu family : 6 +model : 142 +model name : Intel(R) Core(TM) i7-8650U CPU @ 
1.90GHz +stepping : 10 +microcode : 0xb4 +cpu MHz : 799.989 +cache size : 8192 KB +physical id : 0 +siblings : 8 +core id : 0 +cpu cores : 4 +apicid : 1 +initial apicid : 1 +fpu : yes +fpu_exception : yes +cpuid level : 22 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d +bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs +bogomips : 4224.00 +clflush size : 64 +cache_alignment : 64 +address sizes : 39 bits physical, 48 bits virtual +power management: + +processor : 5 +vendor_id : GenuineIntel +cpu family : 6 +model : 142 +model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz +stepping : 10 +microcode : 0xb4 +cpu MHz : 800.083 +cache size : 8192 KB +physical id : 0 +siblings : 8 +core id : 1 +cpu cores : 4 +apicid : 3 +initial apicid : 3 +fpu : yes +fpu_exception : yes +cpuid level : 22 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand 
lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d +bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs +bogomips : 4224.00 +clflush size : 64 +cache_alignment : 64 +address sizes : 39 bits physical, 48 bits virtual +power management: + +processor : 6 +vendor_id : GenuineIntel +cpu family : 6 +model : 142 +model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz +stepping : 10 +microcode : 0xb4 +cpu MHz : 800.017 +cache size : 8192 KB +physical id : 0 +siblings : 8 +core id : 2 +cpu cores : 4 +apicid : 5 +initial apicid : 5 +fpu : yes +fpu_exception : yes +cpuid level : 22 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d +bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs +bogomips : 4224.00 +clflush size : 64 +cache_alignment : 64 +address sizes : 39 bits physical, 48 bits virtual +power management: + +processor : 7 +vendor_id : GenuineIntel +cpu family : 6 +model : 142 +model name : Intel(R) Core(TM) i7-8650U CPU @ 
1.90GHz +stepping : 10 +microcode : 0xb4 +cpu MHz : 800.030 +cache size : 8192 KB +physical id : 0 +siblings : 8 +core id : 3 +cpu cores : 4 +apicid : 7 +initial apicid : 7 +fpu : yes +fpu_exception : yes +cpuid level : 22 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d +bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs +bogomips : 4224.00 +clflush size : 64 +cache_alignment : 64 +address sizes : 39 bits physical, 48 bits virtual +power management: + +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/crypto +Lines: 972 +name : ccm(aes) +driver : ccm_base(ctr(aes-aesni),cbcmac(aes-aesni)) +module : ccm +priority : 300 +refcnt : 4 +selftest : passed +internal : no +type : aead +async : no +blocksize : 1 +ivsize : 16 +maxauthsize : 16 +geniv : + +name : cbcmac(aes) +driver : cbcmac(aes-aesni) +module : ccm +priority : 300 +refcnt : 7 +selftest : passed +internal : no +type : shash +blocksize : 1 +digestsize : 16 + +name : ecdh +driver : ecdh-generic +module : ecdh_generic +priority : 100 +refcnt : 1 +selftest : passed +internal : no +type : kpp +async : yes + +name : ecb(arc4) +driver : ecb(arc4)-generic +module : arc4 +priority : 100 +refcnt : 1 +selftest : 
passed +internal : no +type : skcipher +async : no +blocksize : 1 +min keysize : 1 +max keysize : 256 +ivsize : 0 +chunksize : 1 +walksize : 1 + +name : arc4 +driver : arc4-generic +module : arc4 +priority : 0 +refcnt : 3 +selftest : passed +internal : no +type : cipher +blocksize : 1 +min keysize : 1 +max keysize : 256 + +name : crct10dif +driver : crct10dif-pclmul +module : crct10dif_pclmul +priority : 200 +refcnt : 2 +selftest : passed +internal : no +type : shash +blocksize : 1 +digestsize : 2 + +name : crc32 +driver : crc32-pclmul +module : crc32_pclmul +priority : 200 +refcnt : 1 +selftest : passed +internal : no +type : shash +blocksize : 1 +digestsize : 4 + +name : __ghash +driver : cryptd(__ghash-pclmulqdqni) +module : kernel +priority : 50 +refcnt : 1 +selftest : passed +internal : yes +type : ahash +async : yes +blocksize : 16 +digestsize : 16 + +name : ghash +driver : ghash-clmulni +module : ghash_clmulni_intel +priority : 400 +refcnt : 1 +selftest : passed +internal : no +type : ahash +async : yes +blocksize : 16 +digestsize : 16 + +name : __ghash +driver : __ghash-pclmulqdqni +module : ghash_clmulni_intel +priority : 0 +refcnt : 1 +selftest : passed +internal : yes +type : shash +blocksize : 16 +digestsize : 16 + +name : crc32c +driver : crc32c-intel +module : crc32c_intel +priority : 200 +refcnt : 5 +selftest : passed +internal : no +type : shash +blocksize : 1 +digestsize : 4 + +name : cbc(aes) +driver : cbc(aes-aesni) +module : kernel +priority : 300 +refcnt : 1 +selftest : passed +internal : no +type : skcipher +async : no +blocksize : 16 +min keysize : 16 +max keysize : 32 +ivsize : 16 +chunksize : 16 +walksize : 16 + +name : ctr(aes) +driver : ctr(aes-aesni) +module : kernel +priority : 300 +refcnt : 5 +selftest : passed +internal : no +type : skcipher +async : no +blocksize : 1 +min keysize : 16 +max keysize : 32 +ivsize : 16 +chunksize : 16 +walksize : 16 + +name : pkcs1pad(rsa,sha256) +driver : pkcs1pad(rsa-generic,sha256) +module : kernel 
+priority : 100 +refcnt : 1 +selftest : passed +internal : no +type : akcipher + +name : __xts(aes) +driver : cryptd(__xts-aes-aesni) +module : kernel +priority : 451 +refcnt : 1 +selftest : passed +internal : yes +type : skcipher +async : yes +blocksize : 16 +min keysize : 32 +max keysize : 64 +ivsize : 16 +chunksize : 16 +walksize : 16 + +name : xts(aes) +driver : xts-aes-aesni +module : kernel +priority : 401 +refcnt : 1 +selftest : passed +internal : no +type : skcipher +async : yes +blocksize : 16 +min keysize : 32 +max keysize : 64 +ivsize : 16 +chunksize : 16 +walksize : 16 + +name : __ctr(aes) +driver : cryptd(__ctr-aes-aesni) +module : kernel +priority : 450 +refcnt : 1 +selftest : passed +internal : yes +type : skcipher +async : yes +blocksize : 1 +max keysize : 32 +ivsize : 16 +chunksize : 16 +walksize : 16 + +name : ctr(aes) +driver : ctr-aes-aesni +module : kernel +priority : 400 +refcnt : 1 +selftest : passed +internal : no +type : skcipher +async : yes +blocksize : 1 +min keysize : 16 +max keysize : 32 +ivsize : 16 +chunksize : 16 +walksize : 16 + +name : __cbc(aes) +driver : cryptd(__cbc-aes-aesni) +module : kernel +priority : 450 +refcnt : 1 +selftest : passed +internal : yes +type : skcipher +async : yes +blocksize : 16 +min keysize : 16 +max keysize : 32 +ivsize : 16 +chunksize : 16 +walksize : 16 + +name : cbc(aes) +driver : cbc-aes-aesni +module : kernel +priority : 400 +refcnt : 1 +selftest : passed +internal : no +type : skcipher +async : yes +blocksize : 16 +min keysize : 16 +max keysize : 32 +ivsize : 16 +chunksize : 16 +walksize : 16 + +name : __ecb(aes) +driver : cryptd(__ecb-aes-aesni) +module : kernel +priority : 450 +refcnt : 1 +selftest : passed +internal : yes +type : skcipher +async : yes +blocksize : 16 +min keysize : 16 +max keysize : 32 +ivsize : 0 +chunksize : 16 +walksize : 16 + +name : ecb(aes) +driver : ecb-aes-aesni +module : kernel +priority : 400 +refcnt : 1 +selftest : passed +internal : no +type : skcipher +async : yes 
+blocksize : 16 +min keysize : 16 +max keysize : 32 +ivsize : 0 +chunksize : 16 +walksize : 16 + +name : __generic-gcm-aes-aesni +driver : cryptd(__driver-generic-gcm-aes-aesni) +module : kernel +priority : 50 +refcnt : 1 +selftest : passed +internal : yes +type : aead +async : yes +blocksize : 1 +ivsize : 12 +maxauthsize : 16 +geniv : + +name : gcm(aes) +driver : generic-gcm-aesni +module : kernel +priority : 400 +refcnt : 1 +selftest : passed +internal : no +type : aead +async : yes +blocksize : 1 +ivsize : 12 +maxauthsize : 16 +geniv : + +name : __generic-gcm-aes-aesni +driver : __driver-generic-gcm-aes-aesni +module : kernel +priority : 0 +refcnt : 1 +selftest : passed +internal : yes +type : aead +async : no +blocksize : 1 +ivsize : 12 +maxauthsize : 16 +geniv : + +name : __gcm-aes-aesni +driver : cryptd(__driver-gcm-aes-aesni) +module : kernel +priority : 50 +refcnt : 1 +selftest : passed +internal : yes +type : aead +async : yes +blocksize : 1 +ivsize : 8 +maxauthsize : 16 +geniv : + +name : rfc4106(gcm(aes)) +driver : rfc4106-gcm-aesni +module : kernel +priority : 400 +refcnt : 1 +selftest : passed +internal : no +type : aead +async : yes +blocksize : 1 +ivsize : 8 +maxauthsize : 16 +geniv : + +name : __gcm-aes-aesni +driver : __driver-gcm-aes-aesni +module : kernel +priority : 0 +refcnt : 1 +selftest : passed +internal : yes +type : aead +async : no +blocksize : 1 +ivsize : 8 +maxauthsize : 16 +geniv : + +name : __xts(aes) +driver : __xts-aes-aesni +module : kernel +priority : 401 +refcnt : 1 +selftest : passed +internal : yes +type : skcipher +async : no +blocksize : 16 +min keysize : 32 +max keysize : 64 +ivsize : 16 +chunksize : 16 +walksize : 16 + +name : __ctr(aes) +driver : __ctr-aes-aesni +module : kernel +priority : 400 +refcnt : 1 +selftest : passed +internal : yes +type : skcipher +async : no +blocksize : 1 +min keysize : 16 +max keysize : 32 +ivsize : 16 +chunksize : 16 +walksize : 16 + +name : __cbc(aes) +driver : __cbc-aes-aesni +module : 
kernel +priority : 400 +refcnt : 1 +selftest : passed +internal : yes +type : skcipher +async : no +blocksize : 16 +min keysize : 16 +max keysize : 32 +ivsize : 16 +chunksize : 16 +walksize : 16 + +name : __ecb(aes) +driver : __ecb-aes-aesni +module : kernel +priority : 400 +refcnt : 1 +selftest : passed +internal : yes +type : skcipher +async : no +blocksize : 16 +min keysize : 16 +max keysize : 32 +ivsize : 0 +chunksize : 16 +walksize : 16 + +name : __aes +driver : __aes-aesni +module : kernel +priority : 300 +refcnt : 1 +selftest : passed +internal : yes +type : cipher +blocksize : 16 +min keysize : 16 +max keysize : 32 + +name : aes +driver : aes-aesni +module : kernel +priority : 300 +refcnt : 8 +selftest : passed +internal : no +type : cipher +blocksize : 16 +min keysize : 16 +max keysize : 32 + +name : hmac(sha1) +driver : hmac(sha1-generic) +module : kernel +priority : 100 +refcnt : 9 +selftest : passed +internal : no +type : shash +blocksize : 64 +digestsize : 20 + +name : ghash +driver : ghash-generic +module : kernel +priority : 100 +refcnt : 3 +selftest : passed +internal : no +type : shash +blocksize : 16 +digestsize : 16 + +name : jitterentropy_rng +driver : jitterentropy_rng +module : kernel +priority : 100 +refcnt : 1 +selftest : passed +internal : no +type : rng +seedsize : 0 + +name : stdrng +driver : drbg_nopr_hmac_sha256 +module : kernel +priority : 221 +refcnt : 2 +selftest : passed +internal : no +type : rng +seedsize : 0 + +name : stdrng +driver : drbg_nopr_hmac_sha512 +module : kernel +priority : 220 +refcnt : 1 +selftest : passed +internal : no +type : rng +seedsize : 0 + +name : stdrng +driver : drbg_nopr_hmac_sha384 +module : kernel +priority : 219 +refcnt : 1 +selftest : passed +internal : no +type : rng +seedsize : 0 + +name : stdrng +driver : drbg_nopr_hmac_sha1 +module : kernel +priority : 218 +refcnt : 1 +selftest : passed +internal : no +type : rng +seedsize : 0 + +name : stdrng +driver : drbg_nopr_sha256 +module : kernel +priority 
: 217 +refcnt : 1 +selftest : passed +internal : no +type : rng +seedsize : 0 + +name : stdrng +driver : drbg_nopr_sha512 +module : kernel +priority : 216 +refcnt : 1 +selftest : passed +internal : no +type : rng +seedsize : 0 + +name : stdrng +driver : drbg_nopr_sha384 +module : kernel +priority : 215 +refcnt : 1 +selftest : passed +internal : no +type : rng +seedsize : 0 + +name : stdrng +driver : drbg_nopr_sha1 +module : kernel +priority : 214 +refcnt : 1 +selftest : passed +internal : no +type : rng +seedsize : 0 + +name : stdrng +driver : drbg_nopr_ctr_aes256 +module : kernel +priority : 213 +refcnt : 1 +selftest : passed +internal : no +type : rng +seedsize : 0 + +name : stdrng +driver : drbg_nopr_ctr_aes192 +module : kernel +priority : 212 +refcnt : 1 +selftest : passed +internal : no +type : rng +seedsize : 0 + +name : stdrng +driver : drbg_nopr_ctr_aes128 +module : kernel +priority : 211 +refcnt : 1 +selftest : passed +internal : no +type : rng +seedsize : 0 + +name : hmac(sha256) +driver : hmac(sha256-generic) +module : kernel +priority : 100 +refcnt : 10 +selftest : passed +internal : no +type : shash +blocksize : 64 +digestsize : 32 + +name : stdrng +driver : drbg_pr_hmac_sha256 +module : kernel +priority : 210 +refcnt : 1 +selftest : passed +internal : no +type : rng +seedsize : 0 + +name : stdrng +driver : drbg_pr_hmac_sha512 +module : kernel +priority : 209 +refcnt : 1 +selftest : passed +internal : no +type : rng +seedsize : 0 + +name : stdrng +driver : drbg_pr_hmac_sha384 +module : kernel +priority : 208 +refcnt : 1 +selftest : passed +internal : no +type : rng +seedsize : 0 + +name : stdrng +driver : drbg_pr_hmac_sha1 +module : kernel +priority : 207 +refcnt : 1 +selftest : passed +internal : no +type : rng +seedsize : 0 + +name : stdrng +driver : drbg_pr_sha256 +module : kernel +priority : 206 +refcnt : 1 +selftest : passed +internal : no +type : rng +seedsize : 0 + +name : stdrng +driver : drbg_pr_sha512 +module : kernel +priority : 205 +refcnt 
: 1 +selftest : passed +internal : no +type : rng +seedsize : 0 + +name : stdrng +driver : drbg_pr_sha384 +module : kernel +priority : 204 +refcnt : 1 +selftest : passed +internal : no +type : rng +seedsize : 0 + +name : stdrng +driver : drbg_pr_sha1 +module : kernel +priority : 203 +refcnt : 1 +selftest : passed +internal : no +type : rng +seedsize : 0 + +name : stdrng +driver : drbg_pr_ctr_aes256 +module : kernel +priority : 202 +refcnt : 1 +selftest : passed +internal : no +type : rng +seedsize : 0 + +name : stdrng +driver : drbg_pr_ctr_aes192 +module : kernel +priority : 201 +refcnt : 1 +selftest : passed +internal : no +type : rng +seedsize : 0 + +name : stdrng +driver : drbg_pr_ctr_aes128 +module : kernel +priority : 200 +refcnt : 1 +selftest : passed +internal : no +type : rng +seedsize : 0 + +name : 842 +driver : 842-scomp +module : kernel +priority : 100 +refcnt : 1 +selftest : passed +internal : no +type : scomp + +name : 842 +driver : 842-generic +module : kernel +priority : 100 +refcnt : 1 +selftest : passed +internal : no +type : compression + +name : lzo-rle +driver : lzo-rle-scomp +module : kernel +priority : 0 +refcnt : 1 +selftest : passed +internal : no +type : scomp + +name : lzo-rle +driver : lzo-rle-generic +module : kernel +priority : 0 +refcnt : 1 +selftest : passed +internal : no +type : compression + +name : lzo +driver : lzo-scomp +module : kernel +priority : 0 +refcnt : 1 +selftest : passed +internal : no +type : scomp + +name : lzo +driver : lzo-generic +module : kernel +priority : 0 +refcnt : 9 +selftest : passed +internal : no +type : compression + +name : crct10dif +driver : crct10dif-generic +module : kernel +priority : 100 +refcnt : 1 +selftest : passed +internal : no +type : shash +blocksize : 1 +digestsize : 2 + +name : crc32c +driver : crc32c-generic +module : kernel +priority : 100 +refcnt : 1 +selftest : passed +internal : no +type : shash +blocksize : 1 +digestsize : 4 + +name : zlib-deflate +driver : zlib-deflate-scomp 
+module : kernel +priority : 0 +refcnt : 1 +selftest : passed +internal : no +type : scomp + +name : deflate +driver : deflate-scomp +module : kernel +priority : 0 +refcnt : 1 +selftest : passed +internal : no +type : scomp + +name : deflate +driver : deflate-generic +module : kernel +priority : 0 +refcnt : 1 +selftest : passed +internal : no +type : compression + +name : aes +driver : aes-generic +module : kernel +priority : 100 +refcnt : 1 +selftest : passed +internal : no +type : cipher +blocksize : 16 +min keysize : 16 +max keysize : 32 + +name : sha224 +driver : sha224-generic +module : kernel +priority : 100 +refcnt : 1 +selftest : passed +internal : no +type : shash +blocksize : 64 +digestsize : 28 + +name : sha256 +driver : sha256-generic +module : kernel +priority : 100 +refcnt : 11 +selftest : passed +internal : no +type : shash +blocksize : 64 +digestsize : 32 + +name : sha1 +driver : sha1-generic +module : kernel +priority : 100 +refcnt : 11 +selftest : passed +internal : no +type : shash +blocksize : 64 +digestsize : 20 + +name : md5 +driver : md5-generic +module : kernel +priority : 0 +refcnt : 1 +selftest : passed +internal : no +type : shash +blocksize : 64 +digestsize : 16 + +name : ecb(cipher_null) +driver : ecb-cipher_null +module : kernel +priority : 100 +refcnt : 1 +selftest : passed +internal : no +type : skcipher +async : no +blocksize : 1 +min keysize : 0 +max keysize : 0 +ivsize : 0 +chunksize : 1 +walksize : 1 + +name : digest_null +driver : digest_null-generic +module : kernel +priority : 0 +refcnt : 1 +selftest : passed +internal : no +type : shash +blocksize : 1 +digestsize : 0 + +name : compress_null +driver : compress_null-generic +module : kernel +priority : 0 +refcnt : 1 +selftest : passed +internal : no +type : compression + +name : cipher_null +driver : cipher_null-generic +module : kernel +priority : 0 +refcnt : 1 +selftest : passed +internal : no +type : cipher +blocksize : 1 +min keysize : 0 +max keysize : 0 + +name : rsa 
+driver : rsa-generic +module : kernel +priority : 100 +refcnt : 1 +selftest : passed +internal : no +type : akcipher + +name : dh +driver : dh-generic +module : kernel +priority : 100 +refcnt : 1 +selftest : passed +internal : no +type : kpp + +name : aes +driver : aes-asm +module : kernel +priority : 200 +refcnt : 1 +selftest : passed +internal : no +type : cipher +blocksize : 16 +min keysize : 16 +max keysize : 32 + +Mode: 444 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/diskstats +Lines: 52 + 1 0 ram0 0 0 0 0 0 0 0 0 0 0 0 + 1 1 ram1 0 0 0 0 0 0 0 0 0 0 0 + 1 2 ram2 0 0 0 0 0 0 0 0 0 0 0 + 1 3 ram3 0 0 0 0 0 0 0 0 0 0 0 + 1 4 ram4 0 0 0 0 0 0 0 0 0 0 0 + 1 5 ram5 0 0 0 0 0 0 0 0 0 0 0 + 1 6 ram6 0 0 0 0 0 0 0 0 0 0 0 + 1 7 ram7 0 0 0 0 0 0 0 0 0 0 0 + 1 8 ram8 0 0 0 0 0 0 0 0 0 0 0 + 1 9 ram9 0 0 0 0 0 0 0 0 0 0 0 + 1 10 ram10 0 0 0 0 0 0 0 0 0 0 0 + 1 11 ram11 0 0 0 0 0 0 0 0 0 0 0 + 1 12 ram12 0 0 0 0 0 0 0 0 0 0 0 + 1 13 ram13 0 0 0 0 0 0 0 0 0 0 0 + 1 14 ram14 0 0 0 0 0 0 0 0 0 0 0 + 1 15 ram15 0 0 0 0 0 0 0 0 0 0 0 + 7 0 loop0 0 0 0 0 0 0 0 0 0 0 0 + 7 1 loop1 0 0 0 0 0 0 0 0 0 0 0 + 7 2 loop2 0 0 0 0 0 0 0 0 0 0 0 + 7 3 loop3 0 0 0 0 0 0 0 0 0 0 0 + 7 4 loop4 0 0 0 0 0 0 0 0 0 0 0 + 7 5 loop5 0 0 0 0 0 0 0 0 0 0 0 + 7 6 loop6 0 0 0 0 0 0 0 0 0 0 0 + 7 7 loop7 0 0 0 0 0 0 0 0 0 0 0 + 8 0 sda 25354637 34367663 1003346126 18492372 28444756 11134226 505697032 63877960 0 9653880 82621804 + 8 1 sda1 250 0 2000 36 0 0 0 0 0 36 36 + 8 2 sda2 246 0 1968 32 0 0 0 0 0 32 32 + 8 3 sda3 340 13 2818 52 11 8 152 8 0 56 60 + 8 4 sda4 25353629 34367650 1003337964 18492232 27448755 11134218 505696880 61593380 0 7576432 80332428 + 252 0 dm-0 59910002 0 1003337218 46229572 39231014 0 505696880 1158557800 0 11325968 1206301256 + 252 1 dm-1 388 0 3104 84 74 0 592 0 0 76 84 + 252 2 dm-2 11571 0 308350 6536 153522 0 5093416 122884 0 65400 129416 + 252 3 dm-3 3870 0 3870 104 0 0 0 0 0 16 104 + 252 4 dm-4 392 0 
1034 28 38 0 137 16 0 24 44 + 252 5 dm-5 3729 0 84279 924 98918 0 1151688 104684 0 58848 105632 + 179 0 mmcblk0 192 3 1560 156 0 0 0 0 0 136 156 + 179 1 mmcblk0p1 17 3 160 24 0 0 0 0 0 24 24 + 179 2 mmcblk0p2 95 0 760 68 0 0 0 0 0 68 68 + 2 0 fd0 2 0 16 80 0 0 0 0 0 80 80 + 254 0 vda 1775784 15386 32670882 8655768 6038856 20711856 213637440 2069221364 0 41614592 2077872228 + 254 1 vda1 668 85 5984 956 207 4266 35784 32772 0 8808 33720 + 254 2 vda2 1774936 15266 32663262 8654692 5991028 20707590 213601656 2069152216 0 41607628 2077801992 + 11 0 sr0 0 0 0 0 0 0 0 0 0 0 0 + 259 0 nvme0n1 47114 4 4643973 21650 1078320 43950 39451633 1011053 0 222766 1032546 + 259 1 nvme0n1p1 1140 0 9370 16 1 0 1 0 0 16 16 + 259 2 nvme0n1p2 45914 4 4631243 21626 1036885 43950 39451632 919480 0 131580 940970 + 8 0 sdb 326552 841 9657779 84 41822 2895 1972905 5007 0 60730 67070 68851 0 1925173784 11130 + 8 1 sdb1 231 3 34466 4 24 23 106 0 0 64 64 0 0 0 0 + 8 2 sdb2 326310 838 9622281 67 40726 2872 1972799 4924 0 58250 64567 68851 0 1925173784 11130 + 8 0 sdc 14202 71 579164 21861 2995 1589 180500 40875 0 11628 55200 0 0 0 0 127 182 + 8 1 sdc1 1027 0 13795 5021 2 0 4096 3 0 690 4579 0 0 0 0 0 0 + 8 2 sdc2 13126 71 561749 16802 2830 1589 176404 40620 0 10931 50449 0 0 0 0 0 0 +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/proc/fs +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/proc/fs/fscache +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/fs/fscache/stats +Lines: 24 +FS-Cache statistics +Cookies: idx=3 dat=67877 spc=0 +Objects: alc=67473 nal=0 avl=67473 ded=388 +ChkAux : non=12 ok=33 upd=44 obs=55 +Pages : mrk=547164 unc=364577 +Acquire: n=67880 nul=98 noc=25 ok=67780 nbf=39 oom=26 +Lookups: n=67473 neg=67470 pos=58 crt=67473 tmo=85 +Invals : n=14 run=13 +Updates: n=7 nul=3 run=8 +Relinqs: n=394 
nul=1 wcr=2 rtr=3 +AttrChg: n=6 ok=5 nbf=4 oom=3 run=2 +Allocs : n=20 ok=19 wt=18 nbf=17 int=16 +Allocs : ops=15 owt=14 abt=13 +Retrvls: n=151959 ok=82823 wt=23467 nod=69136 nbf=15 int=69 oom=43 +Retrvls: ops=151959 owt=42747 abt=44 +Stores : n=225565 ok=225565 agn=12 nbf=13 oom=14 +Stores : ops=69156 run=294721 pgs=225565 rxd=225565 olm=43 +VmScan : nos=364512 gon=2 bsy=43 can=12 wt=66 +Ops : pend=42753 run=221129 enq=628798 can=11 rej=88 +Ops : ini=377538 dfr=27 rel=377538 gc=37 +CacheOp: alo=1 luo=2 luc=3 gro=4 +CacheOp: inv=5 upo=6 dro=7 pto=8 atc=9 syn=10 +CacheOp: rap=11 ras=12 alp=13 als=14 wrp=15 ucp=16 dsp=17 +CacheEv: nsp=18 stl=19 rtr=20 cul=21EOF +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/proc/fs/xfs +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/fs/xfs/stat +Lines: 23 +extent_alloc 92447 97589 92448 93751 +abt 0 0 0 0 +blk_map 1767055 188820 184891 92447 92448 2140766 0 +bmbt 0 0 0 0 +dir 185039 92447 92444 136422 +trans 706 944304 0 +ig 185045 58807 0 126238 0 33637 22 +log 2883 113448 9 17360 739 +push_ail 945014 0 134260 15483 0 3940 464 159985 0 40 +xstrat 92447 0 +rw 107739 94045 +attr 4 0 0 0 +icluster 8677 7849 135802 +vnodes 92601 0 0 0 92444 92444 92444 0 +buf 2666287 7122 2659202 3599 2 7085 0 10297 7085 +abtb2 184941 1277345 13257 13278 0 0 0 0 0 0 0 0 0 0 2746147 +abtc2 345295 2416764 172637 172658 0 0 0 0 0 0 0 0 0 0 21406023 +bmbt2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +ibt2 343004 1358467 0 0 0 0 0 0 0 0 0 0 0 0 0 +fibt2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +qm 0 0 0 0 0 0 0 0 +xpc 399724544 92823103 86219234 +debug 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/loadavg +Lines: 1 +0.02 0.04 0.05 1/497 11947 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/mdstat +Lines: 60 +Personalities : 
[linear] [multipath] [raid0] [raid1] [raid6] [raid5] [raid4] [raid10] + +md3 : active raid6 sda1[8] sdh1[7] sdg1[6] sdf1[5] sde1[11] sdd1[3] sdc1[10] sdb1[9] sdd1[10](S) sdd2[11](S) + 5853468288 blocks super 1.2 level 6, 64k chunk, algorithm 2 [8/8] [UUUUUUUU] + +md127 : active raid1 sdi2[0] sdj2[1] + 312319552 blocks [2/2] [UU] + +md0 : active raid1 sdi1[0] sdj1[1] + 248896 blocks [2/2] [UU] + +md4 : inactive raid1 sda3[0](F) sdb3[1](S) + 4883648 blocks [2/2] [UU] + +md6 : active raid1 sdb2[2](F) sdc[1](S) sda2[0] + 195310144 blocks [2/1] [U_] + [=>...................] recovery = 8.5% (16775552/195310144) finish=17.0min speed=259783K/sec + +md8 : active raid1 sdb1[1] sda1[0] sdc[2](S) sde[3](S) + 195310144 blocks [2/2] [UU] + [=>...................] resync = 8.5% (16775552/195310144) finish=17.0min speed=259783K/sec + +md201 : active raid1 sda3[0] sdb3[1] + 1993728 blocks super 1.2 [2/2] [UU] + [=>...................] check = 5.7% (114176/1993728) finish=0.2min speed=114176K/sec + +md7 : active raid6 sdb1[0] sde1[3] sdd1[2] sdc1[1](F) + 7813735424 blocks super 1.2 level 6, 512k chunk, algorithm 2 [4/3] [U_UU] + bitmap: 0/30 pages [0KB], 65536KB chunk + +md9 : active raid1 sdc2[2] sdd2[3] sdb2[1] sda2[0] sde[4](F) sdf[5](F) sdg[6](S) + 523968 blocks super 1.2 [4/4] [UUUU] + resync=DELAYED + +md10 : active raid0 sda1[0] sdb1[1] + 314159265 blocks 64k chunks + +md11 : active (auto-read-only) raid1 sdb2[0] sdc2[1] sdc3[2](F) hda[4](S) ssdc2[3](S) + 4190208 blocks super 1.2 [2/2] [UU] + resync=PENDING + +md12 : active raid0 sdc2[0] sdd2[1] + 3886394368 blocks super 1.2 512k chunks + +md126 : active raid0 sdb[1] sdc[0] + 1855870976 blocks super external:/md127/0 128k chunks + +md219 : inactive sdb[2](S) sdc[1](S) sda[0](S) + 7932 blocks super external:imsm + +md00 : active raid0 xvdb[0] + 4186624 blocks super 1.2 256k chunks + +md120 : active linear sda1[1] sdb1[0] + 2095104 blocks super 1.2 0k rounding + +md101 : active (read-only) raid0 sdb[2] sdd[1] sdc[0] + 322560 
blocks super 1.2 512k chunks + +unused devices: +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/meminfo +Lines: 42 +MemTotal: 15666184 kB +MemFree: 440324 kB +Buffers: 1020128 kB +Cached: 12007640 kB +SwapCached: 0 kB +Active: 6761276 kB +Inactive: 6532708 kB +Active(anon): 267256 kB +Inactive(anon): 268 kB +Active(file): 6494020 kB +Inactive(file): 6532440 kB +Unevictable: 0 kB +Mlocked: 0 kB +SwapTotal: 0 kB +SwapFree: 0 kB +Dirty: 768 kB +Writeback: 0 kB +AnonPages: 266216 kB +Mapped: 44204 kB +Shmem: 1308 kB +Slab: 1807264 kB +SReclaimable: 1738124 kB +SUnreclaim: 69140 kB +KernelStack: 1616 kB +PageTables: 5288 kB +NFS_Unstable: 0 kB +Bounce: 0 kB +WritebackTmp: 0 kB +CommitLimit: 7833092 kB +Committed_AS: 530844 kB +VmallocTotal: 34359738367 kB +VmallocUsed: 36596 kB +VmallocChunk: 34359637840 kB +HardwareCorrupted: 0 kB +AnonHugePages: 12288 kB +HugePages_Total: 0 +HugePages_Free: 0 +HugePages_Rsvd: 0 +HugePages_Surp: 0 +Hugepagesize: 2048 kB +DirectMap4k: 91136 kB +DirectMap2M: 16039936 kB +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/proc/net +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/net/arp +Lines: 2 +IP address HW type Flags HW address Mask Device +192.168.224.1 0x1 0x2 00:50:56:c0:00:08 * ens33 +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/net/dev +Lines: 6 +Inter-| Receive | Transmit + face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed +vethf345468: 648 8 0 0 0 0 0 0 438 5 0 0 0 0 0 0 + lo: 1664039048 1566805 0 0 0 0 0 0 1664039048 1566805 0 0 0 0 0 0 +docker0: 2568 38 0 0 0 0 0 0 438 5 0 0 0 0 0 0 + eth0: 874354587 1036395 0 0 0 0 0 0 563352563 732147 0 0 0 0 0 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - +Path: fixtures/proc/net/ip_vs +Lines: 21 +IP Virtual Server version 1.2.1 (size=4096) +Prot LocalAddress:Port Scheduler Flags + -> RemoteAddress:Port Forward Weight ActiveConn InActConn +TCP C0A80016:0CEA wlc + -> C0A85216:0CEA Tunnel 100 248 2 + -> C0A85318:0CEA Tunnel 100 248 2 + -> C0A85315:0CEA Tunnel 100 248 1 +TCP C0A80039:0CEA wlc + -> C0A85416:0CEA Tunnel 0 0 0 + -> C0A85215:0CEA Tunnel 100 1499 0 + -> C0A83215:0CEA Tunnel 100 1498 0 +TCP C0A80037:0CEA wlc + -> C0A8321A:0CEA Tunnel 0 0 0 + -> C0A83120:0CEA Tunnel 100 0 0 +TCP [2620:0000:0000:0000:0000:0000:0000:0001]:0050 sh + -> [2620:0000:0000:0000:0000:0000:0000:0002]:0050 Route 1 0 0 + -> [2620:0000:0000:0000:0000:0000:0000:0003]:0050 Route 1 0 0 + -> [2620:0000:0000:0000:0000:0000:0000:0004]:0050 Route 1 1 1 +FWM 10001000 wlc + -> C0A8321A:0CEA Route 0 0 1 + -> C0A83215:0CEA Route 0 0 2 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/net/ip_vs_stats +Lines: 6 + Total Incoming Outgoing Incoming Outgoing + Conns Packets Packets Bytes Bytes + 16AA370 E33656E5 0 51D8C8883AB3 0 + + Conns/s Pkts/s Pkts/s Bytes/s Bytes/s + 4 1FB3C 0 1282A8F 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/net/protocols +Lines: 14 +protocol size sockets memory press maxhdr slab module cl co di ac io in de sh ss gs se re sp bi br ha uh gp em +PACKET 1344 2 -1 NI 0 no kernel n n n n n n n n n n n n n n n n n n n +PINGv6 1112 0 -1 NI 0 yes kernel y y y n n y n n y y y y n y y y y y n +RAWv6 1112 1 -1 NI 0 yes kernel y y y n y y y n y y y y n y y y y n n +UDPLITEv6 1216 0 57 NI 0 yes kernel y y y n y y y n y y y y n n n y y y n +UDPv6 1216 10 57 NI 0 yes kernel y y y n y y y n y y y y n n n y y y n +TCPv6 2144 1937 1225378 no 320 yes kernel y y y y y y y y y y y y y n y y y y y +UNIX 1024 120 -1 NI 0 yes kernel n n n n n n n n n n n n n n n n n n n +UDP-Lite 1024 0 57 NI 0 yes 
kernel y y y n y y y n y y y y y n n y y y n +PING 904 0 -1 NI 0 yes kernel y y y n n y n n y y y y n y y y y y n +RAW 912 0 -1 NI 0 yes kernel y y y n y y y n y y y y n y y y y n n +UDP 1024 73 57 NI 0 yes kernel y y y n y y y n y y y y y n n y y y n +TCP 1984 93064 1225378 yes 320 yes kernel y y y y y y y y y y y y y n y y y y y +NETLINK 1040 16 -1 NI 0 no kernel n n n n n n n n n n n n n n n n n n n +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/proc/net/rpc +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/net/rpc/nfs +Lines: 5 +net 18628 0 18628 6 +rpc 4329785 0 4338291 +proc2 18 2 69 0 0 4410 0 0 0 0 0 0 0 0 0 0 0 99 2 +proc3 22 1 4084749 29200 94754 32580 186 47747 7981 8639 0 6356 0 6962 0 7958 0 0 241 4 4 2 39 +proc4 61 1 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 0 0 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/net/rpc/nfsd +Lines: 11 +rc 0 6 18622 +fh 0 0 0 0 0 +io 157286400 0 +th 8 0 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 +ra 32 0 0 0 0 0 0 0 0 0 0 0 +net 18628 0 18628 6 +rpc 18628 0 0 0 0 +proc2 18 2 69 0 0 4410 0 0 0 0 0 0 0 0 0 0 0 99 2 +proc3 22 2 112 0 2719 111 0 0 0 0 0 0 0 0 0 0 0 27 216 0 2 1 0 +proc4 2 2 10853 +proc4ops 72 0 0 0 1098 2 0 0 0 0 8179 5896 0 0 0 0 5900 0 0 2 0 2 0 9609 0 2 150 1272 0 0 0 1236 0 0 0 0 3 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/net/sockstat +Lines: 6 +sockets: used 1602 +TCP: inuse 35 orphan 0 tw 4 alloc 59 mem 22 +UDP: inuse 12 mem 62 +UDPLITE: inuse 0 +RAW: inuse 0 +FRAG: inuse 0 memory 0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
fixtures/proc/net/sockstat6 +Lines: 5 +TCP6: inuse 17 +UDP6: inuse 9 +UDPLITE6: inuse 0 +RAW6: inuse 1 +FRAG6: inuse 0 memory 0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/net/softnet_stat +Lines: 2 +00015c73 00020e76 F0000769 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 +01663fb2 00000000 000109a4 00000000 00000000 00000000 00000000 00000000 00000000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/net/softnet_stat.broken +Lines: 1 +00015c73 00020e76 F0000769 00000000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/net/tcp +Lines: 4 + sl local_address rem_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode + 0: 0500000A:0016 00000000:0000 0A 00000000:00000001 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0 + 1: 00000000:0016 00000000:0000 0A 00000001:00000000 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0 + 2: 00000000:0016 00000000:0000 0A 00000001:00000001 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/net/tcp6 +Lines: 3 + sl local_address remote_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode ref pointer drops + 1315: 00000000000000000000000000000000:14EB 00000000000000000000000000000000:0000 07 00000000:00000000 00:00000000 00000000 981 0 21040 2 0000000013726323 0 + 6073: 000080FE00000000FFADE15609667CFE:C781 00000000000000000000000000000000:0000 07 00000000:00000000 00:00000000 00000000 1000 0 11337031 2 00000000b9256fdd 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/net/udp +Lines: 4 + sl local_address rem_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode + 0: 
0500000A:0016 00000000:0000 0A 00000000:00000001 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0 + 1: 00000000:0016 00000000:0000 0A 00000001:00000000 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0 + 2: 00000000:0016 00000000:0000 0A 00000001:00000001 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/net/udp6 +Lines: 3 + sl local_address remote_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode ref pointer drops + 1315: 00000000000000000000000000000000:14EB 00000000000000000000000000000000:0000 07 00000000:00000000 00:00000000 00000000 981 0 21040 2 0000000013726323 0 + 6073: 000080FE00000000FFADE15609667CFE:C781 00000000000000000000000000000000:0000 07 00000000:00000000 00:00000000 00000000 1000 0 11337031 2 00000000b9256fdd 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/net/udp_broken +Lines: 2 + sl local_address rem_address st + 1: 00000000:0016 00000000:0000 0A +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/net/unix +Lines: 6 +Num RefCount Protocol Flags Type St Inode Path +0000000000000000: 00000002 00000000 00010000 0001 01 3442596 /var/run/postgresql/.s.PGSQL.5432 +0000000000000000: 0000000a 00000000 00010000 0005 01 10061 /run/udev/control +0000000000000000: 00000007 00000000 00000000 0002 01 12392 /dev/log +0000000000000000: 00000003 00000000 00000000 0001 03 4787297 /var/run/postgresql/.s.PGSQL.5432 +0000000000000000: 00000003 00000000 00000000 0001 03 5091797 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/net/unix_without_inode +Lines: 6 +Num RefCount Protocol Flags Type St Path +0000000000000000: 00000002 00000000 00010000 0001 01 /var/run/postgresql/.s.PGSQL.5432 +0000000000000000: 0000000a 
00000000 00010000 0005 01 /run/udev/control +0000000000000000: 00000007 00000000 00000000 0002 01 /dev/log +0000000000000000: 00000003 00000000 00000000 0001 03 /var/run/postgresql/.s.PGSQL.5432 +0000000000000000: 00000003 00000000 00000000 0001 03 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/net/xfrm_stat +Lines: 28 +XfrmInError 1 +XfrmInBufferError 2 +XfrmInHdrError 4 +XfrmInNoStates 3 +XfrmInStateProtoError 40 +XfrmInStateModeError 100 +XfrmInStateSeqError 6000 +XfrmInStateExpired 4 +XfrmInStateMismatch 23451 +XfrmInStateInvalid 55555 +XfrmInTmplMismatch 51 +XfrmInNoPols 65432 +XfrmInPolBlock 100 +XfrmInPolError 10000 +XfrmOutError 1000000 +XfrmOutBundleGenError 43321 +XfrmOutBundleCheckError 555 +XfrmOutNoStates 869 +XfrmOutStateProtoError 4542 +XfrmOutStateModeError 4 +XfrmOutStateSeqError 543 +XfrmOutStateExpired 565 +XfrmOutPolBlock 43456 +XfrmOutPolDead 7656 +XfrmOutPolError 1454 +XfrmFwdHdrError 6654 +XfrmOutStateInvalid 28765 +XfrmAcquireError 24532 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/proc/pressure +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/pressure/cpu +Lines: 1 +some avg10=0.10 avg60=2.00 avg300=3.85 total=15 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/pressure/io +Lines: 2 +some avg10=0.10 avg60=2.00 avg300=3.85 total=15 +full avg10=0.20 avg60=3.00 avg300=4.95 total=25 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/pressure/memory +Lines: 2 +some avg10=0.10 avg60=2.00 avg300=3.85 total=15 +full avg10=0.20 avg60=3.00 avg300=4.95 total=25 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/schedstat +Lines: 6 +version 15 +timestamp 15819019232 +cpu0 
498494191 0 3533438552 2553969831 3853684107 2465731542 2045936778163039 343796328169361 4767485306 +domain0 00000000,00000003 212499247 210112015 1861015 1860405436 536440 369895 32599 210079416 25368550 24241256 384652 927363878 807233 6366 1647 24239609 2122447165 1886868564 121112060 2848625533 125678146 241025 1032026 1885836538 2545 12 2533 0 0 0 0 0 0 1387952561 21076581 0 +cpu1 518377256 0 4155211005 2778589869 10466382 2867629021 1904686152592476 364107263788241 5145567945 +domain0 00000000,00000003 217653037 215526982 1577949 1580427380 557469 393576 28538 215498444 28721913 27662819 371153 870843407 745912 5523 1639 27661180 2331056874 2107732788 111442342 652402556 123615235 196159 1045245 2106687543 2400 3 2397 0 0 0 0 0 0 1437804657 26220076 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/self +SymlinkTo: 26231 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/slabinfo +Lines: 302 +slabinfo - version: 2.1 +# name : tunables : slabdata +pid_3 375 532 576 28 4 : tunables 0 0 0 : slabdata 19 19 0 +pid_2 3 28 576 28 4 : tunables 0 0 0 : slabdata 1 1 0 +nvidia_p2p_page_cache 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0 +nvidia_pte_cache 9022 9152 368 22 2 : tunables 0 0 0 : slabdata 416 416 0 +nvidia_stack_cache 321 326 12624 2 8 : tunables 0 0 0 : slabdata 163 163 0 +kvm_async_pf 0 0 472 34 4 : tunables 0 0 0 : slabdata 0 0 0 +kvm_vcpu 0 0 15552 2 8 : tunables 0 0 0 : slabdata 0 0 0 +kvm_mmu_page_header 0 0 504 32 4 : tunables 0 0 0 : slabdata 0 0 0 +pte_list_desc 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0 +x86_emulator 0 0 3024 10 8 : tunables 0 0 0 : slabdata 0 0 0 +x86_fpu 0 0 4608 7 8 : tunables 0 0 0 : slabdata 0 0 0 +iwl_cmd_pool:0000:04:00.0 0 128 512 32 4 : tunables 0 0 0 : slabdata 4 4 0 +ext4_groupinfo_4k 3719 3740 480 34 4 : tunables 0 0 0 : slabdata 110 110 0 +bio-6 32 75 640 25 4 : tunables 0 0 0 : slabdata 3 3 0 +bio-5 
16 48 1344 24 8 : tunables 0 0 0 : slabdata 2 2 0 +bio-4 17 92 1408 23 8 : tunables 0 0 0 : slabdata 4 4 0 +fat_inode_cache 0 0 1056 31 8 : tunables 0 0 0 : slabdata 0 0 0 +fat_cache 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0 +ovl_aio_req 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0 +ovl_inode 0 0 1000 32 8 : tunables 0 0 0 : slabdata 0 0 0 +squashfs_inode_cache 0 0 1088 30 8 : tunables 0 0 0 : slabdata 0 0 0 +fuse_request 0 0 472 34 4 : tunables 0 0 0 : slabdata 0 0 0 +fuse_inode 0 0 1152 28 8 : tunables 0 0 0 : slabdata 0 0 0 +xfs_dqtrx 0 0 864 37 8 : tunables 0 0 0 : slabdata 0 0 0 +xfs_dquot 0 0 832 39 8 : tunables 0 0 0 : slabdata 0 0 0 +xfs_buf 0 0 768 21 4 : tunables 0 0 0 : slabdata 0 0 0 +xfs_bui_item 0 0 544 30 4 : tunables 0 0 0 : slabdata 0 0 0 +xfs_bud_item 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0 +xfs_cui_item 0 0 768 21 4 : tunables 0 0 0 : slabdata 0 0 0 +xfs_cud_item 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0 +xfs_rui_item 0 0 1024 32 8 : tunables 0 0 0 : slabdata 0 0 0 +xfs_rud_item 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0 +xfs_icr 0 0 520 31 4 : tunables 0 0 0 : slabdata 0 0 0 +xfs_ili 0 0 528 31 4 : tunables 0 0 0 : slabdata 0 0 0 +xfs_inode 0 0 1344 24 8 : tunables 0 0 0 : slabdata 0 0 0 +xfs_efi_item 0 0 768 21 4 : tunables 0 0 0 : slabdata 0 0 0 +xfs_efd_item 0 0 776 21 4 : tunables 0 0 0 : slabdata 0 0 0 +xfs_buf_item 0 0 608 26 4 : tunables 0 0 0 : slabdata 0 0 0 +xf_trans 0 0 568 28 4 : tunables 0 0 0 : slabdata 0 0 0 +xfs_ifork 0 0 376 21 2 : tunables 0 0 0 : slabdata 0 0 0 +xfs_da_state 0 0 816 20 4 : tunables 0 0 0 : slabdata 0 0 0 +xfs_btree_cur 0 0 560 29 4 : tunables 0 0 0 : slabdata 0 0 0 +xfs_bmap_free_item 0 0 400 20 2 : tunables 0 0 0 : slabdata 0 0 0 +xfs_log_ticket 0 0 520 31 4 : tunables 0 0 0 : slabdata 0 0 0 +nfs_direct_cache 0 0 560 29 4 : tunables 0 0 0 : slabdata 0 0 0 +nfs_commit_data 4 28 1152 28 8 : tunables 0 0 0 : slabdata 1 1 0 +nfs_write_data 32 50 1280 25 8 : tunables 0 0 0 : slabdata 2 2 0 
+nfs_read_data 0 0 1280 25 8 : tunables 0 0 0 : slabdata 0 0 0 +nfs_inode_cache 0 0 1408 23 8 : tunables 0 0 0 : slabdata 0 0 0 +nfs_page 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0 +rpc_inode_cache 0 0 1024 32 8 : tunables 0 0 0 : slabdata 0 0 0 +rpc_buffers 8 13 2496 13 8 : tunables 0 0 0 : slabdata 1 1 0 +rpc_tasks 8 25 640 25 4 : tunables 0 0 0 : slabdata 1 1 0 +fscache_cookie_jar 1 35 464 35 4 : tunables 0 0 0 : slabdata 1 1 0 +jfs_mp 32 35 464 35 4 : tunables 0 0 0 : slabdata 1 1 0 +jfs_ip 0 0 1592 20 8 : tunables 0 0 0 : slabdata 0 0 0 +reiser_inode_cache 0 0 1096 29 8 : tunables 0 0 0 : slabdata 0 0 0 +btrfs_end_io_wq 0 0 464 35 4 : tunables 0 0 0 : slabdata 0 0 0 +btrfs_prelim_ref 0 0 424 38 4 : tunables 0 0 0 : slabdata 0 0 0 +btrfs_delayed_extent_op 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0 +btrfs_delayed_data_ref 0 0 448 36 4 : tunables 0 0 0 : slabdata 0 0 0 +btrfs_delayed_tree_ref 0 0 440 37 4 : tunables 0 0 0 : slabdata 0 0 0 +btrfs_delayed_ref_head 0 0 480 34 4 : tunables 0 0 0 : slabdata 0 0 0 +btrfs_inode_defrag 0 0 400 20 2 : tunables 0 0 0 : slabdata 0 0 0 +btrfs_delayed_node 0 0 648 25 4 : tunables 0 0 0 : slabdata 0 0 0 +btrfs_ordered_extent 0 0 752 21 4 : tunables 0 0 0 : slabdata 0 0 0 +btrfs_extent_map 0 0 480 34 4 : tunables 0 0 0 : slabdata 0 0 0 +btrfs_extent_state 0 0 416 39 4 : tunables 0 0 0 : slabdata 0 0 0 +bio-3 35 92 704 23 4 : tunables 0 0 0 : slabdata 4 4 0 +btrfs_extent_buffer 0 0 600 27 4 : tunables 0 0 0 : slabdata 0 0 0 +btrfs_free_space_bitmap 0 0 12288 2 8 : tunables 0 0 0 : slabdata 0 0 0 +btrfs_free_space 0 0 416 39 4 : tunables 0 0 0 : slabdata 0 0 0 +btrfs_path 0 0 448 36 4 : tunables 0 0 0 : slabdata 0 0 0 +btrfs_trans_handle 0 0 440 37 4 : tunables 0 0 0 : slabdata 0 0 0 +btrfs_inode 0 0 1496 21 8 : tunables 0 0 0 : slabdata 0 0 0 +ext4_inode_cache 84136 84755 1400 23 8 : tunables 0 0 0 : slabdata 3685 3685 0 +ext4_free_data 22 80 392 20 2 : tunables 0 0 0 : slabdata 4 4 0 +ext4_allocation_context 0 70 464 35 
4 : tunables 0 0 0 : slabdata 2 2 0 +ext4_prealloc_space 24 74 440 37 4 : tunables 0 0 0 : slabdata 2 2 0 +ext4_system_zone 267 273 376 21 2 : tunables 0 0 0 : slabdata 13 13 0 +ext4_io_end_vec 0 88 368 22 2 : tunables 0 0 0 : slabdata 4 4 0 +ext4_io_end 0 80 400 20 2 : tunables 0 0 0 : slabdata 4 4 0 +ext4_bio_post_read_ctx 128 147 384 21 2 : tunables 0 0 0 : slabdata 7 7 0 +ext4_pending_reservation 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0 +ext4_extent_status 79351 79422 376 21 2 : tunables 0 0 0 : slabdata 3782 3782 0 +jbd2_transaction_s 44 100 640 25 4 : tunables 0 0 0 : slabdata 4 4 0 +jbd2_inode 6785 6840 400 20 2 : tunables 0 0 0 : slabdata 342 342 0 +jbd2_journal_handle 0 80 392 20 2 : tunables 0 0 0 : slabdata 4 4 0 +jbd2_journal_head 824 1944 448 36 4 : tunables 0 0 0 : slabdata 54 54 0 +jbd2_revoke_table_s 4 23 352 23 2 : tunables 0 0 0 : slabdata 1 1 0 +jbd2_revoke_record_s 0 156 416 39 4 : tunables 0 0 0 : slabdata 4 4 0 +ext2_inode_cache 0 0 1144 28 8 : tunables 0 0 0 : slabdata 0 0 0 +mbcache 0 0 392 20 2 : tunables 0 0 0 : slabdata 0 0 0 +dm_thin_new_mapping 0 152 424 38 4 : tunables 0 0 0 : slabdata 4 4 0 +dm_snap_pending_exception 0 0 464 35 4 : tunables 0 0 0 : slabdata 0 0 0 +dm_exception 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0 +dm_dirty_log_flush_entry 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0 +dm_bio_prison_cell_v2 0 0 432 37 4 : tunables 0 0 0 : slabdata 0 0 0 +dm_bio_prison_cell 0 148 432 37 4 : tunables 0 0 0 : slabdata 4 4 0 +kcopyd_job 0 8 3648 8 8 : tunables 0 0 0 : slabdata 1 1 0 +io 0 32 512 32 4 : tunables 0 0 0 : slabdata 1 1 0 +dm_uevent 0 0 3224 10 8 : tunables 0 0 0 : slabdata 0 0 0 +dax_cache 1 28 1152 28 8 : tunables 0 0 0 : slabdata 1 1 0 +aic94xx_ascb 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0 +aic94xx_dma_token 0 0 384 21 2 : tunables 0 0 0 : slabdata 0 0 0 +asd_sas_event 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0 +sas_task 0 0 704 23 4 : tunables 0 0 0 : slabdata 0 0 0 +qla2xxx_srbs 0 0 832 39 8 : 
tunables 0 0 0 : slabdata 0 0 0 +sd_ext_cdb 2 22 368 22 2 : tunables 0 0 0 : slabdata 1 1 0 +scsi_sense_cache 258 288 512 32 4 : tunables 0 0 0 : slabdata 9 9 0 +virtio_scsi_cmd 64 75 640 25 4 : tunables 0 0 0 : slabdata 3 3 0 +L2TP/IPv6 0 0 1536 21 8 : tunables 0 0 0 : slabdata 0 0 0 +L2TP/IP 0 0 1408 23 8 : tunables 0 0 0 : slabdata 0 0 0 +ip6-frags 0 0 520 31 4 : tunables 0 0 0 : slabdata 0 0 0 +fib6_nodes 5 32 512 32 4 : tunables 0 0 0 : slabdata 1 1 0 +ip6_dst_cache 4 25 640 25 4 : tunables 0 0 0 : slabdata 1 1 0 +ip6_mrt_cache 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0 +PINGv6 0 0 1600 20 8 : tunables 0 0 0 : slabdata 0 0 0 +RAWv6 25 40 1600 20 8 : tunables 0 0 0 : slabdata 2 2 0 +UDPLITEv6 0 0 1728 18 8 : tunables 0 0 0 : slabdata 0 0 0 +UDPv6 3 54 1728 18 8 : tunables 0 0 0 : slabdata 3 3 0 +tw_sock_TCPv6 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0 +request_sock_TCPv6 0 0 632 25 4 : tunables 0 0 0 : slabdata 0 0 0 +TCPv6 0 33 2752 11 8 : tunables 0 0 0 : slabdata 3 3 0 +uhci_urb_priv 0 0 392 20 2 : tunables 0 0 0 : slabdata 0 0 0 +sgpool-128 2 14 4544 7 8 : tunables 0 0 0 : slabdata 2 2 0 +sgpool-64 2 13 2496 13 8 : tunables 0 0 0 : slabdata 1 1 0 +sgpool-32 2 44 1472 22 8 : tunables 0 0 0 : slabdata 2 2 0 +sgpool-16 2 68 960 34 8 : tunables 0 0 0 : slabdata 2 2 0 +sgpool-8 2 46 704 23 4 : tunables 0 0 0 : slabdata 2 2 0 +btree_node 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0 +bfq_io_cq 0 0 488 33 4 : tunables 0 0 0 : slabdata 0 0 0 +bfq_queue 0 0 848 38 8 : tunables 0 0 0 : slabdata 0 0 0 +mqueue_inode_cache 1 24 1344 24 8 : tunables 0 0 0 : slabdata 1 1 0 +isofs_inode_cache 0 0 968 33 8 : tunables 0 0 0 : slabdata 0 0 0 +io_kiocb 0 0 640 25 4 : tunables 0 0 0 : slabdata 0 0 0 +kioctx 0 30 1088 30 8 : tunables 0 0 0 : slabdata 1 1 0 +aio_kiocb 0 28 576 28 4 : tunables 0 0 0 : slabdata 1 1 0 +userfaultfd_ctx_cache 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0 +fanotify_path_event 0 0 392 20 2 : tunables 0 0 0 : slabdata 0 0 0 +fanotify_fid_event 
0 0 400 20 2 : tunables 0 0 0 : slabdata 0 0 0 +fsnotify_mark 0 0 408 20 2 : tunables 0 0 0 : slabdata 0 0 0 +dnotify_mark 0 0 416 39 4 : tunables 0 0 0 : slabdata 0 0 0 +dnotify_struct 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0 +dio 0 0 1088 30 8 : tunables 0 0 0 : slabdata 0 0 0 +bio-2 4 25 640 25 4 : tunables 0 0 0 : slabdata 1 1 0 +fasync_cache 0 0 384 21 2 : tunables 0 0 0 : slabdata 0 0 0 +audit_tree_mark 0 0 416 39 4 : tunables 0 0 0 : slabdata 0 0 0 +pid_namespace 30 34 480 34 4 : tunables 0 0 0 : slabdata 1 1 0 +posix_timers_cache 0 27 592 27 4 : tunables 0 0 0 : slabdata 1 1 0 +iommu_devinfo 24 32 512 32 4 : tunables 0 0 0 : slabdata 1 1 0 +iommu_domain 10 10 3264 10 8 : tunables 0 0 0 : slabdata 1 1 0 +iommu_iova 8682 8748 448 36 4 : tunables 0 0 0 : slabdata 243 243 0 +UNIX 529 814 1472 22 8 : tunables 0 0 0 : slabdata 37 37 0 +ip4-frags 0 0 536 30 4 : tunables 0 0 0 : slabdata 0 0 0 +ip_mrt_cache 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0 +UDP-Lite 0 0 1536 21 8 : tunables 0 0 0 : slabdata 0 0 0 +tcp_bind_bucket 7 128 512 32 4 : tunables 0 0 0 : slabdata 4 4 0 +inet_peer_cache 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0 +xfrm_dst_cache 0 0 704 23 4 : tunables 0 0 0 : slabdata 0 0 0 +xfrm_state 0 0 1152 28 8 : tunables 0 0 0 : slabdata 0 0 0 +ip_fib_trie 7 21 384 21 2 : tunables 0 0 0 : slabdata 1 1 0 +ip_fib_alias 9 20 392 20 2 : tunables 0 0 0 : slabdata 1 1 0 +ip_dst_cache 27 84 576 28 4 : tunables 0 0 0 : slabdata 3 3 0 +PING 0 0 1408 23 8 : tunables 0 0 0 : slabdata 0 0 0 +RAW 32 46 1408 23 8 : tunables 0 0 0 : slabdata 2 2 0 +UDP 11 168 1536 21 8 : tunables 0 0 0 : slabdata 8 8 0 +tw_sock_TCP 1 56 576 28 4 : tunables 0 0 0 : slabdata 2 2 0 +request_sock_TCP 0 25 632 25 4 : tunables 0 0 0 : slabdata 1 1 0 +TCP 10 60 2624 12 8 : tunables 0 0 0 : slabdata 5 5 0 +hugetlbfs_inode_cache 2 35 928 35 8 : tunables 0 0 0 : slabdata 1 1 0 +dquot 0 0 640 25 4 : tunables 0 0 0 : slabdata 0 0 0 +bio-1 32 46 704 23 4 : tunables 0 0 0 : slabdata 2 2 0 
+eventpoll_pwq 409 600 408 20 2 : tunables 0 0 0 : slabdata 30 30 0 +eventpoll_epi 408 672 576 28 4 : tunables 0 0 0 : slabdata 24 24 0 +inotify_inode_mark 58 195 416 39 4 : tunables 0 0 0 : slabdata 5 5 0 +scsi_data_buffer 0 0 360 22 2 : tunables 0 0 0 : slabdata 0 0 0 +bio_crypt_ctx 128 147 376 21 2 : tunables 0 0 0 : slabdata 7 7 0 +request_queue 29 39 2408 13 8 : tunables 0 0 0 : slabdata 3 3 0 +blkdev_ioc 81 148 440 37 4 : tunables 0 0 0 : slabdata 4 4 0 +bio-0 125 200 640 25 4 : tunables 0 0 0 : slabdata 8 8 0 +biovec-max 166 196 4544 7 8 : tunables 0 0 0 : slabdata 28 28 0 +biovec-128 0 52 2496 13 8 : tunables 0 0 0 : slabdata 4 4 0 +biovec-64 0 88 1472 22 8 : tunables 0 0 0 : slabdata 4 4 0 +biovec-16 0 92 704 23 4 : tunables 0 0 0 : slabdata 4 4 0 +bio_integrity_payload 4 28 576 28 4 : tunables 0 0 0 : slabdata 1 1 0 +khugepaged_mm_slot 59 180 448 36 4 : tunables 0 0 0 : slabdata 5 5 0 +ksm_mm_slot 0 0 384 21 2 : tunables 0 0 0 : slabdata 0 0 0 +ksm_stable_node 0 0 400 20 2 : tunables 0 0 0 : slabdata 0 0 0 +ksm_rmap_item 0 0 400 20 2 : tunables 0 0 0 : slabdata 0 0 0 +user_namespace 2 37 864 37 8 : tunables 0 0 0 : slabdata 1 1 0 +uid_cache 5 28 576 28 4 : tunables 0 0 0 : slabdata 1 1 0 +dmaengine-unmap-256 1 13 2496 13 8 : tunables 0 0 0 : slabdata 1 1 0 +dmaengine-unmap-128 1 22 1472 22 8 : tunables 0 0 0 : slabdata 1 1 0 +dmaengine-unmap-16 1 28 576 28 4 : tunables 0 0 0 : slabdata 1 1 0 +dmaengine-unmap-2 1 36 448 36 4 : tunables 0 0 0 : slabdata 1 1 0 +audit_buffer 0 22 360 22 2 : tunables 0 0 0 : slabdata 1 1 0 +sock_inode_cache 663 1170 1216 26 8 : tunables 0 0 0 : slabdata 45 45 0 +skbuff_ext_cache 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0 +skbuff_fclone_cache 1 72 896 36 8 : tunables 0 0 0 : slabdata 2 2 0 +skbuff_head_cache 3 650 640 25 4 : tunables 0 0 0 : slabdata 26 26 0 +configfs_dir_cache 7 38 424 38 4 : tunables 0 0 0 : slabdata 1 1 0 +file_lock_cache 27 116 552 29 4 : tunables 0 0 0 : slabdata 4 4 0 +file_lock_ctx 106 120 392 20 2 : 
tunables 0 0 0 : slabdata 6 6 0 +fsnotify_mark_connector 52 66 368 22 2 : tunables 0 0 0 : slabdata 3 3 0 +net_namespace 1 6 5312 6 8 : tunables 0 0 0 : slabdata 1 1 0 +task_delay_info 784 1560 416 39 4 : tunables 0 0 0 : slabdata 40 40 0 +taskstats 45 92 688 23 4 : tunables 0 0 0 : slabdata 4 4 0 +proc_dir_entry 678 682 528 31 4 : tunables 0 0 0 : slabdata 22 22 0 +pde_opener 0 189 376 21 2 : tunables 0 0 0 : slabdata 9 9 0 +proc_inode_cache 7150 8250 992 33 8 : tunables 0 0 0 : slabdata 250 250 0 +seq_file 60 735 456 35 4 : tunables 0 0 0 : slabdata 21 21 0 +sigqueue 0 156 416 39 4 : tunables 0 0 0 : slabdata 4 4 0 +bdev_cache 36 78 1216 26 8 : tunables 0 0 0 : slabdata 3 3 0 +shmem_inode_cache 1599 2208 1016 32 8 : tunables 0 0 0 : slabdata 69 69 0 +kernfs_iattrs_cache 1251 1254 424 38 4 : tunables 0 0 0 : slabdata 33 33 0 +kernfs_node_cache 52898 52920 464 35 4 : tunables 0 0 0 : slabdata 1512 1512 0 +mnt_cache 42 46 704 23 4 : tunables 0 0 0 : slabdata 2 2 0 +filp 4314 6371 704 23 4 : tunables 0 0 0 : slabdata 277 277 0 +inode_cache 28695 29505 920 35 8 : tunables 0 0 0 : slabdata 843 843 0 +dentry 166069 169074 528 31 4 : tunables 0 0 0 : slabdata 5454 5454 0 +names_cache 0 35 4544 7 8 : tunables 0 0 0 : slabdata 5 5 0 +hashtab_node 0 0 360 22 2 : tunables 0 0 0 : slabdata 0 0 0 +ebitmap_node 0 0 400 20 2 : tunables 0 0 0 : slabdata 0 0 0 +avtab_extended_perms 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0 +avtab_node 0 0 360 22 2 : tunables 0 0 0 : slabdata 0 0 0 +avc_xperms_data 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0 +avc_xperms_decision_node 0 0 384 21 2 : tunables 0 0 0 : slabdata 0 0 0 +avc_xperms_node 0 0 392 20 2 : tunables 0 0 0 : slabdata 0 0 0 +avc_node 37 40 408 20 2 : tunables 0 0 0 : slabdata 2 2 0 +iint_cache 0 0 448 36 4 : tunables 0 0 0 : slabdata 0 0 0 +lsm_inode_cache 122284 122340 392 20 2 : tunables 0 0 0 : slabdata 6117 6117 0 +lsm_file_cache 4266 4485 352 23 2 : tunables 0 0 0 : slabdata 195 195 0 +key_jar 8 25 640 25 4 : 
tunables 0 0 0 : slabdata 1 1 0 +buffer_head 255622 257076 440 37 4 : tunables 0 0 0 : slabdata 6948 6948 0 +uts_namespace 0 0 776 21 4 : tunables 0 0 0 : slabdata 0 0 0 +nsproxy 31 40 408 20 2 : tunables 0 0 0 : slabdata 2 2 0 +vm_area_struct 39115 43214 528 31 4 : tunables 0 0 0 : slabdata 1394 1394 0 +mm_struct 96 529 1408 23 8 : tunables 0 0 0 : slabdata 23 23 0 +fs_cache 102 756 448 36 4 : tunables 0 0 0 : slabdata 21 21 0 +files_cache 102 588 1152 28 8 : tunables 0 0 0 : slabdata 21 21 0 +signal_cache 266 672 1536 21 8 : tunables 0 0 0 : slabdata 32 32 0 +sighand_cache 266 507 2496 13 8 : tunables 0 0 0 : slabdata 39 39 0 +task_struct 783 963 10240 3 8 : tunables 0 0 0 : slabdata 321 321 0 +cred_jar 364 952 576 28 4 : tunables 0 0 0 : slabdata 34 34 0 +anon_vma_chain 63907 67821 416 39 4 : tunables 0 0 0 : slabdata 1739 1739 0 +anon_vma 25891 28899 416 39 4 : tunables 0 0 0 : slabdata 741 741 0 +pid 408 992 512 32 4 : tunables 0 0 0 : slabdata 31 31 0 +Acpi-Operand 6682 6740 408 20 2 : tunables 0 0 0 : slabdata 337 337 0 +Acpi-ParseExt 0 39 416 39 4 : tunables 0 0 0 : slabdata 1 1 0 +Acpi-Parse 0 80 392 20 2 : tunables 0 0 0 : slabdata 4 4 0 +Acpi-State 0 78 416 39 4 : tunables 0 0 0 : slabdata 2 2 0 +Acpi-Namespace 3911 3948 384 21 2 : tunables 0 0 0 : slabdata 188 188 0 +trace_event_file 2638 2660 424 38 4 : tunables 0 0 0 : slabdata 70 70 0 +ftrace_event_field 6592 6594 384 21 2 : tunables 0 0 0 : slabdata 314 314 0 +pool_workqueue 41 64 1024 32 8 : tunables 0 0 0 : slabdata 2 2 0 +radix_tree_node 21638 24045 912 35 8 : tunables 0 0 0 : slabdata 687 687 0 +task_group 48 78 1216 26 8 : tunables 0 0 0 : slabdata 3 3 0 +vmap_area 4411 4680 400 20 2 : tunables 0 0 0 : slabdata 234 234 0 +dma-kmalloc-8k 0 0 24576 1 8 : tunables 0 0 0 : slabdata 0 0 0 +dma-kmalloc-4k 0 0 12288 2 8 : tunables 0 0 0 : slabdata 0 0 0 +dma-kmalloc-2k 0 0 6144 5 8 : tunables 0 0 0 : slabdata 0 0 0 +dma-kmalloc-1k 0 0 3072 10 8 : tunables 0 0 0 : slabdata 0 0 0 +dma-kmalloc-512 0 0 
1536 21 8 : tunables 0 0 0 : slabdata 0 0 0 +dma-kmalloc-256 0 0 1024 32 8 : tunables 0 0 0 : slabdata 0 0 0 +dma-kmalloc-128 0 0 640 25 4 : tunables 0 0 0 : slabdata 0 0 0 +dma-kmalloc-64 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0 +dma-kmalloc-32 0 0 416 39 4 : tunables 0 0 0 : slabdata 0 0 0 +dma-kmalloc-16 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0 +dma-kmalloc-8 0 0 344 23 2 : tunables 0 0 0 : slabdata 0 0 0 +dma-kmalloc-192 0 0 528 31 4 : tunables 0 0 0 : slabdata 0 0 0 +dma-kmalloc-96 0 0 432 37 4 : tunables 0 0 0 : slabdata 0 0 0 +kmalloc-rcl-8k 0 0 24576 1 8 : tunables 0 0 0 : slabdata 0 0 0 +kmalloc-rcl-4k 0 0 12288 2 8 : tunables 0 0 0 : slabdata 0 0 0 +kmalloc-rcl-2k 0 0 6144 5 8 : tunables 0 0 0 : slabdata 0 0 0 +kmalloc-rcl-1k 0 0 3072 10 8 : tunables 0 0 0 : slabdata 0 0 0 +kmalloc-rcl-512 0 0 1536 21 8 : tunables 0 0 0 : slabdata 0 0 0 +kmalloc-rcl-256 0 0 1024 32 8 : tunables 0 0 0 : slabdata 0 0 0 +kmalloc-rcl-192 0 0 528 31 4 : tunables 0 0 0 : slabdata 0 0 0 +kmalloc-rcl-128 31 75 640 25 4 : tunables 0 0 0 : slabdata 3 3 0 +kmalloc-rcl-96 3371 3626 432 37 4 : tunables 0 0 0 : slabdata 98 98 0 +kmalloc-rcl-64 2080 2272 512 32 4 : tunables 0 0 0 : slabdata 71 71 0 +kmalloc-rcl-32 0 0 416 39 4 : tunables 0 0 0 : slabdata 0 0 0 +kmalloc-rcl-16 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0 +kmalloc-rcl-8 0 0 344 23 2 : tunables 0 0 0 : slabdata 0 0 0 +kmalloc-8k 133 140 24576 1 8 : tunables 0 0 0 : slabdata 140 140 0 +kmalloc-4k 403 444 12288 2 8 : tunables 0 0 0 : slabdata 222 222 0 +kmalloc-2k 2391 2585 6144 5 8 : tunables 0 0 0 : slabdata 517 517 0 +kmalloc-1k 2163 2420 3072 10 8 : tunables 0 0 0 : slabdata 242 242 0 +kmalloc-512 2972 3633 1536 21 8 : tunables 0 0 0 : slabdata 173 173 0 +kmalloc-256 1841 1856 1024 32 8 : tunables 0 0 0 : slabdata 58 58 0 +kmalloc-192 2165 2914 528 31 4 : tunables 0 0 0 : slabdata 94 94 0 +kmalloc-128 1137 1175 640 25 4 : tunables 0 0 0 : slabdata 47 47 0 +kmalloc-96 1925 2590 432 37 4 : tunables 0 0 0 : 
slabdata 70 70 0 +kmalloc-64 9433 10688 512 32 4 : tunables 0 0 0 : slabdata 334 334 0 +kmalloc-32 9098 10062 416 39 4 : tunables 0 0 0 : slabdata 258 258 0 +kmalloc-16 10914 10956 368 22 2 : tunables 0 0 0 : slabdata 498 498 0 +kmalloc-8 7576 7705 344 23 2 : tunables 0 0 0 : slabdata 335 335 0 +kmem_cache_node 904 928 512 32 4 : tunables 0 0 0 : slabdata 29 29 0 +kmem_cache 904 936 832 39 8 : tunables 0 0 0 : slabdata 24 24 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/stat +Lines: 16 +cpu 301854 612 111922 8979004 3552 2 3944 0 0 0 +cpu0 44490 19 21045 1087069 220 1 3410 0 0 0 +cpu1 47869 23 16474 1110787 591 0 46 0 0 0 +cpu2 46504 36 15916 1112321 441 0 326 0 0 0 +cpu3 47054 102 15683 1113230 533 0 60 0 0 0 +cpu4 28413 25 10776 1140321 217 0 8 0 0 0 +cpu5 29271 101 11586 1136270 672 0 30 0 0 0 +cpu6 29152 36 10276 1139721 319 0 29 0 0 0 +cpu7 29098 268 10164 1139282 555 0 31 0 0 0 +intr 8885917 17 0 0 0 0 0 0 0 1 79281 0 0 0 0 0 0 0 231237 0 0 0 0 250586 103 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 223424 190745 13 906 1283803 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +ctxt 38014093 +btime 1418183276 +processes 26442 +procs_running 2 +procs_blocked 1 +softirq 5057579 250191 1481983 1647 211099 186066 0 1783454 622196 12499 508444 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/swaps +Lines: 2 +Filename Type Size Used Priority +/dev/dm-2 partition 131068 176 -2 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/proc/symlinktargets +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/symlinktargets/README +Lines: 2 +This directory contains some empty files that are the symlinks the files in the "fd" directory point to. 
+They are otherwise ignored by the tests +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/symlinktargets/abc +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/symlinktargets/def +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/symlinktargets/ghi +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/symlinktargets/uvw +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/symlinktargets/xyz +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/proc/sys +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/proc/sys/kernel +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/proc/sys/kernel/random +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/kernel/random/entropy_avail +Lines: 1 +3943 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/kernel/random/poolsize +Lines: 1 +4096 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/kernel/random/urandom_min_reseed_secs +Lines: 1 +60 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/kernel/random/write_wakeup_threshold +Lines: 1 +3072 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/proc/sys/vm +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
+Path: fixtures/proc/sys/vm/admin_reserve_kbytes +Lines: 1 +8192 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/block_dump +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/compact_unevictable_allowed +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/dirty_background_bytes +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/dirty_background_ratio +Lines: 1 +10 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/dirty_bytes +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/dirty_expire_centisecs +Lines: 1 +3000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/dirty_ratio +Lines: 1 +20 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/dirty_writeback_centisecs +Lines: 1 +500 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/dirtytime_expire_seconds +Lines: 1 +43200 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/drop_caches +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/extfrag_threshold +Lines: 1 +500 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/hugetlb_shm_group +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
fixtures/proc/sys/vm/laptop_mode +Lines: 1 +5 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/legacy_va_layout +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/lowmem_reserve_ratio +Lines: 1 +256 256 32 0 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/max_map_count +Lines: 1 +65530 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/memory_failure_early_kill +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/memory_failure_recovery +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/min_free_kbytes +Lines: 1 +67584 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/min_slab_ratio +Lines: 1 +5 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/min_unmapped_ratio +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/mmap_min_addr +Lines: 1 +65536 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/nr_hugepages +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/nr_hugepages_mempolicy +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/nr_overcommit_hugepages +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
fixtures/proc/sys/vm/numa_stat +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/numa_zonelist_order +Lines: 1 +Node +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/oom_dump_tasks +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/oom_kill_allocating_task +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/overcommit_kbytes +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/overcommit_memory +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/overcommit_ratio +Lines: 1 +50 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/page-cluster +Lines: 1 +3 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/panic_on_oom +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/percpu_pagelist_fraction +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/stat_interval +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/swappiness +Lines: 1 +60 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/user_reserve_kbytes +Lines: 1 +131072 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/vfs_cache_pressure +Lines: 1 +100 
+Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/watermark_boost_factor +Lines: 1 +15000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/watermark_scale_factor +Lines: 1 +10 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/zone_reclaim_mode +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/zoneinfo +Lines: 262 +Node 0, zone DMA + per-node stats + nr_inactive_anon 230981 + nr_active_anon 547580 + nr_inactive_file 316904 + nr_active_file 346282 + nr_unevictable 115467 + nr_slab_reclaimable 131220 + nr_slab_unreclaimable 47320 + nr_isolated_anon 0 + nr_isolated_file 0 + workingset_nodes 11627 + workingset_refault 466886 + workingset_activate 276925 + workingset_restore 84055 + workingset_nodereclaim 487 + nr_anon_pages 795576 + nr_mapped 215483 + nr_file_pages 761874 + nr_dirty 908 + nr_writeback 0 + nr_writeback_temp 0 + nr_shmem 224925 + nr_shmem_hugepages 0 + nr_shmem_pmdmapped 0 + nr_anon_transparent_hugepages 0 + nr_unstable 0 + nr_vmscan_write 12950 + nr_vmscan_immediate_reclaim 3033 + nr_dirtied 8007423 + nr_written 7752121 + nr_kernel_misc_reclaimable 0 + pages free 3952 + min 33 + low 41 + high 49 + spanned 4095 + present 3975 + managed 3956 + protection: (0, 2877, 7826, 7826, 7826) + nr_free_pages 3952 + nr_zone_inactive_anon 0 + nr_zone_active_anon 0 + nr_zone_inactive_file 0 + nr_zone_active_file 0 + nr_zone_unevictable 0 + nr_zone_write_pending 0 + nr_mlock 0 + nr_page_table_pages 0 + nr_kernel_stack 0 + nr_bounce 0 + nr_zspages 0 + nr_free_cma 0 + numa_hit 1 + numa_miss 0 + numa_foreign 0 + numa_interleave 0 + numa_local 1 + numa_other 0 + pagesets + cpu: 0 + count: 0 + high: 0 + batch: 1 + vm stats threshold: 8 + cpu: 1 + count: 0 + high: 0 + batch: 1 + vm stats threshold: 
8 + cpu: 2 + count: 0 + high: 0 + batch: 1 + vm stats threshold: 8 + cpu: 3 + count: 0 + high: 0 + batch: 1 + vm stats threshold: 8 + cpu: 4 + count: 0 + high: 0 + batch: 1 + vm stats threshold: 8 + cpu: 5 + count: 0 + high: 0 + batch: 1 + vm stats threshold: 8 + cpu: 6 + count: 0 + high: 0 + batch: 1 + vm stats threshold: 8 + cpu: 7 + count: 0 + high: 0 + batch: 1 + vm stats threshold: 8 + node_unreclaimable: 0 + start_pfn: 1 +Node 0, zone DMA32 + pages free 204252 + min 19510 + low 21059 + high 22608 + spanned 1044480 + present 759231 + managed 742806 + protection: (0, 0, 4949, 4949, 4949) + nr_free_pages 204252 + nr_zone_inactive_anon 118558 + nr_zone_active_anon 106598 + nr_zone_inactive_file 75475 + nr_zone_active_file 70293 + nr_zone_unevictable 66195 + nr_zone_write_pending 64 + nr_mlock 4 + nr_page_table_pages 1756 + nr_kernel_stack 2208 + nr_bounce 0 + nr_zspages 0 + nr_free_cma 0 + numa_hit 113952967 + numa_miss 0 + numa_foreign 0 + numa_interleave 0 + numa_local 113952967 + numa_other 0 + pagesets + cpu: 0 + count: 345 + high: 378 + batch: 63 + vm stats threshold: 48 + cpu: 1 + count: 356 + high: 378 + batch: 63 + vm stats threshold: 48 + cpu: 2 + count: 325 + high: 378 + batch: 63 + vm stats threshold: 48 + cpu: 3 + count: 346 + high: 378 + batch: 63 + vm stats threshold: 48 + cpu: 4 + count: 321 + high: 378 + batch: 63 + vm stats threshold: 48 + cpu: 5 + count: 316 + high: 378 + batch: 63 + vm stats threshold: 48 + cpu: 6 + count: 373 + high: 378 + batch: 63 + vm stats threshold: 48 + cpu: 7 + count: 339 + high: 378 + batch: 63 + vm stats threshold: 48 + node_unreclaimable: 0 + start_pfn: 4096 +Node 0, zone Normal + pages free 18553 + min 11176 + low 13842 + high 16508 + spanned 1308160 + present 1308160 + managed 1268711 + protection: (0, 0, 0, 0, 0) + nr_free_pages 18553 + nr_zone_inactive_anon 112423 + nr_zone_active_anon 440982 + nr_zone_inactive_file 241429 + nr_zone_active_file 275989 + nr_zone_unevictable 49272 + nr_zone_write_pending 844 + 
nr_mlock 154 + nr_page_table_pages 9750 + nr_kernel_stack 15136 + nr_bounce 0 + nr_zspages 0 + nr_free_cma 0 + numa_hit 162718019 + numa_miss 0 + numa_foreign 0 + numa_interleave 26812 + numa_local 162718019 + numa_other 0 + pagesets + cpu: 0 + count: 316 + high: 378 + batch: 63 + vm stats threshold: 56 + cpu: 1 + count: 366 + high: 378 + batch: 63 + vm stats threshold: 56 + cpu: 2 + count: 60 + high: 378 + batch: 63 + vm stats threshold: 56 + cpu: 3 + count: 256 + high: 378 + batch: 63 + vm stats threshold: 56 + cpu: 4 + count: 253 + high: 378 + batch: 63 + vm stats threshold: 56 + cpu: 5 + count: 159 + high: 378 + batch: 63 + vm stats threshold: 56 + cpu: 6 + count: 311 + high: 378 + batch: 63 + vm stats threshold: 56 + cpu: 7 + count: 264 + high: 378 + batch: 63 + vm stats threshold: 56 + node_unreclaimable: 0 + start_pfn: 1048576 +Node 0, zone Movable + pages free 0 + min 0 + low 0 + high 0 + spanned 0 + present 0 + managed 0 + protection: (0, 0, 0, 0, 0) +Node 0, zone Device + pages free 0 + min 0 + low 0 + high 0 + spanned 0 + present 0 + managed 0 + protection: (0, 0, 0, 0, 0) +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/block +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/block/dm-0 +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/dm-0/stat +Lines: 1 +6447303 0 710266738 1529043 953216 0 31201176 4557464 0 796160 6088971 +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/block/sda +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/block/sda/queue +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/add_random +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/chunk_sectors +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/dax +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/discard_granularity +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/discard_max_bytes +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/discard_max_hw_bytes +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/discard_zeroes_data +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/fua +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/hw_sector_size +Lines: 1 +512 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/io_poll +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/io_poll_delay +Lines: 1 +-1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/io_timeout +Lines: 1 +30000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/block/sda/queue/iosched +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - +Path: fixtures/sys/block/sda/queue/iosched/back_seek_max +Lines: 1 +16384 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/iosched/back_seek_penalty +Lines: 1 +2 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/iosched/fifo_expire_async +Lines: 1 +250 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/iosched/fifo_expire_sync +Lines: 1 +125 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/iosched/low_latency +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/iosched/max_budget +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/iosched/slice_idle +Lines: 1 +8 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/iosched/slice_idle_us +Lines: 1 +8000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/iosched/strict_guarantees +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/iosched/timeout_sync +Lines: 1 +125 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/iostats +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/logical_block_size +Lines: 1 +512 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
fixtures/sys/block/sda/queue/max_discard_segments +Lines: 1 +1 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/max_hw_sectors_kb +Lines: 1 +32767 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/max_integrity_segments +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/max_sectors_kb +Lines: 1 +1280 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/max_segment_size +Lines: 1 +65536 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/max_segments +Lines: 1 +168 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/minimum_io_size +Lines: 1 +512 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/nomerges +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/nr_requests +Lines: 1 +64 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/nr_zones +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/optimal_io_size +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/physical_block_size +Lines: 1 +512 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/read_ahead_kb +Lines: 1 +128 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/rotational +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/rq_affinity +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/scheduler +Lines: 1 +mq-deadline kyber [bfq] none +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/wbt_lat_usec +Lines: 1 +75000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/write_cache +Lines: 1 +write back +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/write_same_max_bytes +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/write_zeroes_max_bytes +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/zoned +Lines: 1 +none +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/stat +Lines: 1 +9652963 396792 759304206 412943 8422549 6731723 286915323 13947418 0 5658367 19174573 1 2 3 12 +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/fc_host +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/fc_host/host0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/dev_loss_tmo +Lines: 1 +30 
+Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/fabric_name +Lines: 1 +0x0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/node_name +Lines: 1 +0x2000e0071bce95f2 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/port_id +Lines: 1 +0x000002 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/port_name +Lines: 1 +0x1000e0071bce95f2 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/port_state +Lines: 1 +Online +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/port_type +Lines: 1 +Point-To-Point (direct nport connection) +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/speed +Lines: 1 +16 Gbit +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/fc_host/host0/statistics +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/statistics/dumped_frames +Lines: 1 +0xffffffffffffffff +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/statistics/error_frames +Lines: 1 +0x0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/statistics/fcp_packet_aborts +Lines: 1 +0x13 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
fixtures/sys/class/fc_host/host0/statistics/invalid_crc_count +Lines: 1 +0x2 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/statistics/invalid_tx_word_count +Lines: 1 +0x8 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/statistics/link_failure_count +Lines: 1 +0x9 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/statistics/loss_of_signal_count +Lines: 1 +0x11 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/statistics/loss_of_sync_count +Lines: 1 +0x10 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/statistics/nos_count +Lines: 1 +0x12 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/statistics/rx_frames +Lines: 1 +0x3 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/statistics/rx_words +Lines: 1 +0x4 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/statistics/seconds_since_last_reset +Lines: 1 +0x7 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/statistics/tx_frames +Lines: 1 +0x5 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/statistics/tx_words +Lines: 1 +0x6 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/supported_classes +Lines: 1 +Class 3 +Mode: 644 +# ttar - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/supported_speeds +Lines: 1 +4 Gbit, 8 Gbit, 16 Gbit +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/symbolic_name +Lines: 1 +Emulex SN1100E2P FV12.4.270.3 DV12.4.0.0. HN:gotest. OS:Linux +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/infiniband +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/infiniband/mlx4_0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/board_id +Lines: 1 +SM_1141000001000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/fw_ver +Lines: 1 +2.31.5050 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/hca_type +Lines: 1 +MT4099 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/infiniband/mlx4_0/ports +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/infiniband/mlx4_0/ports/1 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/VL15_dropped +Lines: 1 +0 +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/excessive_buffer_overrun_errors +Lines: 1 +0 +Mode: 644 +# ttar - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/link_downed +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/link_error_recovery +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/local_link_integrity_errors +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_constraint_errors +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_data +Lines: 1 +2221223609 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_errors +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_packets +Lines: 1 +87169372 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_remote_physical_errors +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_switch_relay_errors +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_xmit_constraint_errors +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_xmit_data +Lines: 1 +26509113295 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_xmit_discards +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_xmit_packets +Lines: 1 +85734114 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_xmit_wait +Lines: 1 +3599 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/symbol_error +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/phys_state +Lines: 1 +5: LinkUp +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/rate +Lines: 1 +40 Gb/sec (4X QDR) +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/state +Lines: 1 +4: ACTIVE +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/infiniband/mlx4_0/ports/2 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/VL15_dropped +Lines: 1 +0 +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/excessive_buffer_overrun_errors +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/link_downed +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/link_error_recovery +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/local_link_integrity_errors +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_rcv_constraint_errors +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_rcv_data +Lines: 1 +2460436784 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_rcv_errors +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_rcv_packets +Lines: 1 +89332064 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_rcv_remote_physical_errors +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_rcv_switch_relay_errors +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_xmit_constraint_errors +Lines: 1 +0 +Mode: 644 
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_xmit_data +Lines: 1 +26540356890 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_xmit_discards +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_xmit_packets +Lines: 1 +88622850 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_xmit_wait +Lines: 1 +3846 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/symbol_error +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/phys_state +Lines: 1 +5: LinkUp +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/rate +Lines: 1 +40 Gb/sec (4X QDR) +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/state +Lines: 1 +4: ACTIVE +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/net +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/net/eth0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/addr_assign_type +Lines: 1 +3 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/addr_len +Lines: 1 +6 
+Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/address +Lines: 1 +01:01:01:01:01:01 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/broadcast +Lines: 1 +ff:ff:ff:ff:ff:ff +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/carrier +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/carrier_changes +Lines: 1 +2 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/carrier_down_count +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/carrier_up_count +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/dev_id +Lines: 1 +0x20 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/device +SymlinkTo: ../../../devices/pci0000:00/0000:00:1f.6/ +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/dormant +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/duplex +Lines: 1 +full +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/flags +Lines: 1 +0x1303 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/ifalias +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/ifindex 
+Lines: 1 +2 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/iflink +Lines: 1 +2 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/link_mode +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/mtu +Lines: 1 +1500 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/name_assign_type +Lines: 1 +2 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/netdev_group +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/operstate +Lines: 1 +up +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/phys_port_id +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/phys_port_name +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/phys_switch_id +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/speed +Lines: 1 +1000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/tx_queue_len +Lines: 1 +1000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/type +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/power_supply +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/power_supply/AC +SymlinkTo: ../../devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/power_supply/BAT0 +SymlinkTo: ../../devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/powercap +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/powercap/intel-rapl +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl/enabled +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl/uevent +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/powercap/intel-rapl:0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:0/constraint_0_max_power_uw +Lines: 1 +95000000 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:0/constraint_0_name +Lines: 1 +long_term +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:0/constraint_0_power_limit_uw +Lines: 1 +4090000000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:0/constraint_0_time_window_us +Lines: 1 +999424 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
fixtures/sys/class/powercap/intel-rapl:0/constraint_1_max_power_uw +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:0/constraint_1_name +Lines: 1 +short_term +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:0/constraint_1_power_limit_uw +Lines: 1 +4090000000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:0/constraint_1_time_window_us +Lines: 1 +2440 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:0/enabled +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:0/energy_uj +Lines: 1 +240422366267 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:0/max_energy_range_uj +Lines: 1 +262143328850 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:0/name +Lines: 1 +package-0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:0/uevent +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/powercap/intel-rapl:0:0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:0:0/constraint_0_max_power_uw +Lines: 0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:0:0/constraint_0_name +Lines: 1 +long_term +Mode: 444 +# ttar - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:0:0/constraint_0_power_limit_uw +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:0:0/constraint_0_time_window_us +Lines: 1 +976 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:0:0/enabled +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:0:0/energy_uj +Lines: 1 +118821284256 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:0:0/max_energy_range_uj +Lines: 1 +262143328850 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:0:0/name +Lines: 1 +core +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:0:0/uevent +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/powercap/intel-rapl:a +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_0_max_power_uw +Lines: 1 +95000000 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_0_name +Lines: 1 +long_term +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_0_power_limit_uw +Lines: 1 +4090000000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
fixtures/sys/class/powercap/intel-rapl:a/constraint_0_time_window_us +Lines: 1 +999424 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_1_max_power_uw +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_1_name +Lines: 1 +short_term +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_1_power_limit_uw +Lines: 1 +4090000000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_1_time_window_us +Lines: 1 +2440 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:a/enabled +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:a/energy_uj +Lines: 1 +240422366267 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:a/max_energy_range_uj +Lines: 1 +262143328850 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:a/name +Lines: 1 +package-10 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:a/uevent +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/thermal +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/thermal/cooling_device0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - +Path: fixtures/sys/class/thermal/cooling_device0/cur_state +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/thermal/cooling_device0/max_state +Lines: 1 +50 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/thermal/cooling_device0/type +Lines: 1 +Processor +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/thermal/cooling_device1 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/thermal/cooling_device1/cur_state +Lines: 1 +-1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/thermal/cooling_device1/max_state +Lines: 1 +27 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/thermal/cooling_device1/type +Lines: 1 +intel_powerclamp +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/thermal/thermal_zone0 +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/thermal/thermal_zone0/policy +Lines: 1 +step_wise +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/thermal/thermal_zone0/temp +Lines: 1 +49925 +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/thermal/thermal_zone0/type +Lines: 1 +bcm2835_thermal +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/thermal/thermal_zone1 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
fixtures/sys/class/thermal/thermal_zone1/mode +Lines: 1 +enabled +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/thermal/thermal_zone1/passive +Lines: 1 +0 +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/thermal/thermal_zone1/policy +Lines: 1 +step_wise +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/thermal/thermal_zone1/temp +Lines: 1 +-44000 +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/thermal/thermal_zone1/type +Lines: 1 +acpitz +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/LNXSYSTM:00 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: 
fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/device +SymlinkTo: ../../../ACPI0003:00 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/online +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/async +Lines: 1 +disabled +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/autosuspend_delay_ms +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/control +Lines: 1 +auto +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/runtime_active_kids +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/runtime_active_time +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/runtime_enabled +Lines: 1 +disabled +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/runtime_status +Lines: 1 +unsupported +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/runtime_suspended_time +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/runtime_usage +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup +Lines: 1 +enabled +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_abort_count +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_active +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_active_count +Lines: 1 +1 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_count +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_expire_count +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_last_time_ms +Lines: 1 +10598 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_max_time_ms +Lines: 1 +1 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_prevent_sleep_time_ms +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_total_time_ms +Lines: 1 +1 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/subsystem +SymlinkTo: ../../../../../../../../../class/power_supply +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/type +Lines: 1 +Mains +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/uevent +Lines: 2 +POWER_SUPPLY_NAME=AC +POWER_SUPPLY_ONLINE=0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/alarm +Lines: 1 +2369000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/capacity +Lines: 1 +98 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/capacity_level +Lines: 1 +Normal +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/charge_start_threshold +Lines: 1 +95 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/charge_stop_threshold +Lines: 1 +100 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/cycle_count +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/device +SymlinkTo: ../../../PNP0C0A:00 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/energy_full +Lines: 1 +50060000 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/energy_full_design +Lines: 1 +47520000 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/energy_now +Lines: 1 +49450000 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/manufacturer +Lines: 1 +LGC +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/model_name +Lines: 1 +LNV-45N1 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: 
fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/async +Lines: 1 +disabled +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/autosuspend_delay_ms +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/control +Lines: 1 +auto +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/runtime_active_kids +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/runtime_active_time +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/runtime_enabled +Lines: 1 +disabled +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/runtime_status +Lines: 1 +unsupported +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/runtime_suspended_time +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/runtime_usage +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power_now +Lines: 1 +4830000 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/present +Lines: 1 +1 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/serial_number +Lines: 1 +38109 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/status +Lines: 1 +Discharging +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/subsystem +SymlinkTo: ../../../../../../../../../class/power_supply +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/technology +Lines: 1 +Li-ion +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/type +Lines: 1 +Battery +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/uevent +Lines: 16 +POWER_SUPPLY_NAME=BAT0 +POWER_SUPPLY_STATUS=Discharging +POWER_SUPPLY_PRESENT=1 +POWER_SUPPLY_TECHNOLOGY=Li-ion +POWER_SUPPLY_CYCLE_COUNT=0 +POWER_SUPPLY_VOLTAGE_MIN_DESIGN=10800000 +POWER_SUPPLY_VOLTAGE_NOW=11750000 +POWER_SUPPLY_POWER_NOW=5064000 +POWER_SUPPLY_ENERGY_FULL_DESIGN=47520000 +POWER_SUPPLY_ENERGY_FULL=47390000 +POWER_SUPPLY_ENERGY_NOW=40730000 +POWER_SUPPLY_CAPACITY=85 +POWER_SUPPLY_CAPACITY_LEVEL=Normal +POWER_SUPPLY_MODEL_NAME=LNV-45N1 +POWER_SUPPLY_MANUFACTURER=LGC +POWER_SUPPLY_SERIAL_NUMBER=38109 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/voltage_min_design +Lines: 1 +10800000 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/voltage_now +Lines: 1 +12229000 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/dirty_data +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/bypassed +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_bypass_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_bypass_misses +Lines: 1 +0 +Mode: 644 +# ttar - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_hit_ratio +Lines: 1 +100 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_hits +Lines: 1 +289 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_miss_collisions +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_readaheads +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/bypassed +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_bypass_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_bypass_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_hit_ratio +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_miss_collisions +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_readaheads +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/bypassed +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_bypass_hits +Lines: 1 +0 
+Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_bypass_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_hit_ratio +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_miss_collisions +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_readaheads +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/bypassed +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_bypass_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_bypass_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_hit_ratio +Lines: 1 +100 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_hits +Lines: 1 +546 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_miss_collisions +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_readaheads +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: 
fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache/io_errors +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache/metadata_written +Lines: 1 +512 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache/priority_stats +Lines: 5 +Unused: 99% +Metadata: 0% +Average: 10473 +Sectors per Q: 64 +Quantiles: [0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946] +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache/written +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: 
fixtures/sys/devices/pci0000:00/0000:00:1f.6 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/ari_enabled +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/broken_parity_status +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/class +Lines: 1 +0x020000 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/consistent_dma_mask_bits +Lines: 1 +64 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/d3cold_allowed +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/device +Lines: 1 +0x15d7 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/dma_mask_bits +Lines: 1 +64 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/driver_override +Lines: 1 +(null) +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/enable +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/irq +Lines: 1 +140 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/local_cpulist +Lines: 1 +0-7 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/local_cpus +Lines: 1 +ff +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/modalias +Lines: 1 +pci:v00008086d000015D7sv000017AAsd0000225Abc02sc00i00 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/msi_bus +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/numa_node +Lines: 1 +-1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/resource +Lines: 13 +0x00000000ec200000 0x00000000ec21ffff 0x0000000000040200 +0x0000000000000000 0x0000000000000000 0x0000000000000000 +0x0000000000000000 0x0000000000000000 0x0000000000000000 +0x0000000000000000 0x0000000000000000 0x0000000000000000 +0x0000000000000000 0x0000000000000000 0x0000000000000000 +0x0000000000000000 0x0000000000000000 0x0000000000000000 +0x0000000000000000 0x0000000000000000 0x0000000000000000 +0x0000000000000000 0x0000000000000000 0x0000000000000000 +0x0000000000000000 0x0000000000000000 0x0000000000000000 +0x0000000000000000 0x0000000000000000 0x0000000000000000 +0x0000000000000000 0x0000000000000000 0x0000000000000000 +0x0000000000000000 0x0000000000000000 0x0000000000000000 +0x0000000000000000 0x0000000000000000 0x0000000000000000 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/revision +Lines: 1 +0x21 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/subsystem_device +Lines: 1 +0x225a +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
+Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/subsystem_vendor +Lines: 1 +0x17aa +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/uevent +Lines: 6 +DRIVER=e1000e +PCI_CLASS=20000 +PCI_ID=8086:15D7 +PCI_SUBSYS_ID=17AA:225A +PCI_SLOT_NAME=0000:00:1f.6 +MODALIAS=pci:v00008086d000015D7sv000017AAsd0000225Abc02sc00i00 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/vendor +Lines: 1 +0x8086 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/rbd +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/rbd/0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/rbd/0/name +Lines: 1 +demo +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/rbd/0/pool +Lines: 1 +iscsi-images +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/rbd/1 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/rbd/1/name +Lines: 1 +wrong +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/rbd/1/pool +Lines: 1 +wrong-images +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/system +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/system/node +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/system/node/node1 +Mode: 755 +# ttar 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/node/node1/vmstat +Lines: 6 +nr_free_pages 1 +nr_zone_inactive_anon 2 +nr_zone_active_anon 3 +nr_zone_inactive_file 4 +nr_zone_active_file 5 +nr_zone_unevictable 6 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/system/node/node2 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/node/node2/vmstat +Lines: 6 +nr_free_pages 7 +nr_zone_inactive_anon 8 +nr_zone_active_anon 9 +nr_zone_inactive_file 10 +nr_zone_active_file 11 +nr_zone_unevictable 12 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/system/clocksource +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/system/clocksource/clocksource0 +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/clocksource/clocksource0/available_clocksource +Lines: 1 +tsc hpet acpi_pm +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/clocksource/clocksource0/current_clocksource +Lines: 1 +tsc +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/system/cpu +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/system/cpu/cpu0 +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu0/cpufreq +SymlinkTo: ../cpufreq/policy0 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/system/cpu/cpu0/thermal_throttle +Mode: 755 
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu0/thermal_throttle/core_throttle_count +Lines: 1 +10084 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu0/thermal_throttle/package_throttle_count +Lines: 1 +34818 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/system/cpu/cpu0/topology +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu0/topology/core_id +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu0/topology/core_siblings +Lines: 1 +ff +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu0/topology/core_siblings_list +Lines: 1 +0-7 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu0/topology/physical_package_id +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu0/topology/thread_siblings +Lines: 1 +11 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu0/topology/thread_siblings_list +Lines: 1 +0,4 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/system/cpu/cpu1 +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/system/cpu/cpu1/cpufreq +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
fixtures/sys/devices/system/cpu/cpu1/cpufreq/cpuinfo_cur_freq +Lines: 1 +1200195 +Mode: 400 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/cpuinfo_max_freq +Lines: 1 +3300000 +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/cpuinfo_min_freq +Lines: 1 +1200000 +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/cpuinfo_transition_latency +Lines: 1 +4294967295 +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/related_cpus +Lines: 1 +1 +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_available_governors +Lines: 1 +performance powersave +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_driver +Lines: 1 +intel_pstate +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_governor +Lines: 1 +powersave +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_max_freq +Lines: 1 +3300000 +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_min_freq +Lines: 1 +1200000 +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_setspeed +Lines: 1 + +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: 
fixtures/sys/devices/system/cpu/cpu1/thermal_throttle +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu1/thermal_throttle/core_throttle_count +Lines: 1 +523 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu1/thermal_throttle/package_throttle_count +Lines: 1 +34818 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/system/cpu/cpu1/topology +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu1/topology/core_id +Lines: 1 +1 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu1/topology/core_siblings +Lines: 1 +ff +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu1/topology/core_siblings_list +Lines: 1 +0-7 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu1/topology/physical_package_id +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu1/topology/thread_siblings +Lines: 1 +22 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu1/topology/thread_siblings_list +Lines: 1 +1,5 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/system/cpu/cpufreq +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/system/cpu/cpufreq/policy0 +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - +Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/affected_cpus +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/cpuinfo_max_freq +Lines: 1 +2400000 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/cpuinfo_min_freq +Lines: 1 +800000 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/cpuinfo_transition_latency +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/related_cpus +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_available_governors +Lines: 1 +performance powersave +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_cur_freq +Lines: 1 +1219917 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_driver +Lines: 1 +intel_pstate +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_governor +Lines: 1 +powersave +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_max_freq +Lines: 1 +2400000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_min_freq +Lines: 1 +800000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_setspeed +Lines: 1 + +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/system/cpu/cpufreq/policy1 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/bcache +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/average_key_size +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0 +Mode: 777 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/dirty_data +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/bypassed +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_bypass_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_bypass_misses +Lines: 
1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_hit_ratio +Lines: 1 +100 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_hits +Lines: 1 +289 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_miss_collisions +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_readaheads +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/bypassed +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_bypass_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_bypass_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_hit_ratio +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_miss_collisions +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_readaheads +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/bypassed +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_bypass_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_bypass_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_hit_ratio +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_miss_collisions +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_readaheads +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/bypassed +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_bypass_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_bypass_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_hit_ratio +Lines: 1 +100 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_hits +Lines: 1 +546 +Mode: 644 +# ttar - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_miss_collisions +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_readaheads +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/writeback_rate_debug +Lines: 7 +rate: 1.1M/sec +dirty: 20.4G +target: 20.4G +proportional: 427.5k +integral: 790.0k +change: 321.5k/sec +next io: 17ms +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/btree_cache_size +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache0 +Mode: 777 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache0/io_errors +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache0/metadata_written +Lines: 1 +512 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache0/priority_stats +Lines: 5 +Unused: 99% +Metadata: 0% +Average: 10473 +Sectors per Q: 64 +Quantiles: [0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 20946 20946 20946 20946 20946 20946 20946 20946 20946 
20946 20946 20946 20946 20946 20946 20946] +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache0/written +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache_available_percent +Lines: 1 +100 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/congested +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal/active_journal_entries +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal/btree_nodes +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal/btree_read_average_duration_us +Lines: 1 +1305 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal/cache_read_races +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/root_usage_percent +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day +Mode: 755 +# ttar - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/bypassed +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_bypass_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_bypass_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_hit_ratio +Lines: 1 +100 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_hits +Lines: 1 +289 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_miss_collisions +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_readaheads +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/bypassed +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
+Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_bypass_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_bypass_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_hit_ratio +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_miss_collisions +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_readaheads +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/bypassed +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_bypass_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_bypass_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_hit_ratio +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_miss_collisions +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_readaheads +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/bypassed +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_bypass_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_bypass_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_hit_ratio +Lines: 1 +100 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_hits +Lines: 1 +546 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_miss_collisions +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_readaheads +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/tree_depth +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/btrfs +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/bytes_may_use +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/bytes_pinned +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/bytes_readonly +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/bytes_reserved +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/bytes_used +Lines: 1 +808189952 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/disk_total +Lines: 1 +2147483648 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/disk_used +Lines: 1 +808189952 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/flags +Lines: 1 +1 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/raid0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/raid0/total_bytes +Lines: 1 +2147483648 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/raid0/used_bytes +Lines: 1 +808189952 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/total_bytes +Lines: 1 +2147483648 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/total_bytes_pinned +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/global_rsv_reserved +Lines: 1 +16777216 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/global_rsv_size +Lines: 1 +16777216 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/bytes_may_use +Lines: 1 +16777216 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/bytes_pinned +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/bytes_readonly +Lines: 1 +131072 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/bytes_reserved +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/bytes_used +Lines: 1 +933888 +Mode: 444 +# ttar - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/disk_total +Lines: 1 +2147483648 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/disk_used +Lines: 1 +1867776 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/flags +Lines: 1 +4 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/raid1 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/raid1/total_bytes +Lines: 1 +1073741824 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/raid1/used_bytes +Lines: 1 +933888 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/total_bytes +Lines: 1 +1073741824 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/total_bytes_pinned +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/bytes_may_use 
+Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/bytes_pinned +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/bytes_readonly +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/bytes_reserved +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/bytes_used +Lines: 1 +16384 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/disk_total +Lines: 1 +16777216 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/disk_used +Lines: 1 +32768 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/flags +Lines: 1 +2 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/raid1 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/raid1/total_bytes +Lines: 1 +8388608 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/raid1/used_bytes +Lines: 1 +16384 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/total_bytes +Lines: 1 +8388608 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/total_bytes_pinned +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/clone_alignment +Lines: 1 +4096 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/devices +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/devices/loop25 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/devices/loop25/size +Lines: 1 +20971520 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/devices/loop26 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/devices/loop26/size +Lines: 1 +20971520 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/features +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/features/big_metadata +Lines: 1 +1 
+Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/features/extended_iref +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/features/mixed_backref +Lines: 1 +1 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/features/skinny_metadata +Lines: 1 +1 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/label +Lines: 1 +fixture +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/metadata_uuid +Lines: 1 +0abb23a9-579b-43e6-ad30-227ef47fcb9d +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/nodesize +Lines: 1 +16384 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/quota_override +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/sectorsize +Lines: 1 +4096 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: 
fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/bytes_may_use +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/bytes_pinned +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/bytes_readonly +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/bytes_reserved +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/bytes_used +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/disk_total +Lines: 1 +644087808 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/disk_used +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/flags +Lines: 1 +1 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/raid5 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/raid5/total_bytes +Lines: 1 +644087808 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/raid5/used_bytes +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/total_bytes +Lines: 1 +644087808 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/total_bytes_pinned +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/global_rsv_reserved +Lines: 1 +16777216 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/global_rsv_size +Lines: 1 +16777216 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/bytes_may_use +Lines: 1 +16777216 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/bytes_pinned +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/bytes_readonly +Lines: 1 +262144 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/bytes_reserved +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/bytes_used +Lines: 1 +114688 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/disk_total +Lines: 1 +429391872 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/disk_used +Lines: 1 +114688 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/flags +Lines: 1 +4 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/raid6 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/raid6/total_bytes +Lines: 1 +429391872 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/raid6/used_bytes +Lines: 1 +114688 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/total_bytes +Lines: 1 +429391872 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/total_bytes_pinned +Lines: 1 
+0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/bytes_may_use +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/bytes_pinned +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/bytes_readonly +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/bytes_reserved +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/bytes_used +Lines: 1 +16384 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/disk_total +Lines: 1 +16777216 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/disk_used +Lines: 1 +16384 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/flags +Lines: 1 +2 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/raid6 +Mode: 755 +# 
ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/raid6/total_bytes +Lines: 1 +16777216 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/raid6/used_bytes +Lines: 1 +16384 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/total_bytes +Lines: 1 +16777216 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/total_bytes_pinned +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/clone_alignment +Lines: 1 +4096 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/devices +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/devices/loop22 +SymlinkTo: ../../../../devices/virtual/block/loop22 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/devices/loop23 +SymlinkTo: ../../../../devices/virtual/block/loop23 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/devices/loop24 +SymlinkTo: ../../../../devices/virtual/block/loop24 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/devices/loop25 
+SymlinkTo: ../../../../devices/virtual/block/loop25 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/features +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/features/big_metadata +Lines: 1 +1 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/features/extended_iref +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/features/mixed_backref +Lines: 1 +1 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/features/raid56 +Lines: 1 +1 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/features/skinny_metadata +Lines: 1 +1 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/label +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/metadata_uuid +Lines: 1 +7f07c59f-6136-449c-ab87-e1cf2328731b +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/nodesize +Lines: 1 +16384 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/quota_override +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/sectorsize +Lines: 1 +4096 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/xfs +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/xfs/sda1 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/xfs/sda1/stats +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/xfs/sda1/stats/stats +Lines: 1 +extent_alloc 1 0 0 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/xfs/sdb1 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/xfs/sdb1/stats +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/xfs/sdb1/stats/stats +Lines: 1 +extent_alloc 2 0 0 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/core +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/core/fileio_0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/core/fileio_1 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/core/fileio_1/file_lio_1G +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/core/fileio_1/file_lio_1G/enable +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/core/fileio_1/file_lio_1G/udev_path +Lines: 1 +/home/iscsi/file_back_1G +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/core/iblock_0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/core/iblock_0/block_lio_rbd1 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/core/iblock_0/block_lio_rbd1/enable +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/core/iblock_0/block_lio_rbd1/udev_path +Lines: 1 +/dev/rbd1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/core/rbd_0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/core/rbd_0/iscsi-images-demo +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/core/rbd_0/iscsi-images-demo/enable +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/core/rbd_0/iscsi-images-demo/udev_path +Lines: 1 +/dev/rbd/iscsi-images/demo +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: 
fixtures/sys/kernel/config/target/core/rd_mcp_119 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/core/rd_mcp_119/ramdisk_lio_1G +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/core/rd_mcp_119/ramdisk_lio_1G/enable +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/core/rd_mcp_119/ramdisk_lio_1G/udev_path +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/iscsi +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/enable +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun/lun_0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun/lun_0/7f4a4eb56d 
+SymlinkTo: ../../../../../../target/core/rd_mcp_119/ramdisk_lio_1G +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun/lun_0/statistics +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/in_cmds +Lines: 1 +204950 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/read_mbytes +Lines: 1 +10325 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/write_mbytes +Lines: 1 +40325 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/enable +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun/lun_0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun/lun_0/795b7c7026 +SymlinkTo: ../../../../../../target/core/iblock_0/block_lio_rbd1 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun/lun_0/statistics +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun/lun_0/statistics/scsi_tgt_port +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/in_cmds +Lines: 1 +104950 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/read_mbytes +Lines: 1 +20095 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/write_mbytes +Lines: 1 +71235 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: 
fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/enable +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun/lun_0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun/lun_0/fff5e16686 +SymlinkTo: ../../../../../../target/core/fileio_1/file_lio_1G +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun/lun_0/statistics +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/in_cmds +Lines: 1 +301950 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/read_mbytes +Lines: 1 +10195 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/write_mbytes +Lines: 1 +30195 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/enable +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun/lun_0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun/lun_0/eba1edf893 +SymlinkTo: ../../../../../../target/core/rbd_0/iscsi-images-demo +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun/lun_0/statistics +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
+Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun/lun_0/statistics/scsi_tgt_port +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/in_cmds +Lines: 1 +1234 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/read_mbytes +Lines: 1 +1504 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/write_mbytes +Lines: 1 +4733 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/vendor/github.com/prometheus/procfs/fs.go b/vendor/github.com/prometheus/procfs/fs.go new file mode 100644 index 000000000..0102ab0fd --- /dev/null +++ b/vendor/github.com/prometheus/procfs/fs.go @@ -0,0 +1,43 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package procfs + +import ( + "github.com/prometheus/procfs/internal/fs" +) + +// FS represents the pseudo-filesystem sys, which provides an interface to +// kernel data structures. +type FS struct { + proc fs.FS +} + +// DefaultMountPoint is the common mount point of the proc filesystem. +const DefaultMountPoint = fs.DefaultProcMountPoint + +// NewDefaultFS returns a new proc FS mounted under the default proc mountPoint. +// It will error if the mount point directory can't be read or is a file. +func NewDefaultFS() (FS, error) { + return NewFS(DefaultMountPoint) +} + +// NewFS returns a new proc FS mounted under the given proc mountPoint. It will error +// if the mount point directory can't be read or is a file. +func NewFS(mountPoint string) (FS, error) { + fs, err := fs.NewFS(mountPoint) + if err != nil { + return FS{}, err + } + return FS{fs}, nil +} diff --git a/vendor/github.com/prometheus/procfs/fscache.go b/vendor/github.com/prometheus/procfs/fscache.go new file mode 100644 index 000000000..f8070e6e2 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/fscache.go @@ -0,0 +1,422 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// Fscacheinfo represents fscache statistics. 
+type Fscacheinfo struct { + // Number of index cookies allocated + IndexCookiesAllocated uint64 + // data storage cookies allocated + DataStorageCookiesAllocated uint64 + // Number of special cookies allocated + SpecialCookiesAllocated uint64 + // Number of objects allocated + ObjectsAllocated uint64 + // Number of object allocation failures + ObjectAllocationsFailure uint64 + // Number of objects that reached the available state + ObjectsAvailable uint64 + // Number of objects that reached the dead state + ObjectsDead uint64 + // Number of objects that didn't have a coherency check + ObjectsWithoutCoherencyCheck uint64 + // Number of objects that passed a coherency check + ObjectsWithCoherencyCheck uint64 + // Number of objects that needed a coherency data update + ObjectsNeedCoherencyCheckUpdate uint64 + // Number of objects that were declared obsolete + ObjectsDeclaredObsolete uint64 + // Number of pages marked as being cached + PagesMarkedAsBeingCached uint64 + // Number of uncache page requests seen + UncachePagesRequestSeen uint64 + // Number of acquire cookie requests seen + AcquireCookiesRequestSeen uint64 + // Number of acq reqs given a NULL parent + AcquireRequestsWithNullParent uint64 + // Number of acq reqs rejected due to no cache available + AcquireRequestsRejectedNoCacheAvailable uint64 + // Number of acq reqs succeeded + AcquireRequestsSucceeded uint64 + // Number of acq reqs rejected due to error + AcquireRequestsRejectedDueToError uint64 + // Number of acq reqs failed on ENOMEM + AcquireRequestsFailedDueToEnomem uint64 + // Number of lookup calls made on cache backends + LookupsNumber uint64 + // Number of negative lookups made + LookupsNegative uint64 + // Number of positive lookups made + LookupsPositive uint64 + // Number of objects created by lookup + ObjectsCreatedByLookup uint64 + // Number of lookups timed out and requeued + LookupsTimedOutAndRequed uint64 + InvalidationsNumber uint64 + InvalidationsRunning uint64 + // Number of update 
cookie requests seen + UpdateCookieRequestSeen uint64 + // Number of upd reqs given a NULL parent + UpdateRequestsWithNullParent uint64 + // Number of upd reqs granted CPU time + UpdateRequestsRunning uint64 + // Number of relinquish cookie requests seen + RelinquishCookiesRequestSeen uint64 + // Number of rlq reqs given a NULL parent + RelinquishCookiesWithNullParent uint64 + // Number of rlq reqs waited on completion of creation + RelinquishRequestsWaitingCompleteCreation uint64 + // Relinqs rtr + RelinquishRetries uint64 + // Number of attribute changed requests seen + AttributeChangedRequestsSeen uint64 + // Number of attr changed requests queued + AttributeChangedRequestsQueued uint64 + // Number of attr changed rejected -ENOBUFS + AttributeChangedRejectDueToEnobufs uint64 + // Number of attr changed failed -ENOMEM + AttributeChangedFailedDueToEnomem uint64 + // Number of attr changed ops given CPU time + AttributeChangedOps uint64 + // Number of allocation requests seen + AllocationRequestsSeen uint64 + // Number of successful alloc reqs + AllocationOkRequests uint64 + // Number of alloc reqs that waited on lookup completion + AllocationWaitingOnLookup uint64 + // Number of alloc reqs rejected -ENOBUFS + AllocationsRejectedDueToEnobufs uint64 + // Number of alloc reqs aborted -ERESTARTSYS + AllocationsAbortedDueToErestartsys uint64 + // Number of alloc reqs submitted + AllocationOperationsSubmitted uint64 + // Number of alloc reqs waited for CPU time + AllocationsWaitedForCPU uint64 + // Number of alloc reqs aborted due to object death + AllocationsAbortedDueToObjectDeath uint64 + // Number of retrieval (read) requests seen + RetrievalsReadRequests uint64 + // Number of successful retr reqs + RetrievalsOk uint64 + // Number of retr reqs that waited on lookup completion + RetrievalsWaitingLookupCompletion uint64 + // Number of retr reqs returned -ENODATA + RetrievalsReturnedEnodata uint64 + // Number of retr reqs rejected -ENOBUFS + 
RetrievalsRejectedDueToEnobufs uint64 + // Number of retr reqs aborted -ERESTARTSYS + RetrievalsAbortedDueToErestartsys uint64 + // Number of retr reqs failed -ENOMEM + RetrievalsFailedDueToEnomem uint64 + // Number of retr reqs submitted + RetrievalsRequests uint64 + // Number of retr reqs waited for CPU time + RetrievalsWaitingCPU uint64 + // Number of retr reqs aborted due to object death + RetrievalsAbortedDueToObjectDeath uint64 + // Number of storage (write) requests seen + StoreWriteRequests uint64 + // Number of successful store reqs + StoreSuccessfulRequests uint64 + // Number of store reqs on a page already pending storage + StoreRequestsOnPendingStorage uint64 + // Number of store reqs rejected -ENOBUFS + StoreRequestsRejectedDueToEnobufs uint64 + // Number of store reqs failed -ENOMEM + StoreRequestsFailedDueToEnomem uint64 + // Number of store reqs submitted + StoreRequestsSubmitted uint64 + // Number of store reqs granted CPU time + StoreRequestsRunning uint64 + // Number of pages given store req processing time + StorePagesWithRequestsProcessing uint64 + // Number of store reqs deleted from tracking tree + StoreRequestsDeleted uint64 + // Number of store reqs over store limit + StoreRequestsOverStoreLimit uint64 + // Number of release reqs against pages with no pending store + ReleaseRequestsAgainstPagesWithNoPendingStorage uint64 + // Number of release reqs against pages stored by time lock granted + ReleaseRequestsAgainstPagesStoredByTimeLockGranted uint64 + // Number of release reqs ignored due to in-progress store + ReleaseRequestsIgnoredDueToInProgressStore uint64 + // Number of page stores cancelled due to release req + PageStoresCancelledByReleaseRequests uint64 + VmscanWaiting uint64 + // Number of times async ops added to pending queues + OpsPending uint64 + // Number of times async ops given CPU time + OpsRunning uint64 + // Number of times async ops queued for processing + OpsEnqueued uint64 + // Number of async ops cancelled + 
OpsCancelled uint64 + // Number of async ops rejected due to object lookup/create failure + OpsRejected uint64 + // Number of async ops initialised + OpsInitialised uint64 + // Number of async ops queued for deferred release + OpsDeferred uint64 + // Number of async ops released (should equal ini=N when idle) + OpsReleased uint64 + // Number of deferred-release async ops garbage collected + OpsGarbageCollected uint64 + // Number of in-progress alloc_object() cache ops + CacheopAllocationsinProgress uint64 + // Number of in-progress lookup_object() cache ops + CacheopLookupObjectInProgress uint64 + // Number of in-progress lookup_complete() cache ops + CacheopLookupCompleteInPorgress uint64 + // Number of in-progress grab_object() cache ops + CacheopGrabObjectInProgress uint64 + CacheopInvalidations uint64 + // Number of in-progress update_object() cache ops + CacheopUpdateObjectInProgress uint64 + // Number of in-progress drop_object() cache ops + CacheopDropObjectInProgress uint64 + // Number of in-progress put_object() cache ops + CacheopPutObjectInProgress uint64 + // Number of in-progress attr_changed() cache ops + CacheopAttributeChangeInProgress uint64 + // Number of in-progress sync_cache() cache ops + CacheopSyncCacheInProgress uint64 + // Number of in-progress read_or_alloc_page() cache ops + CacheopReadOrAllocPageInProgress uint64 + // Number of in-progress read_or_alloc_pages() cache ops + CacheopReadOrAllocPagesInProgress uint64 + // Number of in-progress allocate_page() cache ops + CacheopAllocatePageInProgress uint64 + // Number of in-progress allocate_pages() cache ops + CacheopAllocatePagesInProgress uint64 + // Number of in-progress write_page() cache ops + CacheopWritePagesInProgress uint64 + // Number of in-progress uncache_page() cache ops + CacheopUncachePagesInProgress uint64 + // Number of in-progress dissociate_pages() cache ops + CacheopDissociatePagesInProgress uint64 + // Number of object lookups/creations rejected due to lack of space + 
CacheevLookupsAndCreationsRejectedLackSpace uint64 + // Number of stale objects deleted + CacheevStaleObjectsDeleted uint64 + // Number of objects retired when relinquished + CacheevRetiredWhenReliquished uint64 + // Number of objects culled + CacheevObjectsCulled uint64 +} + +// Fscacheinfo returns information about current fscache statistics. +// See https://www.kernel.org/doc/Documentation/filesystems/caching/fscache.txt +func (fs FS) Fscacheinfo() (Fscacheinfo, error) { + b, err := util.ReadFileNoStat(fs.proc.Path("fs/fscache/stats")) + if err != nil { + return Fscacheinfo{}, err + } + + m, err := parseFscacheinfo(bytes.NewReader(b)) + if err != nil { + return Fscacheinfo{}, fmt.Errorf("failed to parse Fscacheinfo: %w", err) + } + + return *m, nil +} + +func setFSCacheFields(fields []string, setFields ...*uint64) error { + var err error + if len(fields) < len(setFields) { + return fmt.Errorf("Insufficient number of fields, expected %v, got %v", len(setFields), len(fields)) + } + + for i := range setFields { + *setFields[i], err = strconv.ParseUint(strings.Split(fields[i], "=")[1], 0, 64) + if err != nil { + return err + } + } + return nil +} + +func parseFscacheinfo(r io.Reader) (*Fscacheinfo, error) { + var m Fscacheinfo + s := bufio.NewScanner(r) + for s.Scan() { + fields := strings.Fields(s.Text()) + if len(fields) < 2 { + return nil, fmt.Errorf("malformed Fscacheinfo line: %q", s.Text()) + } + + switch fields[0] { + case "Cookies:": + err := setFSCacheFields(fields[1:], &m.IndexCookiesAllocated, &m.DataStorageCookiesAllocated, + &m.SpecialCookiesAllocated) + if err != nil { + return &m, err + } + case "Objects:": + err := setFSCacheFields(fields[1:], &m.ObjectsAllocated, &m.ObjectAllocationsFailure, + &m.ObjectsAvailable, &m.ObjectsDead) + if err != nil { + return &m, err + } + case "ChkAux": + err := setFSCacheFields(fields[2:], &m.ObjectsWithoutCoherencyCheck, &m.ObjectsWithCoherencyCheck, + &m.ObjectsNeedCoherencyCheckUpdate, &m.ObjectsDeclaredObsolete) 
+ if err != nil { + return &m, err + } + case "Pages": + err := setFSCacheFields(fields[2:], &m.PagesMarkedAsBeingCached, &m.UncachePagesRequestSeen) + if err != nil { + return &m, err + } + case "Acquire:": + err := setFSCacheFields(fields[1:], &m.AcquireCookiesRequestSeen, &m.AcquireRequestsWithNullParent, + &m.AcquireRequestsRejectedNoCacheAvailable, &m.AcquireRequestsSucceeded, &m.AcquireRequestsRejectedDueToError, + &m.AcquireRequestsFailedDueToEnomem) + if err != nil { + return &m, err + } + case "Lookups:": + err := setFSCacheFields(fields[1:], &m.LookupsNumber, &m.LookupsNegative, &m.LookupsPositive, + &m.ObjectsCreatedByLookup, &m.LookupsTimedOutAndRequed) + if err != nil { + return &m, err + } + case "Invals": + err := setFSCacheFields(fields[2:], &m.InvalidationsNumber, &m.InvalidationsRunning) + if err != nil { + return &m, err + } + case "Updates:": + err := setFSCacheFields(fields[1:], &m.UpdateCookieRequestSeen, &m.UpdateRequestsWithNullParent, + &m.UpdateRequestsRunning) + if err != nil { + return &m, err + } + case "Relinqs:": + err := setFSCacheFields(fields[1:], &m.RelinquishCookiesRequestSeen, &m.RelinquishCookiesWithNullParent, + &m.RelinquishRequestsWaitingCompleteCreation, &m.RelinquishRetries) + if err != nil { + return &m, err + } + case "AttrChg:": + err := setFSCacheFields(fields[1:], &m.AttributeChangedRequestsSeen, &m.AttributeChangedRequestsQueued, + &m.AttributeChangedRejectDueToEnobufs, &m.AttributeChangedFailedDueToEnomem, &m.AttributeChangedOps) + if err != nil { + return &m, err + } + case "Allocs": + if strings.Split(fields[2], "=")[0] == "n" { + err := setFSCacheFields(fields[2:], &m.AllocationRequestsSeen, &m.AllocationOkRequests, + &m.AllocationWaitingOnLookup, &m.AllocationsRejectedDueToEnobufs, &m.AllocationsAbortedDueToErestartsys) + if err != nil { + return &m, err + } + } else { + err := setFSCacheFields(fields[2:], &m.AllocationOperationsSubmitted, &m.AllocationsWaitedForCPU, + &m.AllocationsAbortedDueToObjectDeath) + if 
err != nil { + return &m, err + } + } + case "Retrvls:": + if strings.Split(fields[1], "=")[0] == "n" { + err := setFSCacheFields(fields[1:], &m.RetrievalsReadRequests, &m.RetrievalsOk, &m.RetrievalsWaitingLookupCompletion, + &m.RetrievalsReturnedEnodata, &m.RetrievalsRejectedDueToEnobufs, &m.RetrievalsAbortedDueToErestartsys, + &m.RetrievalsFailedDueToEnomem) + if err != nil { + return &m, err + } + } else { + err := setFSCacheFields(fields[1:], &m.RetrievalsRequests, &m.RetrievalsWaitingCPU, &m.RetrievalsAbortedDueToObjectDeath) + if err != nil { + return &m, err + } + } + case "Stores": + if strings.Split(fields[2], "=")[0] == "n" { + err := setFSCacheFields(fields[2:], &m.StoreWriteRequests, &m.StoreSuccessfulRequests, + &m.StoreRequestsOnPendingStorage, &m.StoreRequestsRejectedDueToEnobufs, &m.StoreRequestsFailedDueToEnomem) + if err != nil { + return &m, err + } + } else { + err := setFSCacheFields(fields[2:], &m.StoreRequestsSubmitted, &m.StoreRequestsRunning, + &m.StorePagesWithRequestsProcessing, &m.StoreRequestsDeleted, &m.StoreRequestsOverStoreLimit) + if err != nil { + return &m, err + } + } + case "VmScan": + err := setFSCacheFields(fields[2:], &m.ReleaseRequestsAgainstPagesWithNoPendingStorage, + &m.ReleaseRequestsAgainstPagesStoredByTimeLockGranted, &m.ReleaseRequestsIgnoredDueToInProgressStore, + &m.PageStoresCancelledByReleaseRequests, &m.VmscanWaiting) + if err != nil { + return &m, err + } + case "Ops": + if strings.Split(fields[2], "=")[0] == "pend" { + err := setFSCacheFields(fields[2:], &m.OpsPending, &m.OpsRunning, &m.OpsEnqueued, &m.OpsCancelled, &m.OpsRejected) + if err != nil { + return &m, err + } + } else { + err := setFSCacheFields(fields[2:], &m.OpsInitialised, &m.OpsDeferred, &m.OpsReleased, &m.OpsGarbageCollected) + if err != nil { + return &m, err + } + } + case "CacheOp:": + if strings.Split(fields[1], "=")[0] == "alo" { + err := setFSCacheFields(fields[1:], &m.CacheopAllocationsinProgress, &m.CacheopLookupObjectInProgress, + 
&m.CacheopLookupCompleteInPorgress, &m.CacheopGrabObjectInProgress) + if err != nil { + return &m, err + } + } else if strings.Split(fields[1], "=")[0] == "inv" { + err := setFSCacheFields(fields[1:], &m.CacheopInvalidations, &m.CacheopUpdateObjectInProgress, + &m.CacheopDropObjectInProgress, &m.CacheopPutObjectInProgress, &m.CacheopAttributeChangeInProgress, + &m.CacheopSyncCacheInProgress) + if err != nil { + return &m, err + } + } else { + err := setFSCacheFields(fields[1:], &m.CacheopReadOrAllocPageInProgress, &m.CacheopReadOrAllocPagesInProgress, + &m.CacheopAllocatePageInProgress, &m.CacheopAllocatePagesInProgress, &m.CacheopWritePagesInProgress, + &m.CacheopUncachePagesInProgress, &m.CacheopDissociatePagesInProgress) + if err != nil { + return &m, err + } + } + case "CacheEv:": + err := setFSCacheFields(fields[1:], &m.CacheevLookupsAndCreationsRejectedLackSpace, &m.CacheevStaleObjectsDeleted, + &m.CacheevRetiredWhenReliquished, &m.CacheevObjectsCulled) + if err != nil { + return &m, err + } + } + } + + return &m, nil +} diff --git a/vendor/github.com/prometheus/procfs/internal/fs/fs.go b/vendor/github.com/prometheus/procfs/internal/fs/fs.go new file mode 100644 index 000000000..0040753b1 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/internal/fs/fs.go @@ -0,0 +1,55 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package fs + +import ( + "fmt" + "os" + "path/filepath" +) + +const ( + // DefaultProcMountPoint is the common mount point of the proc filesystem. + DefaultProcMountPoint = "/proc" + + // DefaultSysMountPoint is the common mount point of the sys filesystem. + DefaultSysMountPoint = "/sys" + + // DefaultConfigfsMountPoint is the common mount point of the configfs + DefaultConfigfsMountPoint = "/sys/kernel/config" +) + +// FS represents a pseudo-filesystem, normally /proc or /sys, which provides an +// interface to kernel data structures. +type FS string + +// NewFS returns a new FS mounted under the given mountPoint. It will error +// if the mount point can't be read. +func NewFS(mountPoint string) (FS, error) { + info, err := os.Stat(mountPoint) + if err != nil { + return "", fmt.Errorf("could not read %q: %w", mountPoint, err) + } + if !info.IsDir() { + return "", fmt.Errorf("mount point %q is not a directory", mountPoint) + } + + return FS(mountPoint), nil +} + +// Path appends the given path elements to the filesystem path, adding separators +// as necessary. +func (fs FS) Path(p ...string) string { + return filepath.Join(append([]string{string(fs)}, p...)...) +} diff --git a/vendor/github.com/prometheus/procfs/internal/util/parse.go b/vendor/github.com/prometheus/procfs/internal/util/parse.go new file mode 100644 index 000000000..22cb07a6b --- /dev/null +++ b/vendor/github.com/prometheus/procfs/internal/util/parse.go @@ -0,0 +1,97 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package util + +import ( + "io/ioutil" + "strconv" + "strings" +) + +// ParseUint32s parses a slice of strings into a slice of uint32s. +func ParseUint32s(ss []string) ([]uint32, error) { + us := make([]uint32, 0, len(ss)) + for _, s := range ss { + u, err := strconv.ParseUint(s, 10, 32) + if err != nil { + return nil, err + } + + us = append(us, uint32(u)) + } + + return us, nil +} + +// ParseUint64s parses a slice of strings into a slice of uint64s. +func ParseUint64s(ss []string) ([]uint64, error) { + us := make([]uint64, 0, len(ss)) + for _, s := range ss { + u, err := strconv.ParseUint(s, 10, 64) + if err != nil { + return nil, err + } + + us = append(us, u) + } + + return us, nil +} + +// ParsePInt64s parses a slice of strings into a slice of int64 pointers. +func ParsePInt64s(ss []string) ([]*int64, error) { + us := make([]*int64, 0, len(ss)) + for _, s := range ss { + u, err := strconv.ParseInt(s, 10, 64) + if err != nil { + return nil, err + } + + us = append(us, &u) + } + + return us, nil +} + +// ReadUintFromFile reads a file and attempts to parse a uint64 from it. +func ReadUintFromFile(path string) (uint64, error) { + data, err := ioutil.ReadFile(path) + if err != nil { + return 0, err + } + return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64) +} + +// ReadIntFromFile reads a file and attempts to parse a int64 from it. +func ReadIntFromFile(path string) (int64, error) { + data, err := ioutil.ReadFile(path) + if err != nil { + return 0, err + } + return strconv.ParseInt(strings.TrimSpace(string(data)), 10, 64) +} + +// ParseBool parses a string into a boolean pointer. 
+func ParseBool(b string) *bool { + var truth bool + switch b { + case "enabled": + truth = true + case "disabled": + truth = false + default: + return nil + } + return &truth +} diff --git a/vendor/github.com/prometheus/procfs/internal/util/readfile.go b/vendor/github.com/prometheus/procfs/internal/util/readfile.go new file mode 100644 index 000000000..8051161b2 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/internal/util/readfile.go @@ -0,0 +1,38 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package util + +import ( + "io" + "io/ioutil" + "os" +) + +// ReadFileNoStat uses ioutil.ReadAll to read contents of entire file. +// This is similar to ioutil.ReadFile but without the call to os.Stat, because +// many files in /proc and /sys report incorrect file sizes (either 0 or 4096). +// Reads a max file size of 512kB. For files larger than this, a scanner +// should be used. 
+func ReadFileNoStat(filename string) ([]byte, error) { + const maxBufferSize = 1024 * 512 + + f, err := os.Open(filename) + if err != nil { + return nil, err + } + defer f.Close() + + reader := io.LimitReader(f, maxBufferSize) + return ioutil.ReadAll(reader) +} diff --git a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go new file mode 100644 index 000000000..c07de0b6c --- /dev/null +++ b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go @@ -0,0 +1,48 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build linux,!appengine + +package util + +import ( + "bytes" + "os" + "syscall" +) + +// SysReadFile is a simplified ioutil.ReadFile that invokes syscall.Read directly. +// https://github.com/prometheus/node_exporter/pull/728/files +// +// Note that this function will not read files larger than 128 bytes. +func SysReadFile(file string) (string, error) { + f, err := os.Open(file) + if err != nil { + return "", err + } + defer f.Close() + + // On some machines, hwmon drivers are broken and return EAGAIN. This causes + // Go's ioutil.ReadFile implementation to poll forever. + // + // Since we either want to read data or bail immediately, do the simplest + // possible read using syscall directly. 
+ const sysFileBufferSize = 128 + b := make([]byte, sysFileBufferSize) + n, err := syscall.Read(int(f.Fd()), b) + if err != nil { + return "", err + } + + return string(bytes.TrimSpace(b[:n])), nil +} diff --git a/vendor/github.com/blevesearch/bleve/v2/config_disk.go b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go similarity index 55% rename from vendor/github.com/blevesearch/bleve/v2/config_disk.go rename to vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go index ccfd6da8c..bd55b4537 100644 --- a/vendor/github.com/blevesearch/bleve/v2/config_disk.go +++ b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go @@ -1,10 +1,9 @@ -// Copyright (c) 2014 Couchbase, Inc. -// +// Copyright 2019 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -12,14 +11,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build !appengine,!appenginevm +// +build linux,appengine !linux -package bleve +package util -import "github.com/blevesearch/bleve/v2/index/upsidedown/store/boltdb" +import ( + "fmt" +) -// in normal environments we configure boltdb as the default storage -func initDisk() { - // default kv store - Config.DefaultKVStore = boltdb.Name +// SysReadFile is here implemented as a noop for builds that do not support +// the read syscall. For example Windows, or Linux on Google App Engine. 
+func SysReadFile(file string) (string, error) { + return "", fmt.Errorf("not supported on this platform") } diff --git a/vendor/github.com/prometheus/procfs/internal/util/valueparser.go b/vendor/github.com/prometheus/procfs/internal/util/valueparser.go new file mode 100644 index 000000000..fe2355d3c --- /dev/null +++ b/vendor/github.com/prometheus/procfs/internal/util/valueparser.go @@ -0,0 +1,91 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package util + +import ( + "strconv" +) + +// TODO(mdlayher): util packages are an anti-pattern and this should be moved +// somewhere else that is more focused in the future. + +// A ValueParser enables parsing a single string into a variety of data types +// in a concise and safe way. The Err method must be invoked after invoking +// any other methods to ensure a value was successfully parsed. +type ValueParser struct { + v string + err error +} + +// NewValueParser creates a ValueParser using the input string. +func NewValueParser(v string) *ValueParser { + return &ValueParser{v: v} +} + +// Int interprets the underlying value as an int and returns that value. +func (vp *ValueParser) Int() int { return int(vp.int64()) } + +// PInt64 interprets the underlying value as an int64 and returns a pointer to +// that value. 
+func (vp *ValueParser) PInt64() *int64 { + if vp.err != nil { + return nil + } + + v := vp.int64() + return &v +} + +// int64 interprets the underlying value as an int64 and returns that value. +// TODO: export if/when necessary. +func (vp *ValueParser) int64() int64 { + if vp.err != nil { + return 0 + } + + // A base value of zero makes ParseInt infer the correct base using the + // string's prefix, if any. + const base = 0 + v, err := strconv.ParseInt(vp.v, base, 64) + if err != nil { + vp.err = err + return 0 + } + + return v +} + +// PUInt64 interprets the underlying value as an uint64 and returns a pointer to +// that value. +func (vp *ValueParser) PUInt64() *uint64 { + if vp.err != nil { + return nil + } + + // A base value of zero makes ParseInt infer the correct base using the + // string's prefix, if any. + const base = 0 + v, err := strconv.ParseUint(vp.v, base, 64) + if err != nil { + vp.err = err + return nil + } + + return &v +} + +// Err returns the last error, if any, encountered by the ValueParser. +func (vp *ValueParser) Err() error { + return vp.err +} diff --git a/vendor/github.com/m3db/prometheus_procfs/ipvs.go b/vendor/github.com/prometheus/procfs/ipvs.go similarity index 66% rename from vendor/github.com/m3db/prometheus_procfs/ipvs.go rename to vendor/github.com/prometheus/procfs/ipvs.go index e7012f732..89e447746 100644 --- a/vendor/github.com/m3db/prometheus_procfs/ipvs.go +++ b/vendor/github.com/prometheus/procfs/ipvs.go @@ -1,7 +1,21 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + package procfs import ( "bufio" + "bytes" "encoding/hex" "errors" "fmt" @@ -11,6 +25,8 @@ import ( "os" "strconv" "strings" + + "github.com/prometheus/procfs/internal/util" ) // IPVSStats holds IPVS statistics, as exposed by the kernel in `/proc/net/ip_vs_stats`. @@ -31,14 +47,16 @@ type IPVSStats struct { type IPVSBackendStatus struct { // The local (virtual) IP address. LocalAddress net.IP - // The local (virtual) port. - LocalPort uint16 - // The transport protocol (TCP, UDP). - Proto string // The remote (real) IP address. RemoteAddress net.IP + // The local (virtual) port. + LocalPort uint16 // The remote (real) port. RemotePort uint16 + // The local firewall mark + LocalMark string + // The transport protocol (TCP, UDP). + Proto string // The current number of active connections for this virtual/real address pair. ActiveConn uint64 // The current number of inactive connections for this virtual/real address pair. @@ -47,29 +65,18 @@ type IPVSBackendStatus struct { Weight uint64 } -// NewIPVSStats reads the IPVS statistics. -func NewIPVSStats() (IPVSStats, error) { - fs, err := NewFS(DefaultMountPoint) +// IPVSStats reads the IPVS statistics from the specified `proc` filesystem. +func (fs FS) IPVSStats() (IPVSStats, error) { + data, err := util.ReadFileNoStat(fs.proc.Path("net/ip_vs_stats")) if err != nil { return IPVSStats{}, err } - return fs.NewIPVSStats() -} - -// NewIPVSStats reads the IPVS statistics from the specified `proc` filesystem. -func (fs FS) NewIPVSStats() (IPVSStats, error) { - file, err := os.Open(fs.Path("net/ip_vs_stats")) - if err != nil { - return IPVSStats{}, err - } - defer file.Close() - - return parseIPVSStats(file) + return parseIPVSStats(bytes.NewReader(data)) } // parseIPVSStats performs the actual parsing of `ip_vs_stats`. 
-func parseIPVSStats(file io.Reader) (IPVSStats, error) { +func parseIPVSStats(r io.Reader) (IPVSStats, error) { var ( statContent []byte statLines []string @@ -77,7 +84,7 @@ func parseIPVSStats(file io.Reader) (IPVSStats, error) { stats IPVSStats ) - statContent, err := ioutil.ReadAll(file) + statContent, err := ioutil.ReadAll(r) if err != nil { return IPVSStats{}, err } @@ -116,19 +123,9 @@ func parseIPVSStats(file io.Reader) (IPVSStats, error) { return stats, nil } -// NewIPVSBackendStatus reads and returns the status of all (virtual,real) server pairs. -func NewIPVSBackendStatus() ([]IPVSBackendStatus, error) { - fs, err := NewFS(DefaultMountPoint) - if err != nil { - return []IPVSBackendStatus{}, err - } - - return fs.NewIPVSBackendStatus() -} - -// NewIPVSBackendStatus reads and returns the status of all (virtual,real) server pairs from the specified `proc` filesystem. -func (fs FS) NewIPVSBackendStatus() ([]IPVSBackendStatus, error) { - file, err := os.Open(fs.Path("net/ip_vs")) +// IPVSBackendStatus reads and returns the status of all (virtual,real) server pairs from the specified `proc` filesystem. 
+func (fs FS) IPVSBackendStatus() ([]IPVSBackendStatus, error) { + file, err := os.Open(fs.proc.Path("net/ip_vs")) if err != nil { return nil, err } @@ -142,13 +139,14 @@ func parseIPVSBackendStatus(file io.Reader) ([]IPVSBackendStatus, error) { status []IPVSBackendStatus scanner = bufio.NewScanner(file) proto string + localMark string localAddress net.IP localPort uint16 err error ) for scanner.Scan() { - fields := strings.Fields(string(scanner.Text())) + fields := strings.Fields(scanner.Text()) if len(fields) == 0 { continue } @@ -160,10 +158,19 @@ func parseIPVSBackendStatus(file io.Reader) ([]IPVSBackendStatus, error) { continue } proto = fields[0] + localMark = "" localAddress, localPort, err = parseIPPort(fields[1]) if err != nil { return nil, err } + case fields[0] == "FWM": + if len(fields) < 2 { + continue + } + proto = fields[0] + localMark = fields[1] + localAddress = nil + localPort = 0 case fields[0] == "->": if len(fields) < 6 { continue @@ -187,6 +194,7 @@ func parseIPVSBackendStatus(file io.Reader) ([]IPVSBackendStatus, error) { status = append(status, IPVSBackendStatus{ LocalAddress: localAddress, LocalPort: localPort, + LocalMark: localMark, RemoteAddress: remoteAddress, RemotePort: remotePort, Proto: proto, @@ -200,22 +208,31 @@ func parseIPVSBackendStatus(file io.Reader) ([]IPVSBackendStatus, error) { } func parseIPPort(s string) (net.IP, uint16, error) { - tmp := strings.SplitN(s, ":", 2) - - if len(tmp) != 2 { - return nil, 0, fmt.Errorf("invalid IP:Port: %s", s) - } + var ( + ip net.IP + err error + ) - if len(tmp[0]) != 8 && len(tmp[0]) != 32 { - return nil, 0, fmt.Errorf("invalid IP: %s", tmp[0]) + switch len(s) { + case 13: + ip, err = hex.DecodeString(s[0:8]) + if err != nil { + return nil, 0, err + } + case 46: + ip = net.ParseIP(s[1:40]) + if ip == nil { + return nil, 0, fmt.Errorf("invalid IPv6 address: %s", s[1:40]) + } + default: + return nil, 0, fmt.Errorf("unexpected IP:Port: %s", s) } - ip, err := hex.DecodeString(tmp[0]) - if err 
!= nil { - return nil, 0, err + portString := s[len(s)-4:] + if len(portString) != 4 { + return nil, 0, fmt.Errorf("unexpected port string format: %s", portString) } - - port, err := strconv.ParseUint(tmp[1], 16, 16) + port, err := strconv.ParseUint(portString, 16, 16) if err != nil { return nil, 0, err } diff --git a/vendor/github.com/prometheus/procfs/kernel_random.go b/vendor/github.com/prometheus/procfs/kernel_random.go new file mode 100644 index 000000000..da3a941d6 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/kernel_random.go @@ -0,0 +1,62 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !windows + +package procfs + +import ( + "os" + + "github.com/prometheus/procfs/internal/util" +) + +// KernelRandom contains information about to the kernel's random number generator. +type KernelRandom struct { + // EntropyAvaliable gives the available entropy, in bits. + EntropyAvaliable *uint64 + // PoolSize gives the size of the entropy pool, in bits. + PoolSize *uint64 + // URandomMinReseedSeconds is the number of seconds after which the DRNG will be reseeded. + URandomMinReseedSeconds *uint64 + // WriteWakeupThreshold the number of bits of entropy below which we wake up processes + // that do a select(2) or poll(2) for write access to /dev/random. 
+ WriteWakeupThreshold *uint64 + // ReadWakeupThreshold is the number of bits of entropy required for waking up processes that sleep + // waiting for entropy from /dev/random. + ReadWakeupThreshold *uint64 +} + +// KernelRandom returns values from /proc/sys/kernel/random. +func (fs FS) KernelRandom() (KernelRandom, error) { + random := KernelRandom{} + + for file, p := range map[string]**uint64{ + "entropy_avail": &random.EntropyAvaliable, + "poolsize": &random.PoolSize, + "urandom_min_reseed_secs": &random.URandomMinReseedSeconds, + "write_wakeup_threshold": &random.WriteWakeupThreshold, + "read_wakeup_threshold": &random.ReadWakeupThreshold, + } { + val, err := util.ReadUintFromFile(fs.proc.Path("sys", "kernel", "random", file)) + if os.IsNotExist(err) { + continue + } + if err != nil { + return random, err + } + *p = &val + } + + return random, nil +} diff --git a/vendor/github.com/prometheus/procfs/loadavg.go b/vendor/github.com/prometheus/procfs/loadavg.go new file mode 100644 index 000000000..0cce190ec --- /dev/null +++ b/vendor/github.com/prometheus/procfs/loadavg.go @@ -0,0 +1,62 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "fmt" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// LoadAvg represents an entry in /proc/loadavg +type LoadAvg struct { + Load1 float64 + Load5 float64 + Load15 float64 +} + +// LoadAvg returns loadavg from /proc. 
+func (fs FS) LoadAvg() (*LoadAvg, error) { + path := fs.proc.Path("loadavg") + + data, err := util.ReadFileNoStat(path) + if err != nil { + return nil, err + } + return parseLoad(data) +} + +// Parse /proc loadavg and return 1m, 5m and 15m. +func parseLoad(loadavgBytes []byte) (*LoadAvg, error) { + loads := make([]float64, 3) + parts := strings.Fields(string(loadavgBytes)) + if len(parts) < 3 { + return nil, fmt.Errorf("malformed loadavg line: too few fields in loadavg string: %q", string(loadavgBytes)) + } + + var err error + for i, load := range parts[0:3] { + loads[i], err = strconv.ParseFloat(load, 64) + if err != nil { + return nil, fmt.Errorf("could not parse load %q: %w", load, err) + } + } + return &LoadAvg{ + Load1: loads[0], + Load5: loads[1], + Load15: loads[2], + }, nil +} diff --git a/vendor/github.com/prometheus/procfs/mdstat.go b/vendor/github.com/prometheus/procfs/mdstat.go new file mode 100644 index 000000000..4c4493bfa --- /dev/null +++ b/vendor/github.com/prometheus/procfs/mdstat.go @@ -0,0 +1,213 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "fmt" + "io/ioutil" + "regexp" + "strconv" + "strings" +) + +var ( + statusLineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[[U_]+\]`) + recoveryLineRE = regexp.MustCompile(`\((\d+)/\d+\)`) + componentDeviceRE = regexp.MustCompile(`(.*)\[\d+\]`) +) + +// MDStat holds info parsed from /proc/mdstat. 
+type MDStat struct { + // Name of the device. + Name string + // activity-state of the device. + ActivityState string + // Number of active disks. + DisksActive int64 + // Total number of disks the device requires. + DisksTotal int64 + // Number of failed disks. + DisksFailed int64 + // Spare disks in the device. + DisksSpare int64 + // Number of blocks the device holds. + BlocksTotal int64 + // Number of blocks on the device that are in sync. + BlocksSynced int64 + // Name of md component devices + Devices []string +} + +// MDStat parses an mdstat-file (/proc/mdstat) and returns a slice of +// structs containing the relevant info. More information available here: +// https://raid.wiki.kernel.org/index.php/Mdstat +func (fs FS) MDStat() ([]MDStat, error) { + data, err := ioutil.ReadFile(fs.proc.Path("mdstat")) + if err != nil { + return nil, err + } + mdstat, err := parseMDStat(data) + if err != nil { + return nil, fmt.Errorf("error parsing mdstat %q: %w", fs.proc.Path("mdstat"), err) + } + return mdstat, nil +} + +// parseMDStat parses data from mdstat file (/proc/mdstat) and returns a slice of +// structs containing the relevant info. +func parseMDStat(mdStatData []byte) ([]MDStat, error) { + mdStats := []MDStat{} + lines := strings.Split(string(mdStatData), "\n") + + for i, line := range lines { + if strings.TrimSpace(line) == "" || line[0] == ' ' || + strings.HasPrefix(line, "Personalities") || + strings.HasPrefix(line, "unused") { + continue + } + + deviceFields := strings.Fields(line) + if len(deviceFields) < 3 { + return nil, fmt.Errorf("not enough fields in mdline (expected at least 3): %s", line) + } + mdName := deviceFields[0] // mdx + state := deviceFields[2] // active or inactive + + if len(lines) <= i+3 { + return nil, fmt.Errorf("error parsing %q: too few lines for md device", mdName) + } + + // Failed disks have the suffix (F) & Spare disks have the suffix (S). 
+ fail := int64(strings.Count(line, "(F)")) + spare := int64(strings.Count(line, "(S)")) + active, total, size, err := evalStatusLine(lines[i], lines[i+1]) + + if err != nil { + return nil, fmt.Errorf("error parsing md device lines: %w", err) + } + + syncLineIdx := i + 2 + if strings.Contains(lines[i+2], "bitmap") { // skip bitmap line + syncLineIdx++ + } + + // If device is syncing at the moment, get the number of currently + // synced bytes, otherwise that number equals the size of the device. + syncedBlocks := size + recovering := strings.Contains(lines[syncLineIdx], "recovery") + resyncing := strings.Contains(lines[syncLineIdx], "resync") + checking := strings.Contains(lines[syncLineIdx], "check") + + // Append recovery and resyncing state info. + if recovering || resyncing || checking { + if recovering { + state = "recovering" + } else if checking { + state = "checking" + } else { + state = "resyncing" + } + + // Handle case when resync=PENDING or resync=DELAYED. + if strings.Contains(lines[syncLineIdx], "PENDING") || + strings.Contains(lines[syncLineIdx], "DELAYED") { + syncedBlocks = 0 + } else { + syncedBlocks, err = evalRecoveryLine(lines[syncLineIdx]) + if err != nil { + return nil, fmt.Errorf("error parsing sync line in md device %q: %w", mdName, err) + } + } + } + + mdStats = append(mdStats, MDStat{ + Name: mdName, + ActivityState: state, + DisksActive: active, + DisksFailed: fail, + DisksSpare: spare, + DisksTotal: total, + BlocksTotal: size, + BlocksSynced: syncedBlocks, + Devices: evalComponentDevices(deviceFields), + }) + } + + return mdStats, nil +} + +func evalStatusLine(deviceLine, statusLine string) (active, total, size int64, err error) { + + sizeStr := strings.Fields(statusLine)[0] + size, err = strconv.ParseInt(sizeStr, 10, 64) + if err != nil { + return 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err) + } + + if strings.Contains(deviceLine, "raid0") || strings.Contains(deviceLine, "linear") { + // In the device 
deviceLine, only disks have a number associated with them in []. + total = int64(strings.Count(deviceLine, "[")) + return total, total, size, nil + } + + if strings.Contains(deviceLine, "inactive") { + return 0, 0, size, nil + } + + matches := statusLineRE.FindStringSubmatch(statusLine) + if len(matches) != 4 { + return 0, 0, 0, fmt.Errorf("couldn't find all the substring matches: %s", statusLine) + } + + total, err = strconv.ParseInt(matches[2], 10, 64) + if err != nil { + return 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err) + } + + active, err = strconv.ParseInt(matches[3], 10, 64) + if err != nil { + return 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err) + } + + return active, total, size, nil +} + +func evalRecoveryLine(recoveryLine string) (syncedBlocks int64, err error) { + matches := recoveryLineRE.FindStringSubmatch(recoveryLine) + if len(matches) != 2 { + return 0, fmt.Errorf("unexpected recoveryLine: %s", recoveryLine) + } + + syncedBlocks, err = strconv.ParseInt(matches[1], 10, 64) + if err != nil { + return 0, fmt.Errorf("error parsing int from recoveryLine %q: %w", recoveryLine, err) + } + + return syncedBlocks, nil +} + +func evalComponentDevices(deviceFields []string) []string { + mdComponentDevices := make([]string, 0) + if len(deviceFields) > 3 { + for _, field := range deviceFields[4:] { + match := componentDeviceRE.FindStringSubmatch(field) + if match == nil { + continue + } + mdComponentDevices = append(mdComponentDevices, match[1]) + } + } + + return mdComponentDevices +} diff --git a/vendor/github.com/prometheus/procfs/meminfo.go b/vendor/github.com/prometheus/procfs/meminfo.go new file mode 100644 index 000000000..f65e174e5 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/meminfo.go @@ -0,0 +1,277 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// Meminfo represents memory statistics. +type Meminfo struct { + // Total usable ram (i.e. physical ram minus a few reserved + // bits and the kernel binary code) + MemTotal *uint64 + // The sum of LowFree+HighFree + MemFree *uint64 + // An estimate of how much memory is available for starting + // new applications, without swapping. Calculated from + // MemFree, SReclaimable, the size of the file LRU lists, and + // the low watermarks in each zone. The estimate takes into + // account that the system needs some page cache to function + // well, and that not all reclaimable slab will be + // reclaimable, due to items being in use. The impact of those + // factors will vary from system to system. + MemAvailable *uint64 + // Relatively temporary storage for raw disk blocks shouldn't + // get tremendously large (20MB or so) + Buffers *uint64 + Cached *uint64 + // Memory that once was swapped out, is swapped back in but + // still also is in the swapfile (if memory is needed it + // doesn't need to be swapped out AGAIN because it is already + // in the swapfile. This saves I/O) + SwapCached *uint64 + // Memory that has been used more recently and usually not + // reclaimed unless absolutely necessary. + Active *uint64 + // Memory which has been less recently used. 
It is more + // eligible to be reclaimed for other purposes + Inactive *uint64 + ActiveAnon *uint64 + InactiveAnon *uint64 + ActiveFile *uint64 + InactiveFile *uint64 + Unevictable *uint64 + Mlocked *uint64 + // total amount of swap space available + SwapTotal *uint64 + // Memory which has been evicted from RAM, and is temporarily + // on the disk + SwapFree *uint64 + // Memory which is waiting to get written back to the disk + Dirty *uint64 + // Memory which is actively being written back to the disk + Writeback *uint64 + // Non-file backed pages mapped into userspace page tables + AnonPages *uint64 + // files which have been mapped, such as libraries + Mapped *uint64 + Shmem *uint64 + // in-kernel data structures cache + Slab *uint64 + // Part of Slab, that might be reclaimed, such as caches + SReclaimable *uint64 + // Part of Slab, that cannot be reclaimed on memory pressure + SUnreclaim *uint64 + KernelStack *uint64 + // amount of memory dedicated to the lowest level of page + // tables. + PageTables *uint64 + // NFS pages sent to the server, but not yet committed to + // stable storage + NFSUnstable *uint64 + // Memory used for block device "bounce buffers" + Bounce *uint64 + // Memory used by FUSE for temporary writeback buffers + WritebackTmp *uint64 + // Based on the overcommit ratio ('vm.overcommit_ratio'), + // this is the total amount of memory currently available to + // be allocated on the system. This limit is only adhered to + // if strict overcommit accounting is enabled (mode 2 in + // 'vm.overcommit_memory'). + // The CommitLimit is calculated with the following formula: + // CommitLimit = ([total RAM pages] - [total huge TLB pages]) * + // overcommit_ratio / 100 + [total swap pages] + // For example, on a system with 1G of physical RAM and 7G + // of swap with a `vm.overcommit_ratio` of 30 it would + // yield a CommitLimit of 7.3G. + // For more details, see the memory overcommit documentation + // in vm/overcommit-accounting. 
+ CommitLimit *uint64 + // The amount of memory presently allocated on the system. + // The committed memory is a sum of all of the memory which + // has been allocated by processes, even if it has not been + // "used" by them as of yet. A process which malloc()'s 1G + // of memory, but only touches 300M of it will show up as + // using 1G. This 1G is memory which has been "committed" to + // by the VM and can be used at any time by the allocating + // application. With strict overcommit enabled on the system + // (mode 2 in 'vm.overcommit_memory'),allocations which would + // exceed the CommitLimit (detailed above) will not be permitted. + // This is useful if one needs to guarantee that processes will + // not fail due to lack of memory once that memory has been + // successfully allocated. + CommittedAS *uint64 + // total size of vmalloc memory area + VmallocTotal *uint64 + // amount of vmalloc area which is used + VmallocUsed *uint64 + // largest contiguous block of vmalloc area which is free + VmallocChunk *uint64 + HardwareCorrupted *uint64 + AnonHugePages *uint64 + ShmemHugePages *uint64 + ShmemPmdMapped *uint64 + CmaTotal *uint64 + CmaFree *uint64 + HugePagesTotal *uint64 + HugePagesFree *uint64 + HugePagesRsvd *uint64 + HugePagesSurp *uint64 + Hugepagesize *uint64 + DirectMap4k *uint64 + DirectMap2M *uint64 + DirectMap1G *uint64 +} + +// Meminfo returns an information about current kernel/system memory statistics. 
+// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt +func (fs FS) Meminfo() (Meminfo, error) { + b, err := util.ReadFileNoStat(fs.proc.Path("meminfo")) + if err != nil { + return Meminfo{}, err + } + + m, err := parseMemInfo(bytes.NewReader(b)) + if err != nil { + return Meminfo{}, fmt.Errorf("failed to parse meminfo: %w", err) + } + + return *m, nil +} + +func parseMemInfo(r io.Reader) (*Meminfo, error) { + var m Meminfo + s := bufio.NewScanner(r) + for s.Scan() { + // Each line has at least a name and value; we ignore the unit. + fields := strings.Fields(s.Text()) + if len(fields) < 2 { + return nil, fmt.Errorf("malformed meminfo line: %q", s.Text()) + } + + v, err := strconv.ParseUint(fields[1], 0, 64) + if err != nil { + return nil, err + } + + switch fields[0] { + case "MemTotal:": + m.MemTotal = &v + case "MemFree:": + m.MemFree = &v + case "MemAvailable:": + m.MemAvailable = &v + case "Buffers:": + m.Buffers = &v + case "Cached:": + m.Cached = &v + case "SwapCached:": + m.SwapCached = &v + case "Active:": + m.Active = &v + case "Inactive:": + m.Inactive = &v + case "Active(anon):": + m.ActiveAnon = &v + case "Inactive(anon):": + m.InactiveAnon = &v + case "Active(file):": + m.ActiveFile = &v + case "Inactive(file):": + m.InactiveFile = &v + case "Unevictable:": + m.Unevictable = &v + case "Mlocked:": + m.Mlocked = &v + case "SwapTotal:": + m.SwapTotal = &v + case "SwapFree:": + m.SwapFree = &v + case "Dirty:": + m.Dirty = &v + case "Writeback:": + m.Writeback = &v + case "AnonPages:": + m.AnonPages = &v + case "Mapped:": + m.Mapped = &v + case "Shmem:": + m.Shmem = &v + case "Slab:": + m.Slab = &v + case "SReclaimable:": + m.SReclaimable = &v + case "SUnreclaim:": + m.SUnreclaim = &v + case "KernelStack:": + m.KernelStack = &v + case "PageTables:": + m.PageTables = &v + case "NFS_Unstable:": + m.NFSUnstable = &v + case "Bounce:": + m.Bounce = &v + case "WritebackTmp:": + m.WritebackTmp = &v + case "CommitLimit:": + m.CommitLimit = &v + case 
"Committed_AS:": + m.CommittedAS = &v + case "VmallocTotal:": + m.VmallocTotal = &v + case "VmallocUsed:": + m.VmallocUsed = &v + case "VmallocChunk:": + m.VmallocChunk = &v + case "HardwareCorrupted:": + m.HardwareCorrupted = &v + case "AnonHugePages:": + m.AnonHugePages = &v + case "ShmemHugePages:": + m.ShmemHugePages = &v + case "ShmemPmdMapped:": + m.ShmemPmdMapped = &v + case "CmaTotal:": + m.CmaTotal = &v + case "CmaFree:": + m.CmaFree = &v + case "HugePages_Total:": + m.HugePagesTotal = &v + case "HugePages_Free:": + m.HugePagesFree = &v + case "HugePages_Rsvd:": + m.HugePagesRsvd = &v + case "HugePages_Surp:": + m.HugePagesSurp = &v + case "Hugepagesize:": + m.Hugepagesize = &v + case "DirectMap4k:": + m.DirectMap4k = &v + case "DirectMap2M:": + m.DirectMap2M = &v + case "DirectMap1G:": + m.DirectMap1G = &v + } + } + + return &m, nil +} diff --git a/vendor/github.com/prometheus/procfs/mountinfo.go b/vendor/github.com/prometheus/procfs/mountinfo.go new file mode 100644 index 000000000..59f4d5055 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/mountinfo.go @@ -0,0 +1,180 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// A MountInfo is a type that describes the details, options +// for each mount, parsed from /proc/self/mountinfo. 
+// The fields described in each entry of /proc/self/mountinfo +// is described in the following man page. +// http://man7.org/linux/man-pages/man5/proc.5.html +type MountInfo struct { + // Unique ID for the mount + MountID int + // The ID of the parent mount + ParentID int + // The value of `st_dev` for the files on this FS + MajorMinorVer string + // The pathname of the directory in the FS that forms + // the root for this mount + Root string + // The pathname of the mount point relative to the root + MountPoint string + // Mount options + Options map[string]string + // Zero or more optional fields + OptionalFields map[string]string + // The Filesystem type + FSType string + // FS specific information or "none" + Source string + // Superblock options + SuperOptions map[string]string +} + +// Reads each line of the mountinfo file, and returns a list of formatted MountInfo structs. +func parseMountInfo(info []byte) ([]*MountInfo, error) { + mounts := []*MountInfo{} + scanner := bufio.NewScanner(bytes.NewReader(info)) + for scanner.Scan() { + mountString := scanner.Text() + parsedMounts, err := parseMountInfoString(mountString) + if err != nil { + return nil, err + } + mounts = append(mounts, parsedMounts) + } + + err := scanner.Err() + return mounts, err +} + +// Parses a mountinfo file line, and converts it to a MountInfo struct. +// An important check here is to see if the hyphen separator, as if it does not exist, +// it means that the line is malformed. 
+func parseMountInfoString(mountString string) (*MountInfo, error) { + var err error + + mountInfo := strings.Split(mountString, " ") + mountInfoLength := len(mountInfo) + if mountInfoLength < 10 { + return nil, fmt.Errorf("couldn't find enough fields in mount string: %s", mountString) + } + + if mountInfo[mountInfoLength-4] != "-" { + return nil, fmt.Errorf("couldn't find separator in expected field: %s", mountInfo[mountInfoLength-4]) + } + + mount := &MountInfo{ + MajorMinorVer: mountInfo[2], + Root: mountInfo[3], + MountPoint: mountInfo[4], + Options: mountOptionsParser(mountInfo[5]), + OptionalFields: nil, + FSType: mountInfo[mountInfoLength-3], + Source: mountInfo[mountInfoLength-2], + SuperOptions: mountOptionsParser(mountInfo[mountInfoLength-1]), + } + + mount.MountID, err = strconv.Atoi(mountInfo[0]) + if err != nil { + return nil, fmt.Errorf("failed to parse mount ID") + } + mount.ParentID, err = strconv.Atoi(mountInfo[1]) + if err != nil { + return nil, fmt.Errorf("failed to parse parent ID") + } + // Has optional fields, which is a space separated list of values. + // Example: shared:2 master:7 + if mountInfo[6] != "" { + mount.OptionalFields, err = mountOptionsParseOptionalFields(mountInfo[6 : mountInfoLength-4]) + if err != nil { + return nil, err + } + } + return mount, nil +} + +// mountOptionsIsValidField checks a string against a valid list of optional fields keys. +func mountOptionsIsValidField(s string) bool { + switch s { + case + "shared", + "master", + "propagate_from", + "unbindable": + return true + } + return false +} + +// mountOptionsParseOptionalFields parses a list of optional fields strings into a double map of strings. 
+func mountOptionsParseOptionalFields(o []string) (map[string]string, error) { + optionalFields := make(map[string]string) + for _, field := range o { + optionSplit := strings.SplitN(field, ":", 2) + value := "" + if len(optionSplit) == 2 { + value = optionSplit[1] + } + if mountOptionsIsValidField(optionSplit[0]) { + optionalFields[optionSplit[0]] = value + } + } + return optionalFields, nil +} + +// mountOptionsParser parses the mount options, superblock options. +func mountOptionsParser(mountOptions string) map[string]string { + opts := make(map[string]string) + options := strings.Split(mountOptions, ",") + for _, opt := range options { + splitOption := strings.Split(opt, "=") + if len(splitOption) < 2 { + key := splitOption[0] + opts[key] = "" + } else { + key, value := splitOption[0], splitOption[1] + opts[key] = value + } + } + return opts +} + +// GetMounts retrieves mountinfo information from `/proc/self/mountinfo`. +func GetMounts() ([]*MountInfo, error) { + data, err := util.ReadFileNoStat("/proc/self/mountinfo") + if err != nil { + return nil, err + } + return parseMountInfo(data) +} + +// GetProcMounts retrieves mountinfo information from a processes' `/proc//mountinfo`. 
+func GetProcMounts(pid int) ([]*MountInfo, error) { + data, err := util.ReadFileNoStat(fmt.Sprintf("/proc/%d/mountinfo", pid)) + if err != nil { + return nil, err + } + return parseMountInfo(data) +} diff --git a/vendor/github.com/m3db/prometheus_procfs/mountstats.go b/vendor/github.com/prometheus/procfs/mountstats.go similarity index 77% rename from vendor/github.com/m3db/prometheus_procfs/mountstats.go rename to vendor/github.com/prometheus/procfs/mountstats.go index 47ab0a744..f7a828bb1 100644 --- a/vendor/github.com/m3db/prometheus_procfs/mountstats.go +++ b/vendor/github.com/prometheus/procfs/mountstats.go @@ -1,3 +1,16 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package procfs // While implementing parsing of /proc/[pid]/mountstats, this blog was used @@ -26,8 +39,11 @@ const ( statVersion10 = "1.0" statVersion11 = "1.1" - fieldTransport10Len = 10 - fieldTransport11Len = 13 + fieldTransport10TCPLen = 10 + fieldTransport10UDPLen = 7 + + fieldTransport11TCPLen = 13 + fieldTransport11UDPLen = 10 ) // A Mount is a device mount parsed from /proc/[pid]/mountstats. @@ -53,6 +69,8 @@ type MountStats interface { type MountStatsNFS struct { // The version of statistics provided. StatVersion string + // The mount options of the NFS mount. + Opts map[string]string // The age of the NFS mount. Age time.Duration // Statistics related to byte counters for various operations. 
@@ -123,7 +141,7 @@ type NFSEventsStats struct { VFSFlush uint64 // Number of times fsync() has been called on directories and files. VFSFsync uint64 - // Number of times locking has been attemped on a file. + // Number of times locking has been attempted on a file. VFSLock uint64 // Number of times files have been closed and released. VFSFileRelease uint64 @@ -163,16 +181,20 @@ type NFSOperationStats struct { // Number of bytes received for this operation, including RPC headers and payload. BytesReceived uint64 // Duration all requests spent queued for transmission before they were sent. - CumulativeQueueTime time.Duration + CumulativeQueueMilliseconds uint64 // Duration it took to get a reply back after the request was transmitted. - CumulativeTotalResponseTime time.Duration + CumulativeTotalResponseMilliseconds uint64 // Duration from when a request was enqueued to when it was completely handled. - CumulativeTotalRequestTime time.Duration + CumulativeTotalRequestMilliseconds uint64 + // The count of operations that complete with tk_status < 0. These statuses usually indicate error conditions. + Errors uint64 } // A NFSTransportStats contains statistics for the NFS mount RPC requests and // responses. type NFSTransportStats struct { + // The transport protocol used for the NFS mount. + Protocol string // The local port used for the NFS mount. Port uint64 // Number of times the client has had to establish a connection from scratch @@ -184,7 +206,7 @@ type NFSTransportStats struct { // spent waiting for connections to the server to be established. ConnectIdleTime uint64 // Duration since the NFS mount last saw any RPC traffic. - IdleTime time.Duration + IdleTimeSeconds uint64 // Number of RPC requests for this mount sent to the NFS server. Sends uint64 // Number of RPC responses for this mount received from the NFS server. 
@@ -299,6 +321,7 @@ func parseMount(ss []string) (*Mount, error) { func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, error) { // Field indicators for parsing specific types of data const ( + fieldOpts = "opts:" fieldAge = "age:" fieldBytes = "bytes:" fieldEvents = "events:" @@ -315,12 +338,27 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e if len(ss) == 0 { break } - if len(ss) < 2 { - return nil, fmt.Errorf("not enough information for NFS stats: %v", ss) - } switch ss[0] { + case fieldOpts: + if len(ss) < 2 { + return nil, fmt.Errorf("not enough information for NFS stats: %v", ss) + } + if stats.Opts == nil { + stats.Opts = map[string]string{} + } + for _, opt := range strings.Split(ss[1], ",") { + split := strings.Split(opt, "=") + if len(split) == 2 { + stats.Opts[split[0]] = split[1] + } else { + stats.Opts[opt] = "" + } + } case fieldAge: + if len(ss) < 2 { + return nil, fmt.Errorf("not enough information for NFS stats: %v", ss) + } // Age integer is in seconds d, err := time.ParseDuration(ss[1] + "s") if err != nil { @@ -329,6 +367,9 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e stats.Age = d case fieldBytes: + if len(ss) < 2 { + return nil, fmt.Errorf("not enough information for NFS stats: %v", ss) + } bstats, err := parseNFSBytesStats(ss[1:]) if err != nil { return nil, err @@ -336,6 +377,9 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e stats.Bytes = *bstats case fieldEvents: + if len(ss) < 2 { + return nil, fmt.Errorf("not enough information for NFS stats: %v", ss) + } estats, err := parseNFSEventsStats(ss[1:]) if err != nil { return nil, err @@ -347,7 +391,7 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e return nil, fmt.Errorf("not enough information for NFS transport stats: %v", ss) } - tstats, err := parseNFSTransportStats(ss[2:], statVersion) + tstats, err := 
parseNFSTransportStats(ss[1:], statVersion) if err != nil { return nil, err } @@ -356,7 +400,7 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e } // When encountering "per-operation statistics", we must break this - // loop and parse them seperately to ensure we can terminate parsing + // loop and parse them separately to ensure we can terminate parsing // before reaching another device entry; hence why this 'if' statement // is not just another switch case if ss[0] == fieldPerOpStats { @@ -461,8 +505,8 @@ func parseNFSEventsStats(ss []string) (*NFSEventsStats, error) { // line is reached. func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) { const ( - // Number of expected fields in each per-operation statistics set - numFields = 9 + // Minimum number of expected fields in each per-operation statistics set + minFields = 9 ) var ops []NFSOperationStats @@ -475,12 +519,12 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) { break } - if len(ss) != numFields { + if len(ss) < minFields { return nil, fmt.Errorf("invalid NFS per-operations stats: %v", ss) } // Skip string operation name for integers - ns := make([]uint64, 0, numFields-1) + ns := make([]uint64, 0, minFields-1) for _, st := range ss[1:] { n, err := strconv.ParseUint(st, 10, 64) if err != nil { @@ -490,17 +534,23 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) { ns = append(ns, n) } - ops = append(ops, NFSOperationStats{ - Operation: strings.TrimSuffix(ss[0], ":"), - Requests: ns[0], - Transmissions: ns[1], - MajorTimeouts: ns[2], - BytesSent: ns[3], - BytesReceived: ns[4], - CumulativeQueueTime: time.Duration(ns[5]) * time.Millisecond, - CumulativeTotalResponseTime: time.Duration(ns[6]) * time.Millisecond, - CumulativeTotalRequestTime: time.Duration(ns[7]) * time.Millisecond, - }) + opStats := NFSOperationStats{ + Operation: strings.TrimSuffix(ss[0], ":"), + Requests: ns[0], + Transmissions: 
ns[1], + MajorTimeouts: ns[2], + BytesSent: ns[3], + BytesReceived: ns[4], + CumulativeQueueMilliseconds: ns[5], + CumulativeTotalResponseMilliseconds: ns[6], + CumulativeTotalRequestMilliseconds: ns[7], + } + + if len(ns) > 8 { + opStats.Errors = ns[8] + } + + ops = append(ops, opStats) } return ops, s.Err() @@ -509,13 +559,33 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) { // parseNFSTransportStats parses a NFSTransportStats line using an input set of // integer fields matched to a specific stats version. func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats, error) { + // Extract the protocol field. It is the only string value in the line + protocol := ss[0] + ss = ss[1:] + switch statVersion { case statVersion10: - if len(ss) != fieldTransport10Len { + var expectedLength int + if protocol == "tcp" { + expectedLength = fieldTransport10TCPLen + } else if protocol == "udp" { + expectedLength = fieldTransport10UDPLen + } else { + return nil, fmt.Errorf("invalid NFS protocol \"%s\" in stats 1.0 statement: %v", protocol, ss) + } + if len(ss) != expectedLength { return nil, fmt.Errorf("invalid NFS transport stats 1.0 statement: %v", ss) } case statVersion11: - if len(ss) != fieldTransport11Len { + var expectedLength int + if protocol == "tcp" { + expectedLength = fieldTransport11TCPLen + } else if protocol == "udp" { + expectedLength = fieldTransport11UDPLen + } else { + return nil, fmt.Errorf("invalid NFS protocol \"%s\" in stats 1.1 statement: %v", protocol, ss) + } + if len(ss) != expectedLength { return nil, fmt.Errorf("invalid NFS transport stats 1.1 statement: %v", ss) } default: @@ -523,23 +593,39 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats } // Allocate enough for v1.1 stats since zero value for v1.1 stats will be okay - // in a v1.0 response - ns := make([]uint64, 0, fieldTransport11Len) - for _, s := range ss { + // in a v1.0 response. 
Since the stat length is bigger for TCP stats, we use + // the TCP length here. + // + // Note: slice length must be set to length of v1.1 stats to avoid a panic when + // only v1.0 stats are present. + // See: https://github.com/prometheus/node_exporter/issues/571. + ns := make([]uint64, fieldTransport11TCPLen) + for i, s := range ss { n, err := strconv.ParseUint(s, 10, 64) if err != nil { return nil, err } - ns = append(ns, n) + ns[i] = n + } + + // The fields differ depending on the transport protocol (TCP or UDP) + // From https://utcc.utoronto.ca/%7Ecks/space/blog/linux/NFSMountstatsXprt + // + // For the udp RPC transport there is no connection count, connect idle time, + // or idle time (fields #3, #4, and #5); all other fields are the same. So + // we set them to 0 here. + if protocol == "udp" { + ns = append(ns[:2], append(make([]uint64, 3), ns[2:]...)...) } return &NFSTransportStats{ + Protocol: protocol, Port: ns[0], Bind: ns[1], Connect: ns[2], ConnectIdleTime: ns[3], - IdleTime: time.Duration(ns[4]) * time.Second, + IdleTimeSeconds: ns[4], Sends: ns[5], Receives: ns[6], BadTransactionIDs: ns[7], diff --git a/vendor/github.com/prometheus/procfs/net_conntrackstat.go b/vendor/github.com/prometheus/procfs/net_conntrackstat.go new file mode 100644 index 000000000..9964a3600 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/net_conntrackstat.go @@ -0,0 +1,153 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// A ConntrackStatEntry represents one line from net/stat/nf_conntrack +// and contains netfilter conntrack statistics at one CPU core +type ConntrackStatEntry struct { + Entries uint64 + Found uint64 + Invalid uint64 + Ignore uint64 + Insert uint64 + InsertFailed uint64 + Drop uint64 + EarlyDrop uint64 + SearchRestart uint64 +} + +// ConntrackStat retrieves netfilter's conntrack statistics, split by CPU cores +func (fs FS) ConntrackStat() ([]ConntrackStatEntry, error) { + return readConntrackStat(fs.proc.Path("net", "stat", "nf_conntrack")) +} + +// Parses a slice of ConntrackStatEntries from the given filepath +func readConntrackStat(path string) ([]ConntrackStatEntry, error) { + // This file is small and can be read with one syscall. + b, err := util.ReadFileNoStat(path) + if err != nil { + // Do not wrap this error so the caller can detect os.IsNotExist and + // similar conditions. 
+ return nil, err + } + + stat, err := parseConntrackStat(bytes.NewReader(b)) + if err != nil { + return nil, fmt.Errorf("failed to read conntrack stats from %q: %w", path, err) + } + + return stat, nil +} + +// Reads the contents of a conntrack statistics file and parses a slice of ConntrackStatEntries +func parseConntrackStat(r io.Reader) ([]ConntrackStatEntry, error) { + var entries []ConntrackStatEntry + + scanner := bufio.NewScanner(r) + scanner.Scan() + for scanner.Scan() { + fields := strings.Fields(scanner.Text()) + conntrackEntry, err := parseConntrackStatEntry(fields) + if err != nil { + return nil, err + } + entries = append(entries, *conntrackEntry) + } + + return entries, nil +} + +// Parses a ConntrackStatEntry from given array of fields +func parseConntrackStatEntry(fields []string) (*ConntrackStatEntry, error) { + if len(fields) != 17 { + return nil, fmt.Errorf("invalid conntrackstat entry, missing fields") + } + entry := &ConntrackStatEntry{} + + entries, err := parseConntrackStatField(fields[0]) + if err != nil { + return nil, err + } + entry.Entries = entries + + found, err := parseConntrackStatField(fields[2]) + if err != nil { + return nil, err + } + entry.Found = found + + invalid, err := parseConntrackStatField(fields[4]) + if err != nil { + return nil, err + } + entry.Invalid = invalid + + ignore, err := parseConntrackStatField(fields[5]) + if err != nil { + return nil, err + } + entry.Ignore = ignore + + insert, err := parseConntrackStatField(fields[8]) + if err != nil { + return nil, err + } + entry.Insert = insert + + insertFailed, err := parseConntrackStatField(fields[9]) + if err != nil { + return nil, err + } + entry.InsertFailed = insertFailed + + drop, err := parseConntrackStatField(fields[10]) + if err != nil { + return nil, err + } + entry.Drop = drop + + earlyDrop, err := parseConntrackStatField(fields[11]) + if err != nil { + return nil, err + } + entry.EarlyDrop = earlyDrop + + searchRestart, err := 
parseConntrackStatField(fields[16]) + if err != nil { + return nil, err + } + entry.SearchRestart = searchRestart + + return entry, nil +} + +// Parses a uint64 from given hex in string +func parseConntrackStatField(field string) (uint64, error) { + val, err := strconv.ParseUint(field, 16, 64) + if err != nil { + return 0, fmt.Errorf("couldn't parse %q field: %w", field, err) + } + return val, err +} diff --git a/vendor/github.com/prometheus/procfs/net_dev.go b/vendor/github.com/prometheus/procfs/net_dev.go new file mode 100644 index 000000000..47a710bef --- /dev/null +++ b/vendor/github.com/prometheus/procfs/net_dev.go @@ -0,0 +1,205 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "errors" + "os" + "sort" + "strconv" + "strings" +) + +// NetDevLine is single line parsed from /proc/net/dev or /proc/[pid]/net/dev. +type NetDevLine struct { + Name string `json:"name"` // The name of the interface. + RxBytes uint64 `json:"rx_bytes"` // Cumulative count of bytes received. + RxPackets uint64 `json:"rx_packets"` // Cumulative count of packets received. + RxErrors uint64 `json:"rx_errors"` // Cumulative count of receive errors encountered. + RxDropped uint64 `json:"rx_dropped"` // Cumulative count of packets dropped while receiving. + RxFIFO uint64 `json:"rx_fifo"` // Cumulative count of FIFO buffer errors. 
+ RxFrame uint64 `json:"rx_frame"` // Cumulative count of packet framing errors. + RxCompressed uint64 `json:"rx_compressed"` // Cumulative count of compressed packets received by the device driver. + RxMulticast uint64 `json:"rx_multicast"` // Cumulative count of multicast frames received by the device driver. + TxBytes uint64 `json:"tx_bytes"` // Cumulative count of bytes transmitted. + TxPackets uint64 `json:"tx_packets"` // Cumulative count of packets transmitted. + TxErrors uint64 `json:"tx_errors"` // Cumulative count of transmit errors encountered. + TxDropped uint64 `json:"tx_dropped"` // Cumulative count of packets dropped while transmitting. + TxFIFO uint64 `json:"tx_fifo"` // Cumulative count of FIFO buffer errors. + TxCollisions uint64 `json:"tx_collisions"` // Cumulative count of collisions detected on the interface. + TxCarrier uint64 `json:"tx_carrier"` // Cumulative count of carrier losses detected by the device driver. + TxCompressed uint64 `json:"tx_compressed"` // Cumulative count of compressed packets transmitted by the device driver. +} + +// NetDev is parsed from /proc/net/dev or /proc/[pid]/net/dev. The map keys +// are interface names. +type NetDev map[string]NetDevLine + +// NetDev returns kernel/system statistics read from /proc/net/dev. +func (fs FS) NetDev() (NetDev, error) { + return newNetDev(fs.proc.Path("net/dev")) +} + +// NetDev returns kernel/system statistics read from /proc/[pid]/net/dev. +func (p Proc) NetDev() (NetDev, error) { + return newNetDev(p.path("net/dev")) +} + +// newNetDev creates a new NetDev from the contents of the given file. +func newNetDev(file string) (NetDev, error) { + f, err := os.Open(file) + if err != nil { + return NetDev{}, err + } + defer f.Close() + + netDev := NetDev{} + s := bufio.NewScanner(f) + for n := 0; s.Scan(); n++ { + // Skip the 2 header lines. 
+ if n < 2 { + continue + } + + line, err := netDev.parseLine(s.Text()) + if err != nil { + return netDev, err + } + + netDev[line.Name] = *line + } + + return netDev, s.Err() +} + +// parseLine parses a single line from the /proc/net/dev file. Header lines +// must be filtered prior to calling this method. +func (netDev NetDev) parseLine(rawLine string) (*NetDevLine, error) { + parts := strings.SplitN(rawLine, ":", 2) + if len(parts) != 2 { + return nil, errors.New("invalid net/dev line, missing colon") + } + fields := strings.Fields(strings.TrimSpace(parts[1])) + + var err error + line := &NetDevLine{} + + // Interface Name + line.Name = strings.TrimSpace(parts[0]) + if line.Name == "" { + return nil, errors.New("invalid net/dev line, empty interface name") + } + + // RX + line.RxBytes, err = strconv.ParseUint(fields[0], 10, 64) + if err != nil { + return nil, err + } + line.RxPackets, err = strconv.ParseUint(fields[1], 10, 64) + if err != nil { + return nil, err + } + line.RxErrors, err = strconv.ParseUint(fields[2], 10, 64) + if err != nil { + return nil, err + } + line.RxDropped, err = strconv.ParseUint(fields[3], 10, 64) + if err != nil { + return nil, err + } + line.RxFIFO, err = strconv.ParseUint(fields[4], 10, 64) + if err != nil { + return nil, err + } + line.RxFrame, err = strconv.ParseUint(fields[5], 10, 64) + if err != nil { + return nil, err + } + line.RxCompressed, err = strconv.ParseUint(fields[6], 10, 64) + if err != nil { + return nil, err + } + line.RxMulticast, err = strconv.ParseUint(fields[7], 10, 64) + if err != nil { + return nil, err + } + + // TX + line.TxBytes, err = strconv.ParseUint(fields[8], 10, 64) + if err != nil { + return nil, err + } + line.TxPackets, err = strconv.ParseUint(fields[9], 10, 64) + if err != nil { + return nil, err + } + line.TxErrors, err = strconv.ParseUint(fields[10], 10, 64) + if err != nil { + return nil, err + } + line.TxDropped, err = strconv.ParseUint(fields[11], 10, 64) + if err != nil { + return nil, err + 
} + line.TxFIFO, err = strconv.ParseUint(fields[12], 10, 64) + if err != nil { + return nil, err + } + line.TxCollisions, err = strconv.ParseUint(fields[13], 10, 64) + if err != nil { + return nil, err + } + line.TxCarrier, err = strconv.ParseUint(fields[14], 10, 64) + if err != nil { + return nil, err + } + line.TxCompressed, err = strconv.ParseUint(fields[15], 10, 64) + if err != nil { + return nil, err + } + + return line, nil +} + +// Total aggregates the values across interfaces and returns a new NetDevLine. +// The Name field will be a sorted comma separated list of interface names. +func (netDev NetDev) Total() NetDevLine { + total := NetDevLine{} + + names := make([]string, 0, len(netDev)) + for _, ifc := range netDev { + names = append(names, ifc.Name) + total.RxBytes += ifc.RxBytes + total.RxPackets += ifc.RxPackets + total.RxErrors += ifc.RxErrors + total.RxDropped += ifc.RxDropped + total.RxFIFO += ifc.RxFIFO + total.RxFrame += ifc.RxFrame + total.RxCompressed += ifc.RxCompressed + total.RxMulticast += ifc.RxMulticast + total.TxBytes += ifc.TxBytes + total.TxPackets += ifc.TxPackets + total.TxErrors += ifc.TxErrors + total.TxDropped += ifc.TxDropped + total.TxFIFO += ifc.TxFIFO + total.TxCollisions += ifc.TxCollisions + total.TxCarrier += ifc.TxCarrier + total.TxCompressed += ifc.TxCompressed + } + sort.Strings(names) + total.Name = strings.Join(names, ", ") + + return total +} diff --git a/vendor/github.com/prometheus/procfs/net_ip_socket.go b/vendor/github.com/prometheus/procfs/net_ip_socket.go new file mode 100644 index 000000000..ac01dd847 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/net_ip_socket.go @@ -0,0 +1,220 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "encoding/hex" + "fmt" + "io" + "net" + "os" + "strconv" + "strings" +) + +const ( + // readLimit is used by io.LimitReader while reading the content of the + // /proc/net/udp{,6} files. The number of lines inside such a file is dynamic + // as each line represents a single used socket. + // In theory, the number of available sockets is 65535 (2^16 - 1) per IP. + // With e.g. 150 Byte per line and the maximum number of 65535, + // the reader needs to handle 150 Byte * 65535 =~ 10 MB for a single IP. + readLimit = 4294967296 // Byte -> 4 GiB +) + +// this contains generic data structures for both udp and tcp sockets +type ( + // NetIPSocket represents the contents of /proc/net/{t,u}dp{,6} file without the header. + NetIPSocket []*netIPSocketLine + + // NetIPSocketSummary provides already computed values like the total queue lengths or + // the total number of used sockets. In contrast to NetIPSocket it does not collect + // the parsed lines into a slice. + NetIPSocketSummary struct { + // TxQueueLength shows the total queue length of all parsed tx_queue lengths. + TxQueueLength uint64 + // RxQueueLength shows the total queue length of all parsed rx_queue lengths. + RxQueueLength uint64 + // UsedSockets shows the total number of parsed lines representing the + // number of used sockets. + UsedSockets uint64 + } + + // netIPSocketLine represents the fields parsed from a single line + // in /proc/net/{t,u}dp{,6}. Fields which are not used by IPSocket are skipped. 
+ // For the proc file format details, see https://linux.die.net/man/5/proc. + netIPSocketLine struct { + Sl uint64 + LocalAddr net.IP + LocalPort uint64 + RemAddr net.IP + RemPort uint64 + St uint64 + TxQueue uint64 + RxQueue uint64 + UID uint64 + } +) + +func newNetIPSocket(file string) (NetIPSocket, error) { + f, err := os.Open(file) + if err != nil { + return nil, err + } + defer f.Close() + + var netIPSocket NetIPSocket + + lr := io.LimitReader(f, readLimit) + s := bufio.NewScanner(lr) + s.Scan() // skip first line with headers + for s.Scan() { + fields := strings.Fields(s.Text()) + line, err := parseNetIPSocketLine(fields) + if err != nil { + return nil, err + } + netIPSocket = append(netIPSocket, line) + } + if err := s.Err(); err != nil { + return nil, err + } + return netIPSocket, nil +} + +// newNetIPSocketSummary creates a new NetIPSocket{,6} from the contents of the given file. +func newNetIPSocketSummary(file string) (*NetIPSocketSummary, error) { + f, err := os.Open(file) + if err != nil { + return nil, err + } + defer f.Close() + + var netIPSocketSummary NetIPSocketSummary + + lr := io.LimitReader(f, readLimit) + s := bufio.NewScanner(lr) + s.Scan() // skip first line with headers + for s.Scan() { + fields := strings.Fields(s.Text()) + line, err := parseNetIPSocketLine(fields) + if err != nil { + return nil, err + } + netIPSocketSummary.TxQueueLength += line.TxQueue + netIPSocketSummary.RxQueueLength += line.RxQueue + netIPSocketSummary.UsedSockets++ + } + if err := s.Err(); err != nil { + return nil, err + } + return &netIPSocketSummary, nil +} + +// the /proc/net/{t,u}dp{,6} files are network byte order for ipv4 and for ipv6 the address is four words consisting of four bytes each. In each of those four words the four bytes are written in reverse order. 
+ +func parseIP(hexIP string) (net.IP, error) { + var byteIP []byte + byteIP, err := hex.DecodeString(hexIP) + if err != nil { + return nil, fmt.Errorf("cannot parse address field in socket line %q", hexIP) + } + switch len(byteIP) { + case 4: + return net.IP{byteIP[3], byteIP[2], byteIP[1], byteIP[0]}, nil + case 16: + i := net.IP{ + byteIP[3], byteIP[2], byteIP[1], byteIP[0], + byteIP[7], byteIP[6], byteIP[5], byteIP[4], + byteIP[11], byteIP[10], byteIP[9], byteIP[8], + byteIP[15], byteIP[14], byteIP[13], byteIP[12], + } + return i, nil + default: + return nil, fmt.Errorf("Unable to parse IP %s", hexIP) + } +} + +// parseNetIPSocketLine parses a single line, represented by a list of fields. +func parseNetIPSocketLine(fields []string) (*netIPSocketLine, error) { + line := &netIPSocketLine{} + if len(fields) < 8 { + return nil, fmt.Errorf( + "cannot parse net socket line as it has less then 8 columns %q", + strings.Join(fields, " "), + ) + } + var err error // parse error + + // sl + s := strings.Split(fields[0], ":") + if len(s) != 2 { + return nil, fmt.Errorf("cannot parse sl field in socket line %q", fields[0]) + } + + if line.Sl, err = strconv.ParseUint(s[0], 0, 64); err != nil { + return nil, fmt.Errorf("cannot parse sl value in socket line: %w", err) + } + // local_address + l := strings.Split(fields[1], ":") + if len(l) != 2 { + return nil, fmt.Errorf("cannot parse local_address field in socket line %q", fields[1]) + } + if line.LocalAddr, err = parseIP(l[0]); err != nil { + return nil, err + } + if line.LocalPort, err = strconv.ParseUint(l[1], 16, 64); err != nil { + return nil, fmt.Errorf("cannot parse local_address port value in socket line: %w", err) + } + + // remote_address + r := strings.Split(fields[2], ":") + if len(r) != 2 { + return nil, fmt.Errorf("cannot parse rem_address field in socket line %q", fields[1]) + } + if line.RemAddr, err = parseIP(r[0]); err != nil { + return nil, err + } + if line.RemPort, err = strconv.ParseUint(r[1], 16, 64); 
err != nil { + return nil, fmt.Errorf("cannot parse rem_address port value in socket line: %w", err) + } + + // st + if line.St, err = strconv.ParseUint(fields[3], 16, 64); err != nil { + return nil, fmt.Errorf("cannot parse st value in socket line: %w", err) + } + + // tx_queue and rx_queue + q := strings.Split(fields[4], ":") + if len(q) != 2 { + return nil, fmt.Errorf( + "cannot parse tx/rx queues in socket line as it has a missing colon %q", + fields[4], + ) + } + if line.TxQueue, err = strconv.ParseUint(q[0], 16, 64); err != nil { + return nil, fmt.Errorf("cannot parse tx_queue value in socket line: %w", err) + } + if line.RxQueue, err = strconv.ParseUint(q[1], 16, 64); err != nil { + return nil, fmt.Errorf("cannot parse rx_queue value in socket line: %w", err) + } + + // uid + if line.UID, err = strconv.ParseUint(fields[7], 0, 64); err != nil { + return nil, fmt.Errorf("cannot parse uid value in socket line: %w", err) + } + + return line, nil +} diff --git a/vendor/github.com/prometheus/procfs/net_protocols.go b/vendor/github.com/prometheus/procfs/net_protocols.go new file mode 100644 index 000000000..8c6de3791 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/net_protocols.go @@ -0,0 +1,180 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// NetProtocolStats stores the contents from /proc/net/protocols +type NetProtocolStats map[string]NetProtocolStatLine + +// NetProtocolStatLine contains a single line parsed from /proc/net/protocols. We +// only care about the first six columns as the rest are not likely to change +// and only serve to provide a set of capabilities for each protocol. +type NetProtocolStatLine struct { + Name string // 0 The name of the protocol + Size uint64 // 1 The size, in bytes, of a given protocol structure. e.g. sizeof(struct tcp_sock) or sizeof(struct unix_sock) + Sockets int64 // 2 Number of sockets in use by this protocol + Memory int64 // 3 Number of 4KB pages allocated by all sockets of this protocol + Pressure int // 4 This is either yes, no, or NI (not implemented). For the sake of simplicity we treat NI as not experiencing memory pressure. + MaxHeader uint64 // 5 Protocol specific max header size + Slab bool // 6 Indicates whether or not memory is allocated from the SLAB + ModuleName string // 7 The name of the module that implemented this protocol or "kernel" if not from a module + Capabilities NetProtocolCapabilities +} + +// NetProtocolCapabilities contains a list of capabilities for each protocol +type NetProtocolCapabilities struct { + Close bool // 8 + Connect bool // 9 + Disconnect bool // 10 + Accept bool // 11 + IoCtl bool // 12 + Init bool // 13 + Destroy bool // 14 + Shutdown bool // 15 + SetSockOpt bool // 16 + GetSockOpt bool // 17 + SendMsg bool // 18 + RecvMsg bool // 19 + SendPage bool // 20 + Bind bool // 21 + BacklogRcv bool // 22 + Hash bool // 23 + UnHash bool // 24 + GetPort bool // 25 + EnterMemoryPressure bool // 26 +} + +// NetProtocols reads stats from /proc/net/protocols and returns a map of +// PortocolStatLine entries. 
As of this writing no official Linux Documentation +// exists, however the source is fairly self-explanatory and the format seems +// stable since its introduction in 2.6.12-rc2 +// Linux 2.6.12-rc2 - https://elixir.bootlin.com/linux/v2.6.12-rc2/source/net/core/sock.c#L1452 +// Linux 5.10 - https://elixir.bootlin.com/linux/v5.10.4/source/net/core/sock.c#L3586 +func (fs FS) NetProtocols() (NetProtocolStats, error) { + data, err := util.ReadFileNoStat(fs.proc.Path("net/protocols")) + if err != nil { + return NetProtocolStats{}, err + } + return parseNetProtocols(bufio.NewScanner(bytes.NewReader(data))) +} + +func parseNetProtocols(s *bufio.Scanner) (NetProtocolStats, error) { + nps := NetProtocolStats{} + + // Skip the header line + s.Scan() + + for s.Scan() { + line, err := nps.parseLine(s.Text()) + if err != nil { + return NetProtocolStats{}, err + } + + nps[line.Name] = *line + } + return nps, nil +} + +func (ps NetProtocolStats) parseLine(rawLine string) (*NetProtocolStatLine, error) { + line := &NetProtocolStatLine{Capabilities: NetProtocolCapabilities{}} + var err error + const enabled = "yes" + const disabled = "no" + + fields := strings.Fields(rawLine) + line.Name = fields[0] + line.Size, err = strconv.ParseUint(fields[1], 10, 64) + if err != nil { + return nil, err + } + line.Sockets, err = strconv.ParseInt(fields[2], 10, 64) + if err != nil { + return nil, err + } + line.Memory, err = strconv.ParseInt(fields[3], 10, 64) + if err != nil { + return nil, err + } + if fields[4] == enabled { + line.Pressure = 1 + } else if fields[4] == disabled { + line.Pressure = 0 + } else { + line.Pressure = -1 + } + line.MaxHeader, err = strconv.ParseUint(fields[5], 10, 64) + if err != nil { + return nil, err + } + if fields[6] == enabled { + line.Slab = true + } else if fields[6] == disabled { + line.Slab = false + } else { + return nil, fmt.Errorf("unable to parse capability for protocol: %s", line.Name) + } + line.ModuleName = fields[7] + + err = 
line.Capabilities.parseCapabilities(fields[8:]) + if err != nil { + return nil, err + } + + return line, nil +} + +func (pc *NetProtocolCapabilities) parseCapabilities(capabilities []string) error { + // The capabilities are all bools so we can loop over to map them + capabilityFields := [...]*bool{ + &pc.Close, + &pc.Connect, + &pc.Disconnect, + &pc.Accept, + &pc.IoCtl, + &pc.Init, + &pc.Destroy, + &pc.Shutdown, + &pc.SetSockOpt, + &pc.GetSockOpt, + &pc.SendMsg, + &pc.RecvMsg, + &pc.SendPage, + &pc.Bind, + &pc.BacklogRcv, + &pc.Hash, + &pc.UnHash, + &pc.GetPort, + &pc.EnterMemoryPressure, + } + + for i := 0; i < len(capabilities); i++ { + if capabilities[i] == "y" { + *capabilityFields[i] = true + } else if capabilities[i] == "n" { + *capabilityFields[i] = false + } else { + return fmt.Errorf("unable to parse capability block for protocol: position %d", i) + } + } + return nil +} diff --git a/vendor/github.com/prometheus/procfs/net_sockstat.go b/vendor/github.com/prometheus/procfs/net_sockstat.go new file mode 100644 index 000000000..e36f4872d --- /dev/null +++ b/vendor/github.com/prometheus/procfs/net_sockstat.go @@ -0,0 +1,163 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// A NetSockstat contains the output of /proc/net/sockstat{,6} for IPv4 or IPv6, +// respectively. 
+type NetSockstat struct { + // Used is non-nil for IPv4 sockstat results, but nil for IPv6. + Used *int + Protocols []NetSockstatProtocol +} + +// A NetSockstatProtocol contains statistics about a given socket protocol. +// Pointer fields indicate that the value may or may not be present on any +// given protocol. +type NetSockstatProtocol struct { + Protocol string + InUse int + Orphan *int + TW *int + Alloc *int + Mem *int + Memory *int +} + +// NetSockstat retrieves IPv4 socket statistics. +func (fs FS) NetSockstat() (*NetSockstat, error) { + return readSockstat(fs.proc.Path("net", "sockstat")) +} + +// NetSockstat6 retrieves IPv6 socket statistics. +// +// If IPv6 is disabled on this kernel, the returned error can be checked with +// os.IsNotExist. +func (fs FS) NetSockstat6() (*NetSockstat, error) { + return readSockstat(fs.proc.Path("net", "sockstat6")) +} + +// readSockstat opens and parses a NetSockstat from the input file. +func readSockstat(name string) (*NetSockstat, error) { + // This file is small and can be read with one syscall. + b, err := util.ReadFileNoStat(name) + if err != nil { + // Do not wrap this error so the caller can detect os.IsNotExist and + // similar conditions. + return nil, err + } + + stat, err := parseSockstat(bytes.NewReader(b)) + if err != nil { + return nil, fmt.Errorf("failed to read sockstats from %q: %w", name, err) + } + + return stat, nil +} + +// parseSockstat reads the contents of a sockstat file and parses a NetSockstat. +func parseSockstat(r io.Reader) (*NetSockstat, error) { + var stat NetSockstat + s := bufio.NewScanner(r) + for s.Scan() { + // Expect a minimum of a protocol and one key/value pair. + fields := strings.Split(s.Text(), " ") + if len(fields) < 3 { + return nil, fmt.Errorf("malformed sockstat line: %q", s.Text()) + } + + // The remaining fields are key/value pairs. 
+ kvs, err := parseSockstatKVs(fields[1:]) + if err != nil { + return nil, fmt.Errorf("error parsing sockstat key/value pairs from %q: %w", s.Text(), err) + } + + // The first field is the protocol. We must trim its colon suffix. + proto := strings.TrimSuffix(fields[0], ":") + switch proto { + case "sockets": + // Special case: IPv4 has a sockets "used" key/value pair that we + // embed at the top level of the structure. + used := kvs["used"] + stat.Used = &used + default: + // Parse all other lines as individual protocols. + nsp := parseSockstatProtocol(kvs) + nsp.Protocol = proto + stat.Protocols = append(stat.Protocols, nsp) + } + } + + if err := s.Err(); err != nil { + return nil, err + } + + return &stat, nil +} + +// parseSockstatKVs parses a string slice into a map of key/value pairs. +func parseSockstatKVs(kvs []string) (map[string]int, error) { + if len(kvs)%2 != 0 { + return nil, errors.New("odd number of fields in key/value pairs") + } + + // Iterate two values at a time to gather key/value pairs. + out := make(map[string]int, len(kvs)/2) + for i := 0; i < len(kvs); i += 2 { + vp := util.NewValueParser(kvs[i+1]) + out[kvs[i]] = vp.Int() + + if err := vp.Err(); err != nil { + return nil, err + } + } + + return out, nil +} + +// parseSockstatProtocol parses a NetSockstatProtocol from the input kvs map. +func parseSockstatProtocol(kvs map[string]int) NetSockstatProtocol { + var nsp NetSockstatProtocol + for k, v := range kvs { + // Capture the range variable to ensure we get unique pointers for + // each of the optional fields. 
+ v := v + switch k { + case "inuse": + nsp.InUse = v + case "orphan": + nsp.Orphan = &v + case "tw": + nsp.TW = &v + case "alloc": + nsp.Alloc = &v + case "mem": + nsp.Mem = &v + case "memory": + nsp.Memory = &v + } + } + + return nsp +} diff --git a/vendor/github.com/prometheus/procfs/net_softnet.go b/vendor/github.com/prometheus/procfs/net_softnet.go new file mode 100644 index 000000000..46f12c61d --- /dev/null +++ b/vendor/github.com/prometheus/procfs/net_softnet.go @@ -0,0 +1,102 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// For the proc file format details, +// See: +// * Linux 2.6.23 https://elixir.bootlin.com/linux/v2.6.23/source/net/core/dev.c#L2343 +// * Linux 4.17 https://elixir.bootlin.com/linux/v4.17/source/net/core/net-procfs.c#L162 +// and https://elixir.bootlin.com/linux/v4.17/source/include/linux/netdevice.h#L2810. + +// SoftnetStat contains a single row of data from /proc/net/softnet_stat +type SoftnetStat struct { + // Number of processed packets + Processed uint32 + // Number of dropped packets + Dropped uint32 + // Number of times processing packets ran out of quota + TimeSqueezed uint32 +} + +var softNetProcFile = "net/softnet_stat" + +// NetSoftnetStat reads data from /proc/net/softnet_stat. 
+func (fs FS) NetSoftnetStat() ([]SoftnetStat, error) { + b, err := util.ReadFileNoStat(fs.proc.Path(softNetProcFile)) + if err != nil { + return nil, err + } + + entries, err := parseSoftnet(bytes.NewReader(b)) + if err != nil { + return nil, fmt.Errorf("failed to parse /proc/net/softnet_stat: %w", err) + } + + return entries, nil +} + +func parseSoftnet(r io.Reader) ([]SoftnetStat, error) { + const minColumns = 9 + + s := bufio.NewScanner(r) + + var stats []SoftnetStat + for s.Scan() { + columns := strings.Fields(s.Text()) + width := len(columns) + + if width < minColumns { + return nil, fmt.Errorf("%d columns were detected, but at least %d were expected", width, minColumns) + } + + // We only parse the first three columns at the moment. + us, err := parseHexUint32s(columns[0:3]) + if err != nil { + return nil, err + } + + stats = append(stats, SoftnetStat{ + Processed: us[0], + Dropped: us[1], + TimeSqueezed: us[2], + }) + } + + return stats, nil +} + +func parseHexUint32s(ss []string) ([]uint32, error) { + us := make([]uint32, 0, len(ss)) + for _, s := range ss { + u, err := strconv.ParseUint(s, 16, 32) + if err != nil { + return nil, err + } + + us = append(us, uint32(u)) + } + + return us, nil +} diff --git a/vendor/github.com/prometheus/procfs/net_tcp.go b/vendor/github.com/prometheus/procfs/net_tcp.go new file mode 100644 index 000000000..527762955 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/net_tcp.go @@ -0,0 +1,64 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +type ( + // NetTCP represents the contents of /proc/net/tcp{,6} file without the header. + NetTCP []*netIPSocketLine + + // NetTCPSummary provides already computed values like the total queue lengths or + // the total number of used sockets. In contrast to NetTCP it does not collect + // the parsed lines into a slice. + NetTCPSummary NetIPSocketSummary +) + +// NetTCP returns the IPv4 kernel/networking statistics for TCP datagrams +// read from /proc/net/tcp. +func (fs FS) NetTCP() (NetTCP, error) { + return newNetTCP(fs.proc.Path("net/tcp")) +} + +// NetTCP6 returns the IPv6 kernel/networking statistics for TCP datagrams +// read from /proc/net/tcp6. +func (fs FS) NetTCP6() (NetTCP, error) { + return newNetTCP(fs.proc.Path("net/tcp6")) +} + +// NetTCPSummary returns already computed statistics like the total queue lengths +// for TCP datagrams read from /proc/net/tcp. +func (fs FS) NetTCPSummary() (*NetTCPSummary, error) { + return newNetTCPSummary(fs.proc.Path("net/tcp")) +} + +// NetTCP6Summary returns already computed statistics like the total queue lengths +// for TCP datagrams read from /proc/net/tcp6. +func (fs FS) NetTCP6Summary() (*NetTCPSummary, error) { + return newNetTCPSummary(fs.proc.Path("net/tcp6")) +} + +// newNetTCP creates a new NetTCP{,6} from the contents of the given file. 
+func newNetTCP(file string) (NetTCP, error) { + n, err := newNetIPSocket(file) + n1 := NetTCP(n) + return n1, err +} + +func newNetTCPSummary(file string) (*NetTCPSummary, error) { + n, err := newNetIPSocketSummary(file) + if n == nil { + return nil, err + } + n1 := NetTCPSummary(*n) + return &n1, err +} diff --git a/vendor/github.com/prometheus/procfs/net_udp.go b/vendor/github.com/prometheus/procfs/net_udp.go new file mode 100644 index 000000000..9ac3daf2d --- /dev/null +++ b/vendor/github.com/prometheus/procfs/net_udp.go @@ -0,0 +1,64 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +type ( + // NetUDP represents the contents of /proc/net/udp{,6} file without the header. + NetUDP []*netIPSocketLine + + // NetUDPSummary provides already computed values like the total queue lengths or + // the total number of used sockets. In contrast to NetUDP it does not collect + // the parsed lines into a slice. + NetUDPSummary NetIPSocketSummary +) + +// NetUDP returns the IPv4 kernel/networking statistics for UDP datagrams +// read from /proc/net/udp. +func (fs FS) NetUDP() (NetUDP, error) { + return newNetUDP(fs.proc.Path("net/udp")) +} + +// NetUDP6 returns the IPv6 kernel/networking statistics for UDP datagrams +// read from /proc/net/udp6. 
+func (fs FS) NetUDP6() (NetUDP, error) { + return newNetUDP(fs.proc.Path("net/udp6")) +} + +// NetUDPSummary returns already computed statistics like the total queue lengths +// for UDP datagrams read from /proc/net/udp. +func (fs FS) NetUDPSummary() (*NetUDPSummary, error) { + return newNetUDPSummary(fs.proc.Path("net/udp")) +} + +// NetUDP6Summary returns already computed statistics like the total queue lengths +// for UDP datagrams read from /proc/net/udp6. +func (fs FS) NetUDP6Summary() (*NetUDPSummary, error) { + return newNetUDPSummary(fs.proc.Path("net/udp6")) +} + +// newNetUDP creates a new NetUDP{,6} from the contents of the given file. +func newNetUDP(file string) (NetUDP, error) { + n, err := newNetIPSocket(file) + n1 := NetUDP(n) + return n1, err +} + +func newNetUDPSummary(file string) (*NetUDPSummary, error) { + n, err := newNetIPSocketSummary(file) + if n == nil { + return nil, err + } + n1 := NetUDPSummary(*n) + return &n1, err +} diff --git a/vendor/github.com/prometheus/procfs/net_unix.go b/vendor/github.com/prometheus/procfs/net_unix.go new file mode 100644 index 000000000..98aa8e1c3 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/net_unix.go @@ -0,0 +1,257 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package procfs + +import ( + "bufio" + "fmt" + "io" + "os" + "strconv" + "strings" +) + +// For the proc file format details, +// see https://elixir.bootlin.com/linux/v4.17/source/net/unix/af_unix.c#L2815 +// and https://elixir.bootlin.com/linux/latest/source/include/uapi/linux/net.h#L48. + +// Constants for the various /proc/net/unix enumerations. +// TODO: match against x/sys/unix or similar? +const ( + netUnixTypeStream = 1 + netUnixTypeDgram = 2 + netUnixTypeSeqpacket = 5 + + netUnixFlagDefault = 0 + netUnixFlagListen = 1 << 16 + + netUnixStateUnconnected = 1 + netUnixStateConnecting = 2 + netUnixStateConnected = 3 + netUnixStateDisconnected = 4 +) + +// NetUNIXType is the type of the type field. +type NetUNIXType uint64 + +// NetUNIXFlags is the type of the flags field. +type NetUNIXFlags uint64 + +// NetUNIXState is the type of the state field. +type NetUNIXState uint64 + +// NetUNIXLine represents a line of /proc/net/unix. +type NetUNIXLine struct { + KernelPtr string + RefCount uint64 + Protocol uint64 + Flags NetUNIXFlags + Type NetUNIXType + State NetUNIXState + Inode uint64 + Path string +} + +// NetUNIX holds the data read from /proc/net/unix. +type NetUNIX struct { + Rows []*NetUNIXLine +} + +// NetUNIX returns data read from /proc/net/unix. +func (fs FS) NetUNIX() (*NetUNIX, error) { + return readNetUNIX(fs.proc.Path("net/unix")) +} + +// readNetUNIX reads data in /proc/net/unix format from the specified file. +func readNetUNIX(file string) (*NetUNIX, error) { + // This file could be quite large and a streaming read is desirable versus + // reading the entire contents at once. + f, err := os.Open(file) + if err != nil { + return nil, err + } + defer f.Close() + + return parseNetUNIX(f) +} + +// parseNetUNIX creates a NetUnix structure from the incoming stream. +func parseNetUNIX(r io.Reader) (*NetUNIX, error) { + // Begin scanning by checking for the existence of Inode. 
+ s := bufio.NewScanner(r) + s.Scan() + + // From the man page of proc(5), it does not contain an Inode field, + // but in actually it exists. This code works for both cases. + hasInode := strings.Contains(s.Text(), "Inode") + + // Expect a minimum number of fields, but Inode and Path are optional: + // Num RefCount Protocol Flags Type St Inode Path + minFields := 6 + if hasInode { + minFields++ + } + + var nu NetUNIX + for s.Scan() { + line := s.Text() + item, err := nu.parseLine(line, hasInode, minFields) + if err != nil { + return nil, fmt.Errorf("failed to parse /proc/net/unix data %q: %w", line, err) + } + + nu.Rows = append(nu.Rows, item) + } + + if err := s.Err(); err != nil { + return nil, fmt.Errorf("failed to scan /proc/net/unix data: %w", err) + } + + return &nu, nil +} + +func (u *NetUNIX) parseLine(line string, hasInode bool, min int) (*NetUNIXLine, error) { + fields := strings.Fields(line) + + l := len(fields) + if l < min { + return nil, fmt.Errorf("expected at least %d fields but got %d", min, l) + } + + // Field offsets are as follows: + // Num RefCount Protocol Flags Type St Inode Path + + kernelPtr := strings.TrimSuffix(fields[0], ":") + + users, err := u.parseUsers(fields[1]) + if err != nil { + return nil, fmt.Errorf("failed to parse ref count %q: %w", fields[1], err) + } + + flags, err := u.parseFlags(fields[3]) + if err != nil { + return nil, fmt.Errorf("failed to parse flags %q: %w", fields[3], err) + } + + typ, err := u.parseType(fields[4]) + if err != nil { + return nil, fmt.Errorf("failed to parse type %q: %w", fields[4], err) + } + + state, err := u.parseState(fields[5]) + if err != nil { + return nil, fmt.Errorf("failed to parse state %q: %w", fields[5], err) + } + + var inode uint64 + if hasInode { + inode, err = u.parseInode(fields[6]) + if err != nil { + return nil, fmt.Errorf("failed to parse inode %q: %w", fields[6], err) + } + } + + n := &NetUNIXLine{ + KernelPtr: kernelPtr, + RefCount: users, + Type: typ, + Flags: flags, + State: 
state, + Inode: inode, + } + + // Path field is optional. + if l > min { + // Path occurs at either index 6 or 7 depending on whether inode is + // already present. + pathIdx := 7 + if !hasInode { + pathIdx-- + } + + n.Path = fields[pathIdx] + } + + return n, nil +} + +func (u NetUNIX) parseUsers(s string) (uint64, error) { + return strconv.ParseUint(s, 16, 32) +} + +func (u NetUNIX) parseType(s string) (NetUNIXType, error) { + typ, err := strconv.ParseUint(s, 16, 16) + if err != nil { + return 0, err + } + + return NetUNIXType(typ), nil +} + +func (u NetUNIX) parseFlags(s string) (NetUNIXFlags, error) { + flags, err := strconv.ParseUint(s, 16, 32) + if err != nil { + return 0, err + } + + return NetUNIXFlags(flags), nil +} + +func (u NetUNIX) parseState(s string) (NetUNIXState, error) { + st, err := strconv.ParseInt(s, 16, 8) + if err != nil { + return 0, err + } + + return NetUNIXState(st), nil +} + +func (u NetUNIX) parseInode(s string) (uint64, error) { + return strconv.ParseUint(s, 10, 64) +} + +func (t NetUNIXType) String() string { + switch t { + case netUnixTypeStream: + return "stream" + case netUnixTypeDgram: + return "dgram" + case netUnixTypeSeqpacket: + return "seqpacket" + } + return "unknown" +} + +func (f NetUNIXFlags) String() string { + switch f { + case netUnixFlagListen: + return "listen" + default: + return "default" + } +} + +func (s NetUNIXState) String() string { + switch s { + case netUnixStateUnconnected: + return "unconnected" + case netUnixStateConnecting: + return "connecting" + case netUnixStateConnected: + return "connected" + case netUnixStateDisconnected: + return "disconnected" + } + return "unknown" +} diff --git a/vendor/github.com/m3db/prometheus_procfs/proc.go b/vendor/github.com/prometheus/procfs/proc.go similarity index 55% rename from vendor/github.com/m3db/prometheus_procfs/proc.go rename to vendor/github.com/prometheus/procfs/proc.go index 8717e1fe0..28f696803 100644 --- a/vendor/github.com/m3db/prometheus_procfs/proc.go 
+++ b/vendor/github.com/prometheus/procfs/proc.go @@ -1,11 +1,28 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package procfs import ( + "bytes" "fmt" "io/ioutil" "os" "strconv" "strings" + + "github.com/prometheus/procfs/internal/fs" + "github.com/prometheus/procfs/internal/util" ) // Proc provides information about a running process. @@ -13,7 +30,7 @@ type Proc struct { // The process ID. PID int - fs FS + fs fs.FS } // Procs represents a list of Proc structs. @@ -38,7 +55,7 @@ func NewProc(pid int) (Proc, error) { if err != nil { return Proc{}, err } - return fs.NewProc(pid) + return fs.Proc(pid) } // AllProcs returns a list of all currently available processes under /proc. @@ -52,28 +69,35 @@ func AllProcs() (Procs, error) { // Self returns a process for the current process. func (fs FS) Self() (Proc, error) { - p, err := os.Readlink(fs.Path("self")) + p, err := os.Readlink(fs.proc.Path("self")) if err != nil { return Proc{}, err } - pid, err := strconv.Atoi(strings.Replace(p, string(fs), "", -1)) + pid, err := strconv.Atoi(strings.Replace(p, string(fs.proc), "", -1)) if err != nil { return Proc{}, err } - return fs.NewProc(pid) + return fs.Proc(pid) } // NewProc returns a process for the given pid. 
+// +// Deprecated: use fs.Proc() instead func (fs FS) NewProc(pid int) (Proc, error) { - if _, err := os.Stat(fs.Path(strconv.Itoa(pid))); err != nil { + return fs.Proc(pid) +} + +// Proc returns a process for the given pid. +func (fs FS) Proc(pid int) (Proc, error) { + if _, err := os.Stat(fs.proc.Path(strconv.Itoa(pid))); err != nil { return Proc{}, err } - return Proc{PID: pid, fs: fs}, nil + return Proc{PID: pid, fs: fs.proc}, nil } // AllProcs returns a list of all currently available processes. func (fs FS) AllProcs() (Procs, error) { - d, err := os.Open(fs.Path()) + d, err := os.Open(fs.proc.Path()) if err != nil { return Procs{}, err } @@ -81,7 +105,7 @@ func (fs FS) AllProcs() (Procs, error) { names, err := d.Readdirnames(-1) if err != nil { - return Procs{}, fmt.Errorf("could not read %s: %s", d.Name(), err) + return Procs{}, fmt.Errorf("could not read %q: %w", d.Name(), err) } p := Procs{} @@ -90,7 +114,7 @@ func (fs FS) AllProcs() (Procs, error) { if err != nil { continue } - p = append(p, Proc{PID: int(pid), fs: fs}) + p = append(p, Proc{PID: int(pid), fs: fs.proc}) } return p, nil @@ -98,13 +122,7 @@ func (fs FS) AllProcs() (Procs, error) { // CmdLine returns the command line of a process. func (p Proc) CmdLine() ([]string, error) { - f, err := os.Open(p.path("cmdline")) - if err != nil { - return nil, err - } - defer f.Close() - - data, err := ioutil.ReadAll(f) + data, err := util.ReadFileNoStat(p.path("cmdline")) if err != nil { return nil, err } @@ -113,12 +131,12 @@ func (p Proc) CmdLine() ([]string, error) { return []string{}, nil } - return strings.Split(string(data[:len(data)-1]), string(byte(0))), nil + return strings.Split(string(bytes.TrimRight(data, string("\x00"))), string(byte(0))), nil } -// Comm returns the command name of a process. -func (p Proc) Comm() (string, error) { - f, err := os.Open(p.path("comm")) +// Wchan returns the wchan (wait channel) of a process. 
+func (p Proc) Wchan() (string, error) { + f, err := os.Open(p.path("wchan")) if err != nil { return "", err } @@ -129,6 +147,21 @@ func (p Proc) Comm() (string, error) { return "", err } + wchan := string(data) + if wchan == "" || wchan == "0" { + return "", nil + } + + return wchan, nil +} + +// Comm returns the command name of a process. +func (p Proc) Comm() (string, error) { + data, err := util.ReadFileNoStat(p.path("comm")) + if err != nil { + return "", err + } + return strings.TrimSpace(string(data)), nil } @@ -142,6 +175,26 @@ func (p Proc) Executable() (string, error) { return exe, err } +// Cwd returns the absolute path to the current working directory of the process. +func (p Proc) Cwd() (string, error) { + wd, err := os.Readlink(p.path("cwd")) + if os.IsNotExist(err) { + return "", nil + } + + return wd, err +} + +// RootDir returns the absolute path to the process's root directory (as set by chroot) +func (p Proc) RootDir() (string, error) { + rdir, err := os.Readlink(p.path("root")) + if os.IsNotExist(err) { + return "", nil + } + + return rdir, err +} + // FileDescriptors returns the currently open file descriptors of a process. func (p Proc) FileDescriptors() ([]uintptr, error) { names, err := p.fileDescriptors() @@ -153,7 +206,7 @@ func (p Proc) FileDescriptors() ([]uintptr, error) { for i, n := range names { fd, err := strconv.ParseInt(n, 10, 32) if err != nil { - return nil, fmt.Errorf("could not parse fd %s: %s", n, err) + return nil, fmt.Errorf("could not parse fd %q: %w", n, err) } fds[i] = uintptr(fd) } @@ -204,6 +257,18 @@ func (p Proc) MountStats() ([]*Mount, error) { return parseMountStats(f) } +// MountInfo retrieves mount information for mount points in a +// process's namespace. +// It supplies information missing in `/proc/self/mounts` and +// fixes various other problems with that file too. 
+func (p Proc) MountInfo() ([]*MountInfo, error) { + data, err := util.ReadFileNoStat(p.path("mountinfo")) + if err != nil { + return nil, err + } + return parseMountInfo(data) +} + func (p Proc) fileDescriptors() ([]string, error) { d, err := os.Open(p.path("fd")) if err != nil { @@ -213,7 +278,7 @@ func (p Proc) fileDescriptors() ([]string, error) { names, err := d.Readdirnames(-1) if err != nil { - return nil, fmt.Errorf("could not read %s: %s", d.Name(), err) + return nil, fmt.Errorf("could not read %q: %w", d.Name(), err) } return names, nil @@ -222,3 +287,33 @@ func (p Proc) fileDescriptors() ([]string, error) { func (p Proc) path(pa ...string) string { return p.fs.Path(append([]string{strconv.Itoa(p.PID)}, pa...)...) } + +// FileDescriptorsInfo retrieves information about all file descriptors of +// the process. +func (p Proc) FileDescriptorsInfo() (ProcFDInfos, error) { + names, err := p.fileDescriptors() + if err != nil { + return nil, err + } + + var fdinfos ProcFDInfos + + for _, n := range names { + fdinfo, err := p.FDInfo(n) + if err != nil { + continue + } + fdinfos = append(fdinfos, *fdinfo) + } + + return fdinfos, nil +} + +// Schedstat returns task scheduling information for the process. +func (p Proc) Schedstat() (ProcSchedstat, error) { + contents, err := ioutil.ReadFile(p.path("schedstat")) + if err != nil { + return ProcSchedstat{}, err + } + return parseProcSchedstat(string(contents)) +} diff --git a/vendor/github.com/prometheus/procfs/proc_cgroup.go b/vendor/github.com/prometheus/procfs/proc_cgroup.go new file mode 100644 index 000000000..0094a13c0 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_cgroup.go @@ -0,0 +1,98 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// Cgroup models one line from /proc/[pid]/cgroup. Each Cgroup struct describes the the placement of a PID inside a +// specific control hierarchy. The kernel has two cgroup APIs, v1 and v2. v1 has one hierarchy per available resource +// controller, while v2 has one unified hierarchy shared by all controllers. Regardless of v1 or v2, all hierarchies +// contain all running processes, so the question answerable with a Cgroup struct is 'where is this process in +// this hierarchy' (where==what path on the specific cgroupfs). By prefixing this path with the mount point of +// *this specific* hierarchy, you can locate the relevant pseudo-files needed to read/set the data for this PID +// in this hierarchy +// +// Also see http://man7.org/linux/man-pages/man7/cgroups.7.html +type Cgroup struct { + // HierarchyID that can be matched to a named hierarchy using /proc/cgroups. Cgroups V2 only has one + // hierarchy, so HierarchyID is always 0. For cgroups v1 this is a unique ID number + HierarchyID int + // Controllers using this hierarchy of processes. Controllers are also known as subsystems. 
For + // Cgroups V2 this may be empty, as all active controllers use the same hierarchy + Controllers []string + // Path of this control group, relative to the mount point of the cgroupfs representing this specific + // hierarchy + Path string +} + +// parseCgroupString parses each line of the /proc/[pid]/cgroup file +// Line format is hierarchyID:[controller1,controller2]:path +func parseCgroupString(cgroupStr string) (*Cgroup, error) { + var err error + + fields := strings.SplitN(cgroupStr, ":", 3) + if len(fields) < 3 { + return nil, fmt.Errorf("at least 3 fields required, found %d fields in cgroup string: %s", len(fields), cgroupStr) + } + + cgroup := &Cgroup{ + Path: fields[2], + Controllers: nil, + } + cgroup.HierarchyID, err = strconv.Atoi(fields[0]) + if err != nil { + return nil, fmt.Errorf("failed to parse hierarchy ID") + } + if fields[1] != "" { + ssNames := strings.Split(fields[1], ",") + cgroup.Controllers = append(cgroup.Controllers, ssNames...) + } + return cgroup, nil +} + +// parseCgroups reads each line of the /proc/[pid]/cgroup file +func parseCgroups(data []byte) ([]Cgroup, error) { + var cgroups []Cgroup + scanner := bufio.NewScanner(bytes.NewReader(data)) + for scanner.Scan() { + mountString := scanner.Text() + parsedMounts, err := parseCgroupString(mountString) + if err != nil { + return nil, err + } + cgroups = append(cgroups, *parsedMounts) + } + + err := scanner.Err() + return cgroups, err +} + +// Cgroups reads from /proc//cgroups and returns a []*Cgroup struct locating this PID in each process +// control hierarchy running on this system. 
On every system (v1 and v2), all hierarchies contain all processes, +// so the len of the returned struct is equal to the number of active hierarchies on this system +func (p Proc) Cgroups() ([]Cgroup, error) { + data, err := util.ReadFileNoStat(fmt.Sprintf("/proc/%d/cgroup", p.PID)) + if err != nil { + return nil, err + } + return parseCgroups(data) +} diff --git a/vendor/github.com/prometheus/procfs/proc_environ.go b/vendor/github.com/prometheus/procfs/proc_environ.go new file mode 100644 index 000000000..6134b3580 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_environ.go @@ -0,0 +1,37 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package procfs + +import ( + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// Environ reads process environments from /proc//environ +func (p Proc) Environ() ([]string, error) { + environments := make([]string, 0) + + data, err := util.ReadFileNoStat(p.path("environ")) + if err != nil { + return environments, err + } + + environments = strings.Split(string(data), "\000") + if len(environments) > 0 { + environments = environments[:len(environments)-1] + } + + return environments, nil +} diff --git a/vendor/github.com/prometheus/procfs/proc_fdinfo.go b/vendor/github.com/prometheus/procfs/proc_fdinfo.go new file mode 100644 index 000000000..cf63227f0 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_fdinfo.go @@ -0,0 +1,133 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "regexp" + + "github.com/prometheus/procfs/internal/util" +) + +// Regexp variables +var ( + rPos = regexp.MustCompile(`^pos:\s+(\d+)$`) + rFlags = regexp.MustCompile(`^flags:\s+(\d+)$`) + rMntID = regexp.MustCompile(`^mnt_id:\s+(\d+)$`) + rInotify = regexp.MustCompile(`^inotify`) + rInotifyParts = regexp.MustCompile(`^inotify\s+wd:([0-9a-f]+)\s+ino:([0-9a-f]+)\s+sdev:([0-9a-f]+)(?:\s+mask:([0-9a-f]+))?`) +) + +// ProcFDInfo contains represents file descriptor information. 
+type ProcFDInfo struct { + // File descriptor + FD string + // File offset + Pos string + // File access mode and status flags + Flags string + // Mount point ID + MntID string + // List of inotify lines (structured) in the fdinfo file (kernel 3.8+ only) + InotifyInfos []InotifyInfo +} + +// FDInfo constructor. On kernels older than 3.8, InotifyInfos will always be empty. +func (p Proc) FDInfo(fd string) (*ProcFDInfo, error) { + data, err := util.ReadFileNoStat(p.path("fdinfo", fd)) + if err != nil { + return nil, err + } + + var text, pos, flags, mntid string + var inotify []InotifyInfo + + scanner := bufio.NewScanner(bytes.NewReader(data)) + for scanner.Scan() { + text = scanner.Text() + if rPos.MatchString(text) { + pos = rPos.FindStringSubmatch(text)[1] + } else if rFlags.MatchString(text) { + flags = rFlags.FindStringSubmatch(text)[1] + } else if rMntID.MatchString(text) { + mntid = rMntID.FindStringSubmatch(text)[1] + } else if rInotify.MatchString(text) { + newInotify, err := parseInotifyInfo(text) + if err != nil { + return nil, err + } + inotify = append(inotify, *newInotify) + } + } + + i := &ProcFDInfo{ + FD: fd, + Pos: pos, + Flags: flags, + MntID: mntid, + InotifyInfos: inotify, + } + + return i, nil +} + +// InotifyInfo represents a single inotify line in the fdinfo file. +type InotifyInfo struct { + // Watch descriptor number + WD string + // Inode number + Ino string + // Device ID + Sdev string + // Mask of events being monitored + Mask string +} + +// InotifyInfo constructor. Only available on kernel 3.8+. +func parseInotifyInfo(line string) (*InotifyInfo, error) { + m := rInotifyParts.FindStringSubmatch(line) + if len(m) >= 4 { + var mask string + if len(m) == 5 { + mask = m[4] + } + i := &InotifyInfo{ + WD: m[1], + Ino: m[2], + Sdev: m[3], + Mask: mask, + } + return i, nil + } + return nil, fmt.Errorf("invalid inode entry: %q", line) +} + +// ProcFDInfos represents a list of ProcFDInfo structs. 
+type ProcFDInfos []ProcFDInfo + +func (p ProcFDInfos) Len() int { return len(p) } +func (p ProcFDInfos) Swap(i, j int) { p[i], p[j] = p[j], p[i] } +func (p ProcFDInfos) Less(i, j int) bool { return p[i].FD < p[j].FD } + +// InotifyWatchLen returns the total number of inotify watches +func (p ProcFDInfos) InotifyWatchLen() (int, error) { + length := 0 + for _, f := range p { + length += len(f.InotifyInfos) + } + + return length, nil +} diff --git a/vendor/github.com/m3db/prometheus_procfs/proc_io.go b/vendor/github.com/prometheus/procfs/proc_io.go similarity index 50% rename from vendor/github.com/m3db/prometheus_procfs/proc_io.go rename to vendor/github.com/prometheus/procfs/proc_io.go index b4e31d7ba..776f34971 100644 --- a/vendor/github.com/m3db/prometheus_procfs/proc_io.go +++ b/vendor/github.com/prometheus/procfs/proc_io.go @@ -1,9 +1,22 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package procfs import ( "fmt" - "io/ioutil" - "os" + + "github.com/prometheus/procfs/internal/util" ) // ProcIO models the content of /proc//io. @@ -26,17 +39,11 @@ type ProcIO struct { CancelledWriteBytes int64 } -// NewIO creates a new ProcIO instance from a given Proc instance. -func (p Proc) NewIO() (ProcIO, error) { +// IO creates a new ProcIO instance from a given Proc instance. 
+func (p Proc) IO() (ProcIO, error) { pio := ProcIO{} - f, err := os.Open(p.path("io")) - if err != nil { - return pio, err - } - defer f.Close() - - data, err := ioutil.ReadAll(f) + data, err := util.ReadFileNoStat(p.path("io")) if err != nil { return pio, err } @@ -47,9 +54,6 @@ func (p Proc) NewIO() (ProcIO, error) { _, err = fmt.Sscanf(string(data), ioFormat, &pio.RChar, &pio.WChar, &pio.SyscR, &pio.SyscW, &pio.ReadBytes, &pio.WriteBytes, &pio.CancelledWriteBytes) - if err != nil { - return pio, err - } - return pio, nil + return pio, err } diff --git a/vendor/github.com/m3db/prometheus_procfs/proc_limits.go b/vendor/github.com/prometheus/procfs/proc_limits.go similarity index 51% rename from vendor/github.com/m3db/prometheus_procfs/proc_limits.go rename to vendor/github.com/prometheus/procfs/proc_limits.go index 2df997ce1..dd20f198a 100644 --- a/vendor/github.com/m3db/prometheus_procfs/proc_limits.go +++ b/vendor/github.com/prometheus/procfs/proc_limits.go @@ -1,3 +1,16 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package procfs import ( @@ -13,59 +26,66 @@ import ( // http://man7.org/linux/man-pages/man2/getrlimit.2.html. type ProcLimits struct { // CPU time limit in seconds. - CPUTime int + CPUTime uint64 // Maximum size of files that the process may create. - FileSize int + FileSize uint64 // Maximum size of the process's data segment (initialized data, // uninitialized data, and heap). 
- DataSize int + DataSize uint64 // Maximum size of the process stack in bytes. - StackSize int + StackSize uint64 // Maximum size of a core file. - CoreFileSize int + CoreFileSize uint64 // Limit of the process's resident set in pages. - ResidentSet int + ResidentSet uint64 // Maximum number of processes that can be created for the real user ID of // the calling process. - Processes int + Processes uint64 // Value one greater than the maximum file descriptor number that can be // opened by this process. - OpenFiles int + OpenFiles uint64 // Maximum number of bytes of memory that may be locked into RAM. - LockedMemory int + LockedMemory uint64 // Maximum size of the process's virtual memory address space in bytes. - AddressSpace int + AddressSpace uint64 // Limit on the combined number of flock(2) locks and fcntl(2) leases that // this process may establish. - FileLocks int + FileLocks uint64 // Limit of signals that may be queued for the real user ID of the calling // process. - PendingSignals int + PendingSignals uint64 // Limit on the number of bytes that can be allocated for POSIX message // queues for the real user ID of the calling process. - MsqqueueSize int + MsqqueueSize uint64 // Limit of the nice priority set using setpriority(2) or nice(2). - NicePriority int + NicePriority uint64 // Limit of the real-time priority set using sched_setscheduler(2) or // sched_setparam(2). - RealtimePriority int + RealtimePriority uint64 // Limit (in microseconds) on the amount of CPU time that a process // scheduled under a real-time scheduling policy may consume without making // a blocking system call. - RealtimeTimeout int + RealtimeTimeout uint64 } const ( - limitsFields = 3 + limitsFields = 4 limitsUnlimited = "unlimited" ) var ( - limitsDelimiter = regexp.MustCompile(" +") + limitsMatch = regexp.MustCompile(`(Max \w+\s{0,1}?\w*\s{0,1}\w*)\s{2,}(\w+)\s+(\w+)`) ) // NewLimits returns the current soft limits of the process. 
+// +// Deprecated: use p.Limits() instead func (p Proc) NewLimits() (ProcLimits, error) { + return p.Limits() +} + +// Limits returns the current soft limits of the process. +func (p Proc) Limits() (ProcLimits, error) { f, err := os.Open(p.path("limits")) if err != nil { return ProcLimits{}, err @@ -76,46 +96,49 @@ func (p Proc) NewLimits() (ProcLimits, error) { l = ProcLimits{} s = bufio.NewScanner(f) ) + + s.Scan() // Skip limits header + for s.Scan() { - fields := limitsDelimiter.Split(s.Text(), limitsFields) + //fields := limitsMatch.Split(s.Text(), limitsFields) + fields := limitsMatch.FindStringSubmatch(s.Text()) if len(fields) != limitsFields { - return ProcLimits{}, fmt.Errorf( - "couldn't parse %s line %s", f.Name(), s.Text()) + return ProcLimits{}, fmt.Errorf("couldn't parse %q line %q", f.Name(), s.Text()) } - switch fields[0] { + switch fields[1] { case "Max cpu time": - l.CPUTime, err = parseInt(fields[1]) + l.CPUTime, err = parseUint(fields[2]) case "Max file size": - l.FileSize, err = parseInt(fields[1]) + l.FileSize, err = parseUint(fields[2]) case "Max data size": - l.DataSize, err = parseInt(fields[1]) + l.DataSize, err = parseUint(fields[2]) case "Max stack size": - l.StackSize, err = parseInt(fields[1]) + l.StackSize, err = parseUint(fields[2]) case "Max core file size": - l.CoreFileSize, err = parseInt(fields[1]) + l.CoreFileSize, err = parseUint(fields[2]) case "Max resident set": - l.ResidentSet, err = parseInt(fields[1]) + l.ResidentSet, err = parseUint(fields[2]) case "Max processes": - l.Processes, err = parseInt(fields[1]) + l.Processes, err = parseUint(fields[2]) case "Max open files": - l.OpenFiles, err = parseInt(fields[1]) + l.OpenFiles, err = parseUint(fields[2]) case "Max locked memory": - l.LockedMemory, err = parseInt(fields[1]) + l.LockedMemory, err = parseUint(fields[2]) case "Max address space": - l.AddressSpace, err = parseInt(fields[1]) + l.AddressSpace, err = parseUint(fields[2]) case "Max file locks": - l.FileLocks, err = 
parseInt(fields[1]) + l.FileLocks, err = parseUint(fields[2]) case "Max pending signals": - l.PendingSignals, err = parseInt(fields[1]) + l.PendingSignals, err = parseUint(fields[2]) case "Max msgqueue size": - l.MsqqueueSize, err = parseInt(fields[1]) + l.MsqqueueSize, err = parseUint(fields[2]) case "Max nice priority": - l.NicePriority, err = parseInt(fields[1]) + l.NicePriority, err = parseUint(fields[2]) case "Max realtime priority": - l.RealtimePriority, err = parseInt(fields[1]) + l.RealtimePriority, err = parseUint(fields[2]) case "Max realtime timeout": - l.RealtimeTimeout, err = parseInt(fields[1]) + l.RealtimeTimeout, err = parseUint(fields[2]) } if err != nil { return ProcLimits{}, err @@ -125,13 +148,13 @@ func (p Proc) NewLimits() (ProcLimits, error) { return l, s.Err() } -func parseInt(s string) (int, error) { +func parseUint(s string) (uint64, error) { if s == limitsUnlimited { - return -1, nil + return 18446744073709551615, nil } - i, err := strconv.ParseInt(s, 10, 32) + i, err := strconv.ParseUint(s, 10, 64) if err != nil { - return 0, fmt.Errorf("couldn't parse value %s: %s", s, err) + return 0, fmt.Errorf("couldn't parse value %q: %w", s, err) } - return int(i), nil + return i, nil } diff --git a/vendor/github.com/prometheus/procfs/proc_maps.go b/vendor/github.com/prometheus/procfs/proc_maps.go new file mode 100644 index 000000000..1d7772d51 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_maps.go @@ -0,0 +1,209 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris + +package procfs + +import ( + "bufio" + "fmt" + "os" + "strconv" + "strings" + + "golang.org/x/sys/unix" +) + +// ProcMapPermissions contains permission settings read from /proc/[pid]/maps +type ProcMapPermissions struct { + // mapping has the [R]ead flag set + Read bool + // mapping has the [W]rite flag set + Write bool + // mapping has the [X]ecutable flag set + Execute bool + // mapping has the [S]hared flag set + Shared bool + // mapping is marked as [P]rivate (copy on write) + Private bool +} + +// ProcMap contains the process memory-mappings of the process, +// read from /proc/[pid]/maps +type ProcMap struct { + // The start address of current mapping. + StartAddr uintptr + // The end address of the current mapping + EndAddr uintptr + // The permissions for this mapping + Perms *ProcMapPermissions + // The current offset into the file/fd (e.g., shared libs) + Offset int64 + // Device owner of this mapping (major:minor) in Mkdev format. + Dev uint64 + // The inode of the device above + Inode uint64 + // The file or psuedofile (or empty==anonymous) + Pathname string +} + +// parseDevice parses the device token of a line and converts it to a dev_t +// (mkdev) like structure. 
+func parseDevice(s string) (uint64, error) { + toks := strings.Split(s, ":") + if len(toks) < 2 { + return 0, fmt.Errorf("unexpected number of fields") + } + + major, err := strconv.ParseUint(toks[0], 16, 0) + if err != nil { + return 0, err + } + + minor, err := strconv.ParseUint(toks[1], 16, 0) + if err != nil { + return 0, err + } + + return unix.Mkdev(uint32(major), uint32(minor)), nil +} + +// parseAddress just converts a hex-string to a uintptr +func parseAddress(s string) (uintptr, error) { + a, err := strconv.ParseUint(s, 16, 0) + if err != nil { + return 0, err + } + + return uintptr(a), nil +} + +// parseAddresses parses the start-end address +func parseAddresses(s string) (uintptr, uintptr, error) { + toks := strings.Split(s, "-") + if len(toks) < 2 { + return 0, 0, fmt.Errorf("invalid address") + } + + saddr, err := parseAddress(toks[0]) + if err != nil { + return 0, 0, err + } + + eaddr, err := parseAddress(toks[1]) + if err != nil { + return 0, 0, err + } + + return saddr, eaddr, nil +} + +// parsePermissions parses a token and returns any that are set. +func parsePermissions(s string) (*ProcMapPermissions, error) { + if len(s) < 4 { + return nil, fmt.Errorf("invalid permissions token") + } + + perms := ProcMapPermissions{} + for _, ch := range s { + switch ch { + case 'r': + perms.Read = true + case 'w': + perms.Write = true + case 'x': + perms.Execute = true + case 'p': + perms.Private = true + case 's': + perms.Shared = true + } + } + + return &perms, nil +} + +// parseProcMap will attempt to parse a single line within a proc/[pid]/maps +// buffer. 
+func parseProcMap(text string) (*ProcMap, error) { + fields := strings.Fields(text) + if len(fields) < 5 { + return nil, fmt.Errorf("truncated procmap entry") + } + + saddr, eaddr, err := parseAddresses(fields[0]) + if err != nil { + return nil, err + } + + perms, err := parsePermissions(fields[1]) + if err != nil { + return nil, err + } + + offset, err := strconv.ParseInt(fields[2], 16, 0) + if err != nil { + return nil, err + } + + device, err := parseDevice(fields[3]) + if err != nil { + return nil, err + } + + inode, err := strconv.ParseUint(fields[4], 10, 0) + if err != nil { + return nil, err + } + + pathname := "" + + if len(fields) >= 5 { + pathname = strings.Join(fields[5:], " ") + } + + return &ProcMap{ + StartAddr: saddr, + EndAddr: eaddr, + Perms: perms, + Offset: offset, + Dev: device, + Inode: inode, + Pathname: pathname, + }, nil +} + +// ProcMaps reads from /proc/[pid]/maps to get the memory-mappings of the +// process. +func (p Proc) ProcMaps() ([]*ProcMap, error) { + file, err := os.Open(p.path("maps")) + if err != nil { + return nil, err + } + defer file.Close() + + maps := []*ProcMap{} + scan := bufio.NewScanner(file) + + for scan.Scan() { + m, err := parseProcMap(scan.Text()) + if err != nil { + return nil, err + } + + maps = append(maps, m) + } + + return maps, nil +} diff --git a/vendor/github.com/prometheus/procfs/proc_ns.go b/vendor/github.com/prometheus/procfs/proc_ns.go new file mode 100644 index 000000000..391b4cbd1 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_ns.go @@ -0,0 +1,68 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "fmt" + "os" + "strconv" + "strings" +) + +// Namespace represents a single namespace of a process. +type Namespace struct { + Type string // Namespace type. + Inode uint32 // Inode number of the namespace. If two processes are in the same namespace their inodes will match. +} + +// Namespaces contains all of the namespaces that the process is contained in. +type Namespaces map[string]Namespace + +// Namespaces reads from /proc//ns/* to get the namespaces of which the +// process is a member. +func (p Proc) Namespaces() (Namespaces, error) { + d, err := os.Open(p.path("ns")) + if err != nil { + return nil, err + } + defer d.Close() + + names, err := d.Readdirnames(-1) + if err != nil { + return nil, fmt.Errorf("failed to read contents of ns dir: %w", err) + } + + ns := make(Namespaces, len(names)) + for _, name := range names { + target, err := os.Readlink(p.path("ns", name)) + if err != nil { + return nil, err + } + + fields := strings.SplitN(target, ":", 2) + if len(fields) != 2 { + return nil, fmt.Errorf("failed to parse namespace type and inode from %q", target) + } + + typ := fields[0] + inode, err := strconv.ParseUint(strings.Trim(fields[1], "[]"), 10, 32) + if err != nil { + return nil, fmt.Errorf("failed to parse inode from %q: %w", fields[1], err) + } + + ns[name] = Namespace{typ, uint32(inode)} + } + + return ns, nil +} diff --git a/vendor/github.com/prometheus/procfs/proc_psi.go b/vendor/github.com/prometheus/procfs/proc_psi.go new file mode 100644 index 000000000..dc6c14f0a --- /dev/null +++ 
b/vendor/github.com/prometheus/procfs/proc_psi.go @@ -0,0 +1,100 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +// The PSI / pressure interface is described at +// https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/Documentation/accounting/psi.txt +// Each resource (cpu, io, memory, ...) is exposed as a single file. +// Each file may contain up to two lines, one for "some" pressure and one for "full" pressure. +// Each line contains several averages (over n seconds) and a total in µs. 
+// +// Example io pressure file: +// > some avg10=0.06 avg60=0.21 avg300=0.99 total=8537362 +// > full avg10=0.00 avg60=0.13 avg300=0.96 total=8183134 + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +const lineFormat = "avg10=%f avg60=%f avg300=%f total=%d" + +// PSILine is a single line of values as returned by /proc/pressure/* +// The Avg entries are averages over n seconds, as a percentage +// The Total line is in microseconds +type PSILine struct { + Avg10 float64 + Avg60 float64 + Avg300 float64 + Total uint64 +} + +// PSIStats represent pressure stall information from /proc/pressure/* +// Some indicates the share of time in which at least some tasks are stalled +// Full indicates the share of time in which all non-idle tasks are stalled simultaneously +type PSIStats struct { + Some *PSILine + Full *PSILine +} + +// PSIStatsForResource reads pressure stall information for the specified +// resource from /proc/pressure/. At time of writing this can be +// either "cpu", "memory" or "io". 
+func (fs FS) PSIStatsForResource(resource string) (PSIStats, error) { + data, err := util.ReadFileNoStat(fs.proc.Path(fmt.Sprintf("%s/%s", "pressure", resource))) + if err != nil { + return PSIStats{}, fmt.Errorf("psi_stats: unavailable for %q: %w", resource, err) + } + + return parsePSIStats(resource, bytes.NewReader(data)) +} + +// parsePSIStats parses the specified file for pressure stall information +func parsePSIStats(resource string, r io.Reader) (PSIStats, error) { + psiStats := PSIStats{} + + scanner := bufio.NewScanner(r) + for scanner.Scan() { + l := scanner.Text() + prefix := strings.Split(l, " ")[0] + switch prefix { + case "some": + psi := PSILine{} + _, err := fmt.Sscanf(l, fmt.Sprintf("some %s", lineFormat), &psi.Avg10, &psi.Avg60, &psi.Avg300, &psi.Total) + if err != nil { + return PSIStats{}, err + } + psiStats.Some = &psi + case "full": + psi := PSILine{} + _, err := fmt.Sscanf(l, fmt.Sprintf("full %s", lineFormat), &psi.Avg10, &psi.Avg60, &psi.Avg300, &psi.Total) + if err != nil { + return PSIStats{}, err + } + psiStats.Full = &psi + default: + // If we encounter a line with an unknown prefix, ignore it and move on + // Should new measurement types be added in the future we'll simply ignore them instead + // of erroring on retrieval + continue + } + } + + return psiStats, nil +} diff --git a/vendor/github.com/prometheus/procfs/proc_smaps.go b/vendor/github.com/prometheus/procfs/proc_smaps.go new file mode 100644 index 000000000..a576a720a --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_smaps.go @@ -0,0 +1,165 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !windows + +package procfs + +import ( + "bufio" + "errors" + "fmt" + "os" + "regexp" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +var ( + // match the header line before each mapped zone in /proc/pid/smaps + procSMapsHeaderLine = regexp.MustCompile(`^[a-f0-9].*$`) +) + +type ProcSMapsRollup struct { + // Amount of the mapping that is currently resident in RAM + Rss uint64 + // Process's proportional share of this mapping + Pss uint64 + // Size in bytes of clean shared pages + SharedClean uint64 + // Size in bytes of dirty shared pages + SharedDirty uint64 + // Size in bytes of clean private pages + PrivateClean uint64 + // Size in bytes of dirty private pages + PrivateDirty uint64 + // Amount of memory currently marked as referenced or accessed + Referenced uint64 + // Amount of memory that does not belong to any file + Anonymous uint64 + // Amount would-be-anonymous memory currently on swap + Swap uint64 + // Process's proportional memory on swap + SwapPss uint64 +} + +// ProcSMapsRollup reads from /proc/[pid]/smaps_rollup to get summed memory information of the +// process. +// +// If smaps_rollup does not exists (require kernel >= 4.15), the content of /proc/pid/smaps will +// we read and summed. 
+func (p Proc) ProcSMapsRollup() (ProcSMapsRollup, error) { + data, err := util.ReadFileNoStat(p.path("smaps_rollup")) + if err != nil && os.IsNotExist(err) { + return p.procSMapsRollupManual() + } + if err != nil { + return ProcSMapsRollup{}, err + } + + lines := strings.Split(string(data), "\n") + smaps := ProcSMapsRollup{} + + // skip first line which don't contains information we need + lines = lines[1:] + for _, line := range lines { + if line == "" { + continue + } + + if err := smaps.parseLine(line); err != nil { + return ProcSMapsRollup{}, err + } + } + + return smaps, nil +} + +// Read /proc/pid/smaps and do the roll-up in Go code. +func (p Proc) procSMapsRollupManual() (ProcSMapsRollup, error) { + file, err := os.Open(p.path("smaps")) + if err != nil { + return ProcSMapsRollup{}, err + } + defer file.Close() + + smaps := ProcSMapsRollup{} + scan := bufio.NewScanner(file) + + for scan.Scan() { + line := scan.Text() + + if procSMapsHeaderLine.MatchString(line) { + continue + } + + if err := smaps.parseLine(line); err != nil { + return ProcSMapsRollup{}, err + } + } + + return smaps, nil +} + +func (s *ProcSMapsRollup) parseLine(line string) error { + kv := strings.SplitN(line, ":", 2) + if len(kv) != 2 { + fmt.Println(line) + return errors.New("invalid net/dev line, missing colon") + } + + k := kv[0] + if k == "VmFlags" { + return nil + } + + v := strings.TrimSpace(kv[1]) + v = strings.TrimRight(v, " kB") + + vKBytes, err := strconv.ParseUint(v, 10, 64) + if err != nil { + return err + } + vBytes := vKBytes * 1024 + + s.addValue(k, v, vKBytes, vBytes) + + return nil +} + +func (s *ProcSMapsRollup) addValue(k string, vString string, vUint uint64, vUintBytes uint64) { + switch k { + case "Rss": + s.Rss += vUintBytes + case "Pss": + s.Pss += vUintBytes + case "Shared_Clean": + s.SharedClean += vUintBytes + case "Shared_Dirty": + s.SharedDirty += vUintBytes + case "Private_Clean": + s.PrivateClean += vUintBytes + case "Private_Dirty": + s.PrivateDirty += 
vUintBytes + case "Referenced": + s.Referenced += vUintBytes + case "Anonymous": + s.Anonymous += vUintBytes + case "Swap": + s.Swap += vUintBytes + case "SwapPss": + s.SwapPss += vUintBytes + } +} diff --git a/vendor/github.com/m3db/prometheus_procfs/proc_stat.go b/vendor/github.com/prometheus/procfs/proc_stat.go similarity index 79% rename from vendor/github.com/m3db/prometheus_procfs/proc_stat.go rename to vendor/github.com/prometheus/procfs/proc_stat.go index 724e271b9..67ca0e9fb 100644 --- a/vendor/github.com/m3db/prometheus_procfs/proc_stat.go +++ b/vendor/github.com/prometheus/procfs/proc_stat.go @@ -1,10 +1,25 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package procfs import ( "bytes" "fmt" - "io/ioutil" "os" + + "github.com/prometheus/procfs/internal/fs" + "github.com/prometheus/procfs/internal/util" ) // Originally, this USER_HZ value was dynamically retrieved via a sysconf call @@ -82,22 +97,23 @@ type ProcStat struct { // in clock ticks. Starttime uint64 // Virtual memory size in bytes. - VSize int + VSize uint // Resident set size in pages. RSS int - fs FS + proc fs.FS } // NewStat returns the current status information of the process. 
+// +// Deprecated: use p.Stat() instead func (p Proc) NewStat() (ProcStat, error) { - f, err := os.Open(p.path("stat")) - if err != nil { - return ProcStat{}, err - } - defer f.Close() + return p.Stat() +} - data, err := ioutil.ReadAll(f) +// Stat returns the current status information of the process. +func (p Proc) Stat() (ProcStat, error) { + data, err := util.ReadFileNoStat(p.path("stat")) if err != nil { return ProcStat{}, err } @@ -105,16 +121,13 @@ func (p Proc) NewStat() (ProcStat, error) { var ( ignore int - s = ProcStat{PID: p.PID, fs: p.fs} + s = ProcStat{PID: p.PID, proc: p.fs} l = bytes.Index(data, []byte("(")) r = bytes.LastIndex(data, []byte(")")) ) if l < 0 || r < 0 { - return ProcStat{}, fmt.Errorf( - "unexpected format, couldn't extract comm: %s", - data, - ) + return ProcStat{}, fmt.Errorf("unexpected format, couldn't extract comm %q", data) } s.Comm = string(data[l+1 : r]) @@ -151,7 +164,7 @@ func (p Proc) NewStat() (ProcStat, error) { } // VirtualMemory returns the virtual memory size in bytes. -func (s ProcStat) VirtualMemory() int { +func (s ProcStat) VirtualMemory() uint { return s.VSize } @@ -162,7 +175,8 @@ func (s ProcStat) ResidentMemory() int { // StartTime returns the unix timestamp of the process in seconds. func (s ProcStat) StartTime() (float64, error) { - stat, err := s.fs.NewStat() + fs := FS{proc: s.proc} + stat, err := fs.Stat() if err != nil { return 0, err } diff --git a/vendor/github.com/prometheus/procfs/proc_status.go b/vendor/github.com/prometheus/procfs/proc_status.go new file mode 100644 index 000000000..6edd8333b --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_status.go @@ -0,0 +1,170 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bytes" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// ProcStatus provides status information about the process, +// read from /proc/[pid]/stat. +type ProcStatus struct { + // The process ID. + PID int + // The process name. + Name string + + // Thread group ID. + TGID int + + // Peak virtual memory size. + VmPeak uint64 // nolint:golint + // Virtual memory size. + VmSize uint64 // nolint:golint + // Locked memory size. + VmLck uint64 // nolint:golint + // Pinned memory size. + VmPin uint64 // nolint:golint + // Peak resident set size. + VmHWM uint64 // nolint:golint + // Resident set size (sum of RssAnnon RssFile and RssShmem). + VmRSS uint64 // nolint:golint + // Size of resident anonymous memory. + RssAnon uint64 // nolint:golint + // Size of resident file mappings. + RssFile uint64 // nolint:golint + // Size of resident shared memory. + RssShmem uint64 // nolint:golint + // Size of data segments. + VmData uint64 // nolint:golint + // Size of stack segments. + VmStk uint64 // nolint:golint + // Size of text segments. + VmExe uint64 // nolint:golint + // Shared library code size. + VmLib uint64 // nolint:golint + // Page table entries size. + VmPTE uint64 // nolint:golint + // Size of second-level page tables. + VmPMD uint64 // nolint:golint + // Swapped-out virtual memory size by anonymous private. + VmSwap uint64 // nolint:golint + // Size of hugetlb memory portions + HugetlbPages uint64 + + // Number of voluntary context switches. 
+ VoluntaryCtxtSwitches uint64 + // Number of involuntary context switches. + NonVoluntaryCtxtSwitches uint64 + + // UIDs of the process (Real, effective, saved set, and filesystem UIDs) + UIDs [4]string + // GIDs of the process (Real, effective, saved set, and filesystem GIDs) + GIDs [4]string +} + +// NewStatus returns the current status information of the process. +func (p Proc) NewStatus() (ProcStatus, error) { + data, err := util.ReadFileNoStat(p.path("status")) + if err != nil { + return ProcStatus{}, err + } + + s := ProcStatus{PID: p.PID} + + lines := strings.Split(string(data), "\n") + for _, line := range lines { + if !bytes.Contains([]byte(line), []byte(":")) { + continue + } + + kv := strings.SplitN(line, ":", 2) + + // removes spaces + k := string(strings.TrimSpace(kv[0])) + v := string(strings.TrimSpace(kv[1])) + // removes "kB" + v = string(bytes.Trim([]byte(v), " kB")) + + // value to int when possible + // we can skip error check here, 'cause vKBytes is not used when value is a string + vKBytes, _ := strconv.ParseUint(v, 10, 64) + // convert kB to B + vBytes := vKBytes * 1024 + + s.fillStatus(k, v, vKBytes, vBytes) + } + + return s, nil +} + +func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintBytes uint64) { + switch k { + case "Tgid": + s.TGID = int(vUint) + case "Name": + s.Name = vString + case "Uid": + copy(s.UIDs[:], strings.Split(vString, "\t")) + case "Gid": + copy(s.GIDs[:], strings.Split(vString, "\t")) + case "VmPeak": + s.VmPeak = vUintBytes + case "VmSize": + s.VmSize = vUintBytes + case "VmLck": + s.VmLck = vUintBytes + case "VmPin": + s.VmPin = vUintBytes + case "VmHWM": + s.VmHWM = vUintBytes + case "VmRSS": + s.VmRSS = vUintBytes + case "RssAnon": + s.RssAnon = vUintBytes + case "RssFile": + s.RssFile = vUintBytes + case "RssShmem": + s.RssShmem = vUintBytes + case "VmData": + s.VmData = vUintBytes + case "VmStk": + s.VmStk = vUintBytes + case "VmExe": + s.VmExe = vUintBytes + case "VmLib": + s.VmLib = 
vUintBytes + case "VmPTE": + s.VmPTE = vUintBytes + case "VmPMD": + s.VmPMD = vUintBytes + case "VmSwap": + s.VmSwap = vUintBytes + case "HugetlbPages": + s.HugetlbPages = vUintBytes + case "voluntary_ctxt_switches": + s.VoluntaryCtxtSwitches = vUint + case "nonvoluntary_ctxt_switches": + s.NonVoluntaryCtxtSwitches = vUint + } +} + +// TotalCtxtSwitches returns the total context switch. +func (s ProcStatus) TotalCtxtSwitches() uint64 { + return s.VoluntaryCtxtSwitches + s.NonVoluntaryCtxtSwitches +} diff --git a/vendor/github.com/prometheus/procfs/schedstat.go b/vendor/github.com/prometheus/procfs/schedstat.go new file mode 100644 index 000000000..28228164e --- /dev/null +++ b/vendor/github.com/prometheus/procfs/schedstat.go @@ -0,0 +1,121 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "errors" + "os" + "regexp" + "strconv" +) + +var ( + cpuLineRE = regexp.MustCompile(`cpu(\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+)`) + procLineRE = regexp.MustCompile(`(\d+) (\d+) (\d+)`) +) + +// Schedstat contains scheduler statistics from /proc/schedstat +// +// See +// https://www.kernel.org/doc/Documentation/scheduler/sched-stats.txt +// for a detailed description of what these numbers mean. +// +// Note the current kernel documentation claims some of the time units are in +// jiffies when they are actually in nanoseconds since 2.6.23 with the +// introduction of CFS. 
A fix to the documentation is pending. See +// https://lore.kernel.org/patchwork/project/lkml/list/?series=403473 +type Schedstat struct { + CPUs []*SchedstatCPU +} + +// SchedstatCPU contains the values from one "cpu" line +type SchedstatCPU struct { + CPUNum string + + RunningNanoseconds uint64 + WaitingNanoseconds uint64 + RunTimeslices uint64 +} + +// ProcSchedstat contains the values from /proc//schedstat +type ProcSchedstat struct { + RunningNanoseconds uint64 + WaitingNanoseconds uint64 + RunTimeslices uint64 +} + +// Schedstat reads data from /proc/schedstat +func (fs FS) Schedstat() (*Schedstat, error) { + file, err := os.Open(fs.proc.Path("schedstat")) + if err != nil { + return nil, err + } + defer file.Close() + + stats := &Schedstat{} + scanner := bufio.NewScanner(file) + + for scanner.Scan() { + match := cpuLineRE.FindStringSubmatch(scanner.Text()) + if match != nil { + cpu := &SchedstatCPU{} + cpu.CPUNum = match[1] + + cpu.RunningNanoseconds, err = strconv.ParseUint(match[8], 10, 64) + if err != nil { + continue + } + + cpu.WaitingNanoseconds, err = strconv.ParseUint(match[9], 10, 64) + if err != nil { + continue + } + + cpu.RunTimeslices, err = strconv.ParseUint(match[10], 10, 64) + if err != nil { + continue + } + + stats.CPUs = append(stats.CPUs, cpu) + } + } + + return stats, nil +} + +func parseProcSchedstat(contents string) (ProcSchedstat, error) { + var ( + stats ProcSchedstat + err error + ) + match := procLineRE.FindStringSubmatch(contents) + + if match != nil { + stats.RunningNanoseconds, err = strconv.ParseUint(match[1], 10, 64) + if err != nil { + return stats, err + } + + stats.WaitingNanoseconds, err = strconv.ParseUint(match[2], 10, 64) + if err != nil { + return stats, err + } + + stats.RunTimeslices, err = strconv.ParseUint(match[3], 10, 64) + return stats, err + } + + return stats, errors.New("could not parse schedstat") +} diff --git a/vendor/github.com/prometheus/procfs/slab.go b/vendor/github.com/prometheus/procfs/slab.go new 
file mode 100644 index 000000000..7896fd724 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/slab.go @@ -0,0 +1,151 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "regexp" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +var ( + slabSpace = regexp.MustCompile(`\s+`) + slabVer = regexp.MustCompile(`slabinfo -`) + slabHeader = regexp.MustCompile(`# name`) +) + +// Slab represents a slab pool in the kernel. +type Slab struct { + Name string + ObjActive int64 + ObjNum int64 + ObjSize int64 + ObjPerSlab int64 + PagesPerSlab int64 + // tunables + Limit int64 + Batch int64 + SharedFactor int64 + SlabActive int64 + SlabNum int64 + SharedAvail int64 +} + +// SlabInfo represents info for all slabs. +type SlabInfo struct { + Slabs []*Slab +} + +func shouldParseSlab(line string) bool { + if slabVer.MatchString(line) { + return false + } + if slabHeader.MatchString(line) { + return false + } + return true +} + +// parseV21SlabEntry is used to parse a line from /proc/slabinfo version 2.1. +func parseV21SlabEntry(line string) (*Slab, error) { + // First cleanup whitespace. 
+ l := slabSpace.ReplaceAllString(line, " ") + s := strings.Split(l, " ") + if len(s) != 16 { + return nil, fmt.Errorf("unable to parse: %q", line) + } + var err error + i := &Slab{Name: s[0]} + i.ObjActive, err = strconv.ParseInt(s[1], 10, 64) + if err != nil { + return nil, err + } + i.ObjNum, err = strconv.ParseInt(s[2], 10, 64) + if err != nil { + return nil, err + } + i.ObjSize, err = strconv.ParseInt(s[3], 10, 64) + if err != nil { + return nil, err + } + i.ObjPerSlab, err = strconv.ParseInt(s[4], 10, 64) + if err != nil { + return nil, err + } + i.PagesPerSlab, err = strconv.ParseInt(s[5], 10, 64) + if err != nil { + return nil, err + } + i.Limit, err = strconv.ParseInt(s[8], 10, 64) + if err != nil { + return nil, err + } + i.Batch, err = strconv.ParseInt(s[9], 10, 64) + if err != nil { + return nil, err + } + i.SharedFactor, err = strconv.ParseInt(s[10], 10, 64) + if err != nil { + return nil, err + } + i.SlabActive, err = strconv.ParseInt(s[13], 10, 64) + if err != nil { + return nil, err + } + i.SlabNum, err = strconv.ParseInt(s[14], 10, 64) + if err != nil { + return nil, err + } + i.SharedAvail, err = strconv.ParseInt(s[15], 10, 64) + if err != nil { + return nil, err + } + return i, nil +} + +// parseSlabInfo21 is used to parse a slabinfo 2.1 file. +func parseSlabInfo21(r *bytes.Reader) (SlabInfo, error) { + scanner := bufio.NewScanner(r) + s := SlabInfo{Slabs: []*Slab{}} + for scanner.Scan() { + line := scanner.Text() + if !shouldParseSlab(line) { + continue + } + slab, err := parseV21SlabEntry(line) + if err != nil { + return s, err + } + s.Slabs = append(s.Slabs, slab) + } + return s, nil +} + +// SlabInfo reads data from /proc/slabinfo +func (fs FS) SlabInfo() (SlabInfo, error) { + // TODO: Consider passing options to allow for parsing different + // slabinfo versions. However, slabinfo 2.1 has been stable since + // kernel 2.6.10 and later. 
+ data, err := util.ReadFileNoStat(fs.proc.Path("slabinfo")) + if err != nil { + return SlabInfo{}, err + } + + return parseSlabInfo21(bytes.NewReader(data)) +} diff --git a/vendor/github.com/prometheus/procfs/stat.go b/vendor/github.com/prometheus/procfs/stat.go new file mode 100644 index 000000000..6d8727541 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/stat.go @@ -0,0 +1,244 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/fs" + "github.com/prometheus/procfs/internal/util" +) + +// CPUStat shows how much time the cpu spend in various stages. +type CPUStat struct { + User float64 + Nice float64 + System float64 + Idle float64 + Iowait float64 + IRQ float64 + SoftIRQ float64 + Steal float64 + Guest float64 + GuestNice float64 +} + +// SoftIRQStat represent the softirq statistics as exported in the procfs stat file. +// A nice introduction can be found at https://0xax.gitbooks.io/linux-insides/content/interrupts/interrupts-9.html +// It is possible to get per-cpu stats by reading /proc/softirqs +type SoftIRQStat struct { + Hi uint64 + Timer uint64 + NetTx uint64 + NetRx uint64 + Block uint64 + BlockIoPoll uint64 + Tasklet uint64 + Sched uint64 + Hrtimer uint64 + Rcu uint64 +} + +// Stat represents kernel/system statistics. 
+type Stat struct { + // Boot time in seconds since the Epoch. + BootTime uint64 + // Summed up cpu statistics. + CPUTotal CPUStat + // Per-CPU statistics. + CPU []CPUStat + // Number of times interrupts were handled, which contains numbered and unnumbered IRQs. + IRQTotal uint64 + // Number of times a numbered IRQ was triggered. + IRQ []uint64 + // Number of times a context switch happened. + ContextSwitches uint64 + // Number of times a process was created. + ProcessCreated uint64 + // Number of processes currently running. + ProcessesRunning uint64 + // Number of processes currently blocked (waiting for IO). + ProcessesBlocked uint64 + // Number of times a softirq was scheduled. + SoftIRQTotal uint64 + // Detailed softirq statistics. + SoftIRQ SoftIRQStat +} + +// Parse a cpu statistics line and returns the CPUStat struct plus the cpu id (or -1 for the overall sum). +func parseCPUStat(line string) (CPUStat, int64, error) { + cpuStat := CPUStat{} + var cpu string + + count, err := fmt.Sscanf(line, "%s %f %f %f %f %f %f %f %f %f %f", + &cpu, + &cpuStat.User, &cpuStat.Nice, &cpuStat.System, &cpuStat.Idle, + &cpuStat.Iowait, &cpuStat.IRQ, &cpuStat.SoftIRQ, &cpuStat.Steal, + &cpuStat.Guest, &cpuStat.GuestNice) + + if err != nil && err != io.EOF { + return CPUStat{}, -1, fmt.Errorf("couldn't parse %q (cpu): %w", line, err) + } + if count == 0 { + return CPUStat{}, -1, fmt.Errorf("couldn't parse %q (cpu): 0 elements parsed", line) + } + + cpuStat.User /= userHZ + cpuStat.Nice /= userHZ + cpuStat.System /= userHZ + cpuStat.Idle /= userHZ + cpuStat.Iowait /= userHZ + cpuStat.IRQ /= userHZ + cpuStat.SoftIRQ /= userHZ + cpuStat.Steal /= userHZ + cpuStat.Guest /= userHZ + cpuStat.GuestNice /= userHZ + + if cpu == "cpu" { + return cpuStat, -1, nil + } + + cpuID, err := strconv.ParseInt(cpu[3:], 10, 64) + if err != nil { + return CPUStat{}, -1, fmt.Errorf("couldn't parse %q (cpu/cpuid): %w", line, err) + } + + return cpuStat, cpuID, nil +} + +// Parse a softirq line. 
+func parseSoftIRQStat(line string) (SoftIRQStat, uint64, error) { + softIRQStat := SoftIRQStat{} + var total uint64 + var prefix string + + _, err := fmt.Sscanf(line, "%s %d %d %d %d %d %d %d %d %d %d %d", + &prefix, &total, + &softIRQStat.Hi, &softIRQStat.Timer, &softIRQStat.NetTx, &softIRQStat.NetRx, + &softIRQStat.Block, &softIRQStat.BlockIoPoll, + &softIRQStat.Tasklet, &softIRQStat.Sched, + &softIRQStat.Hrtimer, &softIRQStat.Rcu) + + if err != nil { + return SoftIRQStat{}, 0, fmt.Errorf("couldn't parse %q (softirq): %w", line, err) + } + + return softIRQStat, total, nil +} + +// NewStat returns information about current cpu/process statistics. +// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt +// +// Deprecated: use fs.Stat() instead +func NewStat() (Stat, error) { + fs, err := NewFS(fs.DefaultProcMountPoint) + if err != nil { + return Stat{}, err + } + return fs.Stat() +} + +// NewStat returns information about current cpu/process statistics. +// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt +// +// Deprecated: use fs.Stat() instead +func (fs FS) NewStat() (Stat, error) { + return fs.Stat() +} + +// Stat returns information about current cpu/process statistics. 
+// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt +func (fs FS) Stat() (Stat, error) { + fileName := fs.proc.Path("stat") + data, err := util.ReadFileNoStat(fileName) + if err != nil { + return Stat{}, err + } + + stat := Stat{} + + scanner := bufio.NewScanner(bytes.NewReader(data)) + for scanner.Scan() { + line := scanner.Text() + parts := strings.Fields(scanner.Text()) + // require at least + if len(parts) < 2 { + continue + } + switch { + case parts[0] == "btime": + if stat.BootTime, err = strconv.ParseUint(parts[1], 10, 64); err != nil { + return Stat{}, fmt.Errorf("couldn't parse %q (btime): %w", parts[1], err) + } + case parts[0] == "intr": + if stat.IRQTotal, err = strconv.ParseUint(parts[1], 10, 64); err != nil { + return Stat{}, fmt.Errorf("couldn't parse %q (intr): %w", parts[1], err) + } + numberedIRQs := parts[2:] + stat.IRQ = make([]uint64, len(numberedIRQs)) + for i, count := range numberedIRQs { + if stat.IRQ[i], err = strconv.ParseUint(count, 10, 64); err != nil { + return Stat{}, fmt.Errorf("couldn't parse %q (intr%d): %w", count, i, err) + } + } + case parts[0] == "ctxt": + if stat.ContextSwitches, err = strconv.ParseUint(parts[1], 10, 64); err != nil { + return Stat{}, fmt.Errorf("couldn't parse %q (ctxt): %w", parts[1], err) + } + case parts[0] == "processes": + if stat.ProcessCreated, err = strconv.ParseUint(parts[1], 10, 64); err != nil { + return Stat{}, fmt.Errorf("couldn't parse %q (processes): %w", parts[1], err) + } + case parts[0] == "procs_running": + if stat.ProcessesRunning, err = strconv.ParseUint(parts[1], 10, 64); err != nil { + return Stat{}, fmt.Errorf("couldn't parse %q (procs_running): %w", parts[1], err) + } + case parts[0] == "procs_blocked": + if stat.ProcessesBlocked, err = strconv.ParseUint(parts[1], 10, 64); err != nil { + return Stat{}, fmt.Errorf("couldn't parse %q (procs_blocked): %w", parts[1], err) + } + case parts[0] == "softirq": + softIRQStats, total, err := parseSoftIRQStat(line) + if err != 
nil { + return Stat{}, err + } + stat.SoftIRQTotal = total + stat.SoftIRQ = softIRQStats + case strings.HasPrefix(parts[0], "cpu"): + cpuStat, cpuID, err := parseCPUStat(line) + if err != nil { + return Stat{}, err + } + if cpuID == -1 { + stat.CPUTotal = cpuStat + } else { + for int64(len(stat.CPU)) <= cpuID { + stat.CPU = append(stat.CPU, CPUStat{}) + } + stat.CPU[cpuID] = cpuStat + } + } + } + + if err := scanner.Err(); err != nil { + return Stat{}, fmt.Errorf("couldn't parse %q: %w", fileName, err) + } + + return stat, nil +} diff --git a/vendor/github.com/prometheus/procfs/swaps.go b/vendor/github.com/prometheus/procfs/swaps.go new file mode 100644 index 000000000..15edc2212 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/swaps.go @@ -0,0 +1,89 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// Swap represents an entry in /proc/swaps. +type Swap struct { + Filename string + Type string + Size int + Used int + Priority int +} + +// Swaps returns a slice of all configured swap devices on the system. 
+func (fs FS) Swaps() ([]*Swap, error) { + data, err := util.ReadFileNoStat(fs.proc.Path("swaps")) + if err != nil { + return nil, err + } + return parseSwaps(data) +} + +func parseSwaps(info []byte) ([]*Swap, error) { + swaps := []*Swap{} + scanner := bufio.NewScanner(bytes.NewReader(info)) + scanner.Scan() // ignore header line + for scanner.Scan() { + swapString := scanner.Text() + parsedSwap, err := parseSwapString(swapString) + if err != nil { + return nil, err + } + swaps = append(swaps, parsedSwap) + } + + err := scanner.Err() + return swaps, err +} + +func parseSwapString(swapString string) (*Swap, error) { + var err error + + swapFields := strings.Fields(swapString) + swapLength := len(swapFields) + if swapLength < 5 { + return nil, fmt.Errorf("too few fields in swap string: %s", swapString) + } + + swap := &Swap{ + Filename: swapFields[0], + Type: swapFields[1], + } + + swap.Size, err = strconv.Atoi(swapFields[2]) + if err != nil { + return nil, fmt.Errorf("invalid swap size: %s", swapFields[2]) + } + swap.Used, err = strconv.Atoi(swapFields[3]) + if err != nil { + return nil, fmt.Errorf("invalid swap used: %s", swapFields[3]) + } + swap.Priority, err = strconv.Atoi(swapFields[4]) + if err != nil { + return nil, fmt.Errorf("invalid swap priority: %s", swapFields[4]) + } + + return swap, nil +} diff --git a/vendor/github.com/prometheus/procfs/ttar b/vendor/github.com/prometheus/procfs/ttar new file mode 100644 index 000000000..19ef02b8d --- /dev/null +++ b/vendor/github.com/prometheus/procfs/ttar @@ -0,0 +1,413 @@ +#!/usr/bin/env bash + +# Purpose: plain text tar format +# Limitations: - only suitable for text files, directories, and symlinks +# - stores only filename, content, and mode +# - not designed for untrusted input +# +# Note: must work with bash version 3.2 (macOS) + +# Copyright 2017 Roger Luethi +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -o errexit -o nounset + +# Sanitize environment (for instance, standard sorting of glob matches) +export LC_ALL=C + +path="" +CMD="" +ARG_STRING="$*" + +#------------------------------------------------------------------------------ +# Not all sed implementations can work on null bytes. In order to make ttar +# work out of the box on macOS, use Python as a stream editor. + +USE_PYTHON=0 + +PYTHON_CREATE_FILTER=$(cat << 'PCF' +#!/usr/bin/env python + +import re +import sys + +for line in sys.stdin: + line = re.sub(r'EOF', r'\EOF', line) + line = re.sub(r'NULLBYTE', r'\NULLBYTE', line) + line = re.sub('\x00', r'NULLBYTE', line) + sys.stdout.write(line) +PCF +) + +PYTHON_EXTRACT_FILTER=$(cat << 'PEF' +#!/usr/bin/env python + +import re +import sys + +for line in sys.stdin: + line = re.sub(r'(?/dev/null; then + echo "ERROR Python not found. Aborting." + exit 2 + fi + USE_PYTHON=1 + fi +} + +#------------------------------------------------------------------------------ + +function usage { + bname=$(basename "$0") + cat << USAGE +Usage: $bname [-C

] -c -f (create archive) + $bname -t -f (list archive contents) + $bname [-C ] -x -f (extract archive) + +Options: + -C (change directory) + -v (verbose) + --recursive-unlink (recursively delete existing directory if path + collides with file or directory to extract) + +Example: Change to sysfs directory, create ttar file from fixtures directory + $bname -C sysfs -c -f sysfs/fixtures.ttar fixtures/ +USAGE +exit "$1" +} + +function vecho { + if [ "${VERBOSE:-}" == "yes" ]; then + echo >&7 "$@" + fi +} + +function set_cmd { + if [ -n "$CMD" ]; then + echo "ERROR: more than one command given" + echo + usage 2 + fi + CMD=$1 +} + +unset VERBOSE +unset RECURSIVE_UNLINK + +while getopts :cf:-:htxvC: opt; do + case $opt in + c) + set_cmd "create" + ;; + f) + ARCHIVE=$OPTARG + ;; + h) + usage 0 + ;; + t) + set_cmd "list" + ;; + x) + set_cmd "extract" + ;; + v) + VERBOSE=yes + exec 7>&1 + ;; + C) + CDIR=$OPTARG + ;; + -) + case $OPTARG in + recursive-unlink) + RECURSIVE_UNLINK="yes" + ;; + *) + echo -e "Error: invalid option -$OPTARG" + echo + usage 1 + ;; + esac + ;; + *) + echo >&2 "ERROR: invalid option -$OPTARG" + echo + usage 1 + ;; + esac +done + +# Remove processed options from arguments +shift $(( OPTIND - 1 )); + +if [ "${CMD:-}" == "" ]; then + echo >&2 "ERROR: no command given" + echo + usage 1 +elif [ "${ARCHIVE:-}" == "" ]; then + echo >&2 "ERROR: no archive name given" + echo + usage 1 +fi + +function list { + local path="" + local size=0 + local line_no=0 + local ttar_file=$1 + if [ -n "${2:-}" ]; then + echo >&2 "ERROR: too many arguments." + echo + usage 1 + fi + if [ ! 
-e "$ttar_file" ]; then + echo >&2 "ERROR: file not found ($ttar_file)" + echo + usage 1 + fi + while read -r line; do + line_no=$(( line_no + 1 )) + if [ $size -gt 0 ]; then + size=$(( size - 1 )) + continue + fi + if [[ $line =~ ^Path:\ (.*)$ ]]; then + path=${BASH_REMATCH[1]} + elif [[ $line =~ ^Lines:\ (.*)$ ]]; then + size=${BASH_REMATCH[1]} + echo "$path" + elif [[ $line =~ ^Directory:\ (.*)$ ]]; then + path=${BASH_REMATCH[1]} + echo "$path/" + elif [[ $line =~ ^SymlinkTo:\ (.*)$ ]]; then + echo "$path -> ${BASH_REMATCH[1]}" + fi + done < "$ttar_file" +} + +function extract { + local path="" + local size=0 + local line_no=0 + local ttar_file=$1 + if [ -n "${2:-}" ]; then + echo >&2 "ERROR: too many arguments." + echo + usage 1 + fi + if [ ! -e "$ttar_file" ]; then + echo >&2 "ERROR: file not found ($ttar_file)" + echo + usage 1 + fi + while IFS= read -r line; do + line_no=$(( line_no + 1 )) + local eof_without_newline + if [ "$size" -gt 0 ]; then + if [[ "$line" =~ [^\\]EOF ]]; then + # An EOF not preceded by a backslash indicates that the line + # does not end with a newline + eof_without_newline=1 + else + eof_without_newline=0 + fi + # Replace NULLBYTE with null byte if at beginning of line + # Replace NULLBYTE with null byte unless preceded by backslash + # Remove one backslash in front of NULLBYTE (if any) + # Remove EOF unless preceded by backslash + # Remove one backslash in front of EOF + if [ $USE_PYTHON -eq 1 ]; then + echo -n "$line" | python -c "$PYTHON_EXTRACT_FILTER" >> "$path" + else + # The repeated pattern makes up for sed's lack of negative + # lookbehind assertions (for consecutive null bytes). 
+ echo -n "$line" | \ + sed -e 's/^NULLBYTE/\x0/g; + s/\([^\\]\)NULLBYTE/\1\x0/g; + s/\([^\\]\)NULLBYTE/\1\x0/g; + s/\\NULLBYTE/NULLBYTE/g; + s/\([^\\]\)EOF/\1/g; + s/\\EOF/EOF/g; + ' >> "$path" + fi + if [[ "$eof_without_newline" -eq 0 ]]; then + echo >> "$path" + fi + size=$(( size - 1 )) + continue + fi + if [[ $line =~ ^Path:\ (.*)$ ]]; then + path=${BASH_REMATCH[1]} + if [ -L "$path" ]; then + rm "$path" + elif [ -d "$path" ]; then + if [ "${RECURSIVE_UNLINK:-}" == "yes" ]; then + rm -r "$path" + else + # Safe because symlinks to directories are dealt with above + rmdir "$path" + fi + elif [ -e "$path" ]; then + rm "$path" + fi + elif [[ $line =~ ^Lines:\ (.*)$ ]]; then + size=${BASH_REMATCH[1]} + # Create file even if it is zero-length. + touch "$path" + vecho " $path" + elif [[ $line =~ ^Mode:\ (.*)$ ]]; then + mode=${BASH_REMATCH[1]} + chmod "$mode" "$path" + vecho "$mode" + elif [[ $line =~ ^Directory:\ (.*)$ ]]; then + path=${BASH_REMATCH[1]} + mkdir -p "$path" + vecho " $path/" + elif [[ $line =~ ^SymlinkTo:\ (.*)$ ]]; then + ln -s "${BASH_REMATCH[1]}" "$path" + vecho " $path -> ${BASH_REMATCH[1]}" + elif [[ $line =~ ^# ]]; then + # Ignore comments between files + continue + else + echo >&2 "ERROR: Unknown keyword on line $line_no: $line" + exit 1 + fi + done < "$ttar_file" +} + +function div { + echo "# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -" \ + "- - - - - -" +} + +function get_mode { + local mfile=$1 + if [ -z "${STAT_OPTION:-}" ]; then + if stat -c '%a' "$mfile" >/dev/null 2>&1; then + # GNU stat + STAT_OPTION='-c' + STAT_FORMAT='%a' + else + # BSD stat + STAT_OPTION='-f' + # Octal output, user/group/other (omit file type, sticky bit) + STAT_FORMAT='%OLp' + fi + fi + stat "${STAT_OPTION}" "${STAT_FORMAT}" "$mfile" +} + +function _create { + shopt -s nullglob + local mode + local eof_without_newline + while (( "$#" )); do + file=$1 + if [ -L "$file" ]; then + echo "Path: $file" + symlinkTo=$(readlink "$file") + echo 
"SymlinkTo: $symlinkTo" + vecho " $file -> $symlinkTo" + div + elif [ -d "$file" ]; then + # Strip trailing slash (if there is one) + file=${file%/} + echo "Directory: $file" + mode=$(get_mode "$file") + echo "Mode: $mode" + vecho "$mode $file/" + div + # Find all files and dirs, including hidden/dot files + for x in "$file/"{*,.[^.]*}; do + _create "$x" + done + elif [ -f "$file" ]; then + echo "Path: $file" + lines=$(wc -l "$file"|awk '{print $1}') + eof_without_newline=0 + if [[ "$(wc -c "$file"|awk '{print $1}')" -gt 0 ]] && \ + [[ "$(tail -c 1 "$file" | wc -l)" -eq 0 ]]; then + eof_without_newline=1 + lines=$((lines+1)) + fi + echo "Lines: $lines" + # Add backslash in front of EOF + # Add backslash in front of NULLBYTE + # Replace null byte with NULLBYTE + if [ $USE_PYTHON -eq 1 ]; then + < "$file" python -c "$PYTHON_CREATE_FILTER" + else + < "$file" \ + sed 's/EOF/\\EOF/g; + s/NULLBYTE/\\NULLBYTE/g; + s/\x0/NULLBYTE/g; + ' + fi + if [[ "$eof_without_newline" -eq 1 ]]; then + # Finish line with EOF to indicate that the original line did + # not end with a linefeed + echo "EOF" + fi + mode=$(get_mode "$file") + echo "Mode: $mode" + vecho "$mode $file" + div + else + echo >&2 "ERROR: file not found ($file in $(pwd))" + exit 2 + fi + shift + done +} + +function create { + ttar_file=$1 + shift + if [ -z "${1:-}" ]; then + echo >&2 "ERROR: missing arguments." 
+ echo + usage 1 + fi + if [ -e "$ttar_file" ]; then + rm "$ttar_file" + fi + exec > "$ttar_file" + echo "# Archive created by ttar $ARG_STRING" + _create "$@" +} + +test_environment + +if [ -n "${CDIR:-}" ]; then + if [[ "$ARCHIVE" != /* ]]; then + # Relative path: preserve the archive's location before changing + # directory + ARCHIVE="$(pwd)/$ARCHIVE" + fi + cd "$CDIR" +fi + +"$CMD" "$ARCHIVE" "$@" diff --git a/vendor/github.com/prometheus/procfs/vm.go b/vendor/github.com/prometheus/procfs/vm.go new file mode 100644 index 000000000..cb1389141 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/vm.go @@ -0,0 +1,210 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !windows + +package procfs + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// The VM interface is described at +// https://www.kernel.org/doc/Documentation/sysctl/vm.txt +// Each setting is exposed as a single file. 
+// Each file contains one line with a single numerical value, except lowmem_reserve_ratio which holds an array +// and numa_zonelist_order (deprecated) which is a string +type VM struct { + AdminReserveKbytes *int64 // /proc/sys/vm/admin_reserve_kbytes + BlockDump *int64 // /proc/sys/vm/block_dump + CompactUnevictableAllowed *int64 // /proc/sys/vm/compact_unevictable_allowed + DirtyBackgroundBytes *int64 // /proc/sys/vm/dirty_background_bytes + DirtyBackgroundRatio *int64 // /proc/sys/vm/dirty_background_ratio + DirtyBytes *int64 // /proc/sys/vm/dirty_bytes + DirtyExpireCentisecs *int64 // /proc/sys/vm/dirty_expire_centisecs + DirtyRatio *int64 // /proc/sys/vm/dirty_ratio + DirtytimeExpireSeconds *int64 // /proc/sys/vm/dirtytime_expire_seconds + DirtyWritebackCentisecs *int64 // /proc/sys/vm/dirty_writeback_centisecs + DropCaches *int64 // /proc/sys/vm/drop_caches + ExtfragThreshold *int64 // /proc/sys/vm/extfrag_threshold + HugetlbShmGroup *int64 // /proc/sys/vm/hugetlb_shm_group + LaptopMode *int64 // /proc/sys/vm/laptop_mode + LegacyVaLayout *int64 // /proc/sys/vm/legacy_va_layout + LowmemReserveRatio []*int64 // /proc/sys/vm/lowmem_reserve_ratio + MaxMapCount *int64 // /proc/sys/vm/max_map_count + MemoryFailureEarlyKill *int64 // /proc/sys/vm/memory_failure_early_kill + MemoryFailureRecovery *int64 // /proc/sys/vm/memory_failure_recovery + MinFreeKbytes *int64 // /proc/sys/vm/min_free_kbytes + MinSlabRatio *int64 // /proc/sys/vm/min_slab_ratio + MinUnmappedRatio *int64 // /proc/sys/vm/min_unmapped_ratio + MmapMinAddr *int64 // /proc/sys/vm/mmap_min_addr + NrHugepages *int64 // /proc/sys/vm/nr_hugepages + NrHugepagesMempolicy *int64 // /proc/sys/vm/nr_hugepages_mempolicy + NrOvercommitHugepages *int64 // /proc/sys/vm/nr_overcommit_hugepages + NumaStat *int64 // /proc/sys/vm/numa_stat + NumaZonelistOrder string // /proc/sys/vm/numa_zonelist_order + OomDumpTasks *int64 // /proc/sys/vm/oom_dump_tasks + OomKillAllocatingTask *int64 // 
/proc/sys/vm/oom_kill_allocating_task + OvercommitKbytes *int64 // /proc/sys/vm/overcommit_kbytes + OvercommitMemory *int64 // /proc/sys/vm/overcommit_memory + OvercommitRatio *int64 // /proc/sys/vm/overcommit_ratio + PageCluster *int64 // /proc/sys/vm/page-cluster + PanicOnOom *int64 // /proc/sys/vm/panic_on_oom + PercpuPagelistFraction *int64 // /proc/sys/vm/percpu_pagelist_fraction + StatInterval *int64 // /proc/sys/vm/stat_interval + Swappiness *int64 // /proc/sys/vm/swappiness + UserReserveKbytes *int64 // /proc/sys/vm/user_reserve_kbytes + VfsCachePressure *int64 // /proc/sys/vm/vfs_cache_pressure + WatermarkBoostFactor *int64 // /proc/sys/vm/watermark_boost_factor + WatermarkScaleFactor *int64 // /proc/sys/vm/watermark_scale_factor + ZoneReclaimMode *int64 // /proc/sys/vm/zone_reclaim_mode +} + +// VM reads the VM statistics from the specified `proc` filesystem. +func (fs FS) VM() (*VM, error) { + path := fs.proc.Path("sys/vm") + file, err := os.Stat(path) + if err != nil { + return nil, err + } + if !file.Mode().IsDir() { + return nil, fmt.Errorf("%s is not a directory", path) + } + + files, err := ioutil.ReadDir(path) + if err != nil { + return nil, err + } + + var vm VM + for _, f := range files { + if f.IsDir() { + continue + } + + name := filepath.Join(path, f.Name()) + // ignore errors on read, as there are some write only + // in /proc/sys/vm + value, err := util.SysReadFile(name) + if err != nil { + continue + } + vp := util.NewValueParser(value) + + switch f.Name() { + case "admin_reserve_kbytes": + vm.AdminReserveKbytes = vp.PInt64() + case "block_dump": + vm.BlockDump = vp.PInt64() + case "compact_unevictable_allowed": + vm.CompactUnevictableAllowed = vp.PInt64() + case "dirty_background_bytes": + vm.DirtyBackgroundBytes = vp.PInt64() + case "dirty_background_ratio": + vm.DirtyBackgroundRatio = vp.PInt64() + case "dirty_bytes": + vm.DirtyBytes = vp.PInt64() + case "dirty_expire_centisecs": + vm.DirtyExpireCentisecs = vp.PInt64() + case 
"dirty_ratio": + vm.DirtyRatio = vp.PInt64() + case "dirtytime_expire_seconds": + vm.DirtytimeExpireSeconds = vp.PInt64() + case "dirty_writeback_centisecs": + vm.DirtyWritebackCentisecs = vp.PInt64() + case "drop_caches": + vm.DropCaches = vp.PInt64() + case "extfrag_threshold": + vm.ExtfragThreshold = vp.PInt64() + case "hugetlb_shm_group": + vm.HugetlbShmGroup = vp.PInt64() + case "laptop_mode": + vm.LaptopMode = vp.PInt64() + case "legacy_va_layout": + vm.LegacyVaLayout = vp.PInt64() + case "lowmem_reserve_ratio": + stringSlice := strings.Fields(value) + pint64Slice := make([]*int64, 0, len(stringSlice)) + for _, value := range stringSlice { + vp := util.NewValueParser(value) + pint64Slice = append(pint64Slice, vp.PInt64()) + } + vm.LowmemReserveRatio = pint64Slice + case "max_map_count": + vm.MaxMapCount = vp.PInt64() + case "memory_failure_early_kill": + vm.MemoryFailureEarlyKill = vp.PInt64() + case "memory_failure_recovery": + vm.MemoryFailureRecovery = vp.PInt64() + case "min_free_kbytes": + vm.MinFreeKbytes = vp.PInt64() + case "min_slab_ratio": + vm.MinSlabRatio = vp.PInt64() + case "min_unmapped_ratio": + vm.MinUnmappedRatio = vp.PInt64() + case "mmap_min_addr": + vm.MmapMinAddr = vp.PInt64() + case "nr_hugepages": + vm.NrHugepages = vp.PInt64() + case "nr_hugepages_mempolicy": + vm.NrHugepagesMempolicy = vp.PInt64() + case "nr_overcommit_hugepages": + vm.NrOvercommitHugepages = vp.PInt64() + case "numa_stat": + vm.NumaStat = vp.PInt64() + case "numa_zonelist_order": + vm.NumaZonelistOrder = value + case "oom_dump_tasks": + vm.OomDumpTasks = vp.PInt64() + case "oom_kill_allocating_task": + vm.OomKillAllocatingTask = vp.PInt64() + case "overcommit_kbytes": + vm.OvercommitKbytes = vp.PInt64() + case "overcommit_memory": + vm.OvercommitMemory = vp.PInt64() + case "overcommit_ratio": + vm.OvercommitRatio = vp.PInt64() + case "page-cluster": + vm.PageCluster = vp.PInt64() + case "panic_on_oom": + vm.PanicOnOom = vp.PInt64() + case "percpu_pagelist_fraction": 
+ vm.PercpuPagelistFraction = vp.PInt64() + case "stat_interval": + vm.StatInterval = vp.PInt64() + case "swappiness": + vm.Swappiness = vp.PInt64() + case "user_reserve_kbytes": + vm.UserReserveKbytes = vp.PInt64() + case "vfs_cache_pressure": + vm.VfsCachePressure = vp.PInt64() + case "watermark_boost_factor": + vm.WatermarkBoostFactor = vp.PInt64() + case "watermark_scale_factor": + vm.WatermarkScaleFactor = vp.PInt64() + case "zone_reclaim_mode": + vm.ZoneReclaimMode = vp.PInt64() + } + if err := vp.Err(); err != nil { + return nil, err + } + } + + return &vm, nil +} diff --git a/vendor/github.com/prometheus/procfs/xfrm.go b/vendor/github.com/prometheus/procfs/xfrm.go new file mode 100644 index 000000000..eed07c7d7 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/xfrm.go @@ -0,0 +1,186 @@ +// Copyright 2017 Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "fmt" + "os" + "strconv" + "strings" +) + +// XfrmStat models the contents of /proc/net/xfrm_stat. +type XfrmStat struct { + // All errors which are not matched by other + XfrmInError int + // No buffer is left + XfrmInBufferError int + // Header Error + XfrmInHdrError int + // No state found + // i.e. either inbound SPI, address, or IPSEC protocol at SA is wrong + XfrmInNoStates int + // Transformation protocol specific error + // e.g. 
SA Key is wrong + XfrmInStateProtoError int + // Transformation mode specific error + XfrmInStateModeError int + // Sequence error + // e.g. sequence number is out of window + XfrmInStateSeqError int + // State is expired + XfrmInStateExpired int + // State has mismatch option + // e.g. UDP encapsulation type is mismatched + XfrmInStateMismatch int + // State is invalid + XfrmInStateInvalid int + // No matching template for states + // e.g. Inbound SAs are correct but SP rule is wrong + XfrmInTmplMismatch int + // No policy is found for states + // e.g. Inbound SAs are correct but no SP is found + XfrmInNoPols int + // Policy discards + XfrmInPolBlock int + // Policy error + XfrmInPolError int + // All errors which are not matched by others + XfrmOutError int + // Bundle generation error + XfrmOutBundleGenError int + // Bundle check error + XfrmOutBundleCheckError int + // No state was found + XfrmOutNoStates int + // Transformation protocol specific error + XfrmOutStateProtoError int + // Transportation mode specific error + XfrmOutStateModeError int + // Sequence error + // i.e sequence number overflow + XfrmOutStateSeqError int + // State is expired + XfrmOutStateExpired int + // Policy discads + XfrmOutPolBlock int + // Policy is dead + XfrmOutPolDead int + // Policy Error + XfrmOutPolError int + XfrmFwdHdrError int + XfrmOutStateInvalid int + XfrmAcquireError int +} + +// NewXfrmStat reads the xfrm_stat statistics. +func NewXfrmStat() (XfrmStat, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return XfrmStat{}, err + } + + return fs.NewXfrmStat() +} + +// NewXfrmStat reads the xfrm_stat statistics from the 'proc' filesystem. 
+func (fs FS) NewXfrmStat() (XfrmStat, error) { + file, err := os.Open(fs.proc.Path("net/xfrm_stat")) + if err != nil { + return XfrmStat{}, err + } + defer file.Close() + + var ( + x = XfrmStat{} + s = bufio.NewScanner(file) + ) + + for s.Scan() { + fields := strings.Fields(s.Text()) + + if len(fields) != 2 { + return XfrmStat{}, fmt.Errorf("couldn't parse %q line %q", file.Name(), s.Text()) + } + + name := fields[0] + value, err := strconv.Atoi(fields[1]) + if err != nil { + return XfrmStat{}, err + } + + switch name { + case "XfrmInError": + x.XfrmInError = value + case "XfrmInBufferError": + x.XfrmInBufferError = value + case "XfrmInHdrError": + x.XfrmInHdrError = value + case "XfrmInNoStates": + x.XfrmInNoStates = value + case "XfrmInStateProtoError": + x.XfrmInStateProtoError = value + case "XfrmInStateModeError": + x.XfrmInStateModeError = value + case "XfrmInStateSeqError": + x.XfrmInStateSeqError = value + case "XfrmInStateExpired": + x.XfrmInStateExpired = value + case "XfrmInStateInvalid": + x.XfrmInStateInvalid = value + case "XfrmInTmplMismatch": + x.XfrmInTmplMismatch = value + case "XfrmInNoPols": + x.XfrmInNoPols = value + case "XfrmInPolBlock": + x.XfrmInPolBlock = value + case "XfrmInPolError": + x.XfrmInPolError = value + case "XfrmOutError": + x.XfrmOutError = value + case "XfrmInStateMismatch": + x.XfrmInStateMismatch = value + case "XfrmOutBundleGenError": + x.XfrmOutBundleGenError = value + case "XfrmOutBundleCheckError": + x.XfrmOutBundleCheckError = value + case "XfrmOutNoStates": + x.XfrmOutNoStates = value + case "XfrmOutStateProtoError": + x.XfrmOutStateProtoError = value + case "XfrmOutStateModeError": + x.XfrmOutStateModeError = value + case "XfrmOutStateSeqError": + x.XfrmOutStateSeqError = value + case "XfrmOutStateExpired": + x.XfrmOutStateExpired = value + case "XfrmOutPolBlock": + x.XfrmOutPolBlock = value + case "XfrmOutPolDead": + x.XfrmOutPolDead = value + case "XfrmOutPolError": + x.XfrmOutPolError = value + case 
"XfrmFwdHdrError": + x.XfrmFwdHdrError = value + case "XfrmOutStateInvalid": + x.XfrmOutStateInvalid = value + case "XfrmAcquireError": + x.XfrmAcquireError = value + } + + } + + return x, s.Err() +} diff --git a/vendor/github.com/prometheus/procfs/zoneinfo.go b/vendor/github.com/prometheus/procfs/zoneinfo.go new file mode 100644 index 000000000..0b9bb6796 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/zoneinfo.go @@ -0,0 +1,196 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !windows + +package procfs + +import ( + "bytes" + "fmt" + "io/ioutil" + "regexp" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// Zoneinfo holds info parsed from /proc/zoneinfo. 
+type Zoneinfo struct { + Node string + Zone string + NrFreePages *int64 + Min *int64 + Low *int64 + High *int64 + Scanned *int64 + Spanned *int64 + Present *int64 + Managed *int64 + NrActiveAnon *int64 + NrInactiveAnon *int64 + NrIsolatedAnon *int64 + NrAnonPages *int64 + NrAnonTransparentHugepages *int64 + NrActiveFile *int64 + NrInactiveFile *int64 + NrIsolatedFile *int64 + NrFilePages *int64 + NrSlabReclaimable *int64 + NrSlabUnreclaimable *int64 + NrMlockStack *int64 + NrKernelStack *int64 + NrMapped *int64 + NrDirty *int64 + NrWriteback *int64 + NrUnevictable *int64 + NrShmem *int64 + NrDirtied *int64 + NrWritten *int64 + NumaHit *int64 + NumaMiss *int64 + NumaForeign *int64 + NumaInterleave *int64 + NumaLocal *int64 + NumaOther *int64 + Protection []*int64 +} + +var nodeZoneRE = regexp.MustCompile(`(\d+), zone\s+(\w+)`) + +// Zoneinfo parses an zoneinfo-file (/proc/zoneinfo) and returns a slice of +// structs containing the relevant info. More information available here: +// https://www.kernel.org/doc/Documentation/sysctl/vm.txt +func (fs FS) Zoneinfo() ([]Zoneinfo, error) { + data, err := ioutil.ReadFile(fs.proc.Path("zoneinfo")) + if err != nil { + return nil, fmt.Errorf("error reading zoneinfo %q: %w", fs.proc.Path("zoneinfo"), err) + } + zoneinfo, err := parseZoneinfo(data) + if err != nil { + return nil, fmt.Errorf("error parsing zoneinfo %q: %w", fs.proc.Path("zoneinfo"), err) + } + return zoneinfo, nil +} + +func parseZoneinfo(zoneinfoData []byte) ([]Zoneinfo, error) { + + zoneinfo := []Zoneinfo{} + + zoneinfoBlocks := bytes.Split(zoneinfoData, []byte("\nNode")) + for _, block := range zoneinfoBlocks { + var zoneinfoElement Zoneinfo + lines := strings.Split(string(block), "\n") + for _, line := range lines { + + if nodeZone := nodeZoneRE.FindStringSubmatch(line); nodeZone != nil { + zoneinfoElement.Node = nodeZone[1] + zoneinfoElement.Zone = nodeZone[2] + continue + } + if strings.HasPrefix(strings.TrimSpace(line), "per-node stats") { + 
zoneinfoElement.Zone = "" + continue + } + parts := strings.Fields(strings.TrimSpace(line)) + if len(parts) < 2 { + continue + } + vp := util.NewValueParser(parts[1]) + switch parts[0] { + case "nr_free_pages": + zoneinfoElement.NrFreePages = vp.PInt64() + case "min": + zoneinfoElement.Min = vp.PInt64() + case "low": + zoneinfoElement.Low = vp.PInt64() + case "high": + zoneinfoElement.High = vp.PInt64() + case "scanned": + zoneinfoElement.Scanned = vp.PInt64() + case "spanned": + zoneinfoElement.Spanned = vp.PInt64() + case "present": + zoneinfoElement.Present = vp.PInt64() + case "managed": + zoneinfoElement.Managed = vp.PInt64() + case "nr_active_anon": + zoneinfoElement.NrActiveAnon = vp.PInt64() + case "nr_inactive_anon": + zoneinfoElement.NrInactiveAnon = vp.PInt64() + case "nr_isolated_anon": + zoneinfoElement.NrIsolatedAnon = vp.PInt64() + case "nr_anon_pages": + zoneinfoElement.NrAnonPages = vp.PInt64() + case "nr_anon_transparent_hugepages": + zoneinfoElement.NrAnonTransparentHugepages = vp.PInt64() + case "nr_active_file": + zoneinfoElement.NrActiveFile = vp.PInt64() + case "nr_inactive_file": + zoneinfoElement.NrInactiveFile = vp.PInt64() + case "nr_isolated_file": + zoneinfoElement.NrIsolatedFile = vp.PInt64() + case "nr_file_pages": + zoneinfoElement.NrFilePages = vp.PInt64() + case "nr_slab_reclaimable": + zoneinfoElement.NrSlabReclaimable = vp.PInt64() + case "nr_slab_unreclaimable": + zoneinfoElement.NrSlabUnreclaimable = vp.PInt64() + case "nr_mlock_stack": + zoneinfoElement.NrMlockStack = vp.PInt64() + case "nr_kernel_stack": + zoneinfoElement.NrKernelStack = vp.PInt64() + case "nr_mapped": + zoneinfoElement.NrMapped = vp.PInt64() + case "nr_dirty": + zoneinfoElement.NrDirty = vp.PInt64() + case "nr_writeback": + zoneinfoElement.NrWriteback = vp.PInt64() + case "nr_unevictable": + zoneinfoElement.NrUnevictable = vp.PInt64() + case "nr_shmem": + zoneinfoElement.NrShmem = vp.PInt64() + case "nr_dirtied": + zoneinfoElement.NrDirtied = vp.PInt64() + 
case "nr_written": + zoneinfoElement.NrWritten = vp.PInt64() + case "numa_hit": + zoneinfoElement.NumaHit = vp.PInt64() + case "numa_miss": + zoneinfoElement.NumaMiss = vp.PInt64() + case "numa_foreign": + zoneinfoElement.NumaForeign = vp.PInt64() + case "numa_interleave": + zoneinfoElement.NumaInterleave = vp.PInt64() + case "numa_local": + zoneinfoElement.NumaLocal = vp.PInt64() + case "numa_other": + zoneinfoElement.NumaOther = vp.PInt64() + case "protection:": + protectionParts := strings.Split(line, ":") + protectionValues := strings.Replace(protectionParts[1], "(", "", 1) + protectionValues = strings.Replace(protectionValues, ")", "", 1) + protectionValues = strings.TrimSpace(protectionValues) + protectionStringMap := strings.Split(protectionValues, ", ") + val, err := util.ParsePInt64s(protectionStringMap) + if err == nil { + zoneinfoElement.Protection = val + } + } + + } + + zoneinfo = append(zoneinfo, zoneinfoElement) + } + return zoneinfo, nil +} diff --git a/vendor/github.com/rubenv/sql-migrate/go.mod b/vendor/github.com/rubenv/sql-migrate/go.mod deleted file mode 100644 index 9ae35ed70..000000000 --- a/vendor/github.com/rubenv/sql-migrate/go.mod +++ /dev/null @@ -1,18 +0,0 @@ -module github.com/rubenv/sql-migrate - -go 1.16 - -require ( - github.com/denisenkom/go-mssqldb v0.9.0 - github.com/go-sql-driver/mysql v1.5.0 - github.com/gobuffalo/packr/v2 v2.8.1 - github.com/godror/godror v0.24.2 - github.com/lib/pq v1.10.0 - github.com/mattn/go-oci8 v0.1.1 - github.com/mattn/go-sqlite3 v1.14.6 - github.com/mitchellh/cli v1.1.2 - github.com/olekukonko/tablewriter v0.0.5 - gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c - gopkg.in/gorp.v1 v1.7.2 - gopkg.in/yaml.v2 v2.4.0 -) diff --git a/vendor/github.com/rubenv/sql-migrate/go.sum b/vendor/github.com/rubenv/sql-migrate/go.sum deleted file mode 100644 index 22b1be956..000000000 --- a/vendor/github.com/rubenv/sql-migrate/go.sum +++ /dev/null @@ -1,237 +0,0 @@ -cloud.google.com/go v0.26.0/go.mod 
h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/Masterminds/goutils v1.1.0 h1:zukEsf/1JZwCMgHiK3GZftabmxiCw4apj3a28RPBiVg= -github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= -github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= -github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= -github.com/Masterminds/sprig v2.22.0+incompatible h1:z4yfnGrZ7netVz+0EDJ0Wi+5VZCSYp4Z0m2dk6cEM60= -github.com/Masterminds/sprig v2.22.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= -github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310 h1:BUAU3CGlLvorLI26FmByPp2eC2qla6E1Tw+scpcg/to= -github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= -github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY= -github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/coreos/bbolt v1.3.2/go.mod 
h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= -github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/denisenkom/go-mssqldb v0.9.0 h1:RSohk2RsiZqLZ0zCjtfn3S4Gp4exhpBWHyQ7D0yGjAk= -github.com/denisenkom/go-mssqldb v0.9.0/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= -github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= -github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= -github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logfmt/logfmt v0.5.0 h1:TrB8swr/68K7m9CcGut2g3UOihhbcbiMAYiuTXdEih4= -github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= -github.com/go-sql-driver/mysql v1.5.0 
h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs= -github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/gobuffalo/logger v1.0.3 h1:YaXOTHNPCvkqqA7w05A4v0k2tCdpr+sgFlgINbQ6gqc= -github.com/gobuffalo/logger v1.0.3/go.mod h1:SoeejUwldiS7ZsyCBphOGURmWdwUFXs0J7TCjEhjKxM= -github.com/gobuffalo/packd v1.0.0 h1:6ERZvJHfe24rfFmA9OaoKBdC7+c9sydrytMg8SdFGBM= -github.com/gobuffalo/packd v1.0.0/go.mod h1:6VTc4htmJRFB7u1m/4LeMTWjFoYrUiBkU9Fdec9hrhI= -github.com/gobuffalo/packr/v2 v2.8.1 h1:tkQpju6i3EtMXJ9uoF5GT6kB+LMTimDWD8Xvbz6zDVA= -github.com/gobuffalo/packr/v2 v2.8.1/go.mod h1:c/PLlOuTU+p3SybaJATW3H6lX/iK7xEz5OeMf+NnJpg= -github.com/godror/godror v0.24.2 h1:uxGAD7UdnNGjX5gf4NnEIGw0JAPTIFiqAyRBZTPKwXs= -github.com/godror/godror v0.24.2/go.mod h1:wZv/9vPiUib6tkoDl+AZ/QLf5YZgMravZ7jxH2eQWAE= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= -github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe h1:lXe2qZdvpiX5WZkZR4hgp4KJVfY3nMkvmwbVkpv1rVY= -github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/go-cmp v0.2.0/go.mod 
h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= -github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o= -github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= -github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/huandu/xstrings v1.3.2 h1:L18LIDzqlW6xN2rEkpdV8+oL/IXWJ1APd+vsdYy4Wdw= -github.com/huandu/xstrings v1.3.2/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= -github.com/imdario/mergo v0.3.11 h1:3tnifQM4i+fbajXKBHXWEH+KvNHqojZ778UH75j3bGA= -github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= -github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/karrick/godirwalk v1.15.8 h1:7+rWAZPn9zuRxaIqqT8Ohs2Q2Ac0msBqwRdxNCr2VVs= -github.com/karrick/godirwalk v1.15.8/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk= 
-github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s= -github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kortschak/utter v1.0.1/go.mod h1:vSmSjbyrlKjjsL71193LmzBOKgwePk9DH6uFaWHIInc= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= -github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/lib/pq v1.10.0 h1:Zx5DJFEYQXio93kgXnQ09fXNiUKsqv4OUEu2UtGcB1E= -github.com/lib/pq v1.10.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= -github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/markbates/errx v1.1.0 h1:QDFeR+UP95dO12JgW+tgi2UVfo0V8YBHiUIOaeBPiEI= -github.com/markbates/errx v1.1.0/go.mod h1:PLa46Oex9KNbVDZhKel8v1OT7hD5JZ2eI7AHhA0wswc= -github.com/markbates/oncer v1.0.0 h1:E83IaVAHygyndzPimgUYJjbshhDTALZyXxvk9FOlQRY= -github.com/markbates/oncer v1.0.0/go.mod h1:Z59JA581E9GP6w96jai+TGqafHPW+cPfRxz2aSZ0mcI= -github.com/markbates/safe v1.0.1 h1:yjZkbvRM6IzKj9tlu/zMJLS0n/V351OZWRnF3QfaUxI= -github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= -github.com/mattn/go-colorable 
v0.0.9 h1:UVL0vNpWh04HeJXV0KLcaT7r06gOH2l4OW6ddYRUIY4= -github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-isatty v0.0.3 h1:ns/ykhmWi7G9O+8a448SecJU3nSMBXJfqQkl0upE1jI= -github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-oci8 v0.1.1 h1:aEUDxNAyDG0tv8CA3TArnDQNyc4EhnWlsfxRgDHABHM= -github.com/mattn/go-oci8 v0.1.1/go.mod h1:wjDx6Xm9q7dFtHJvIlrI99JytznLw5wQ4R+9mNXJwGI= -github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0= -github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= -github.com/mattn/go-sqlite3 v1.14.6 h1:dNPt6NO46WmLVt2DLNpwczCmdV5boIZ6g/tlDrlRUbg= -github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/mitchellh/cli v1.1.2 h1:PvH+lL2B7IQ101xQL63Of8yFS2y+aDlsFcsqNc+u/Kw= -github.com/mitchellh/cli v1.1.2/go.mod h1:6iaV0fGdElS6dPBx0EApTxHrcWvmJphyh2n8YBLPPZ4= -github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ= -github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= -github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY= -github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= 
-github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= -github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/posener/complete v1.1.1 h1:ccV59UEOTzVDnDUEFdT95ZzHVZ+5+158q8+SJb2QV5w= -github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= -github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= -github.com/rogpeppe/go-internal v1.5.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= -github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/sirupsen/logrus v1.2.0/go.mod 
h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= -github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cobra v0.0.6/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= -github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= -github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= -github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod 
h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= -go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191122220453-ac88ee75c92c/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a h1:vclmkQCjlDX5OydZ9wv8rBCcS0QyQY66Mpf/7BZbInM= -golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net 
v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894 h1:Cz4ceDQGXuKRnVBDTS23GTn/pU5OE2C0WrNTOYK1Uuc= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod 
h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20200308013534-11ec41452d41/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/errgo.v2 v2.1.0/go.mod 
h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/gorp.v1 v1.7.2 h1:j3DWlAyGVv8whO7AcIWznQ2Yj7yJkn34B8s63GViAAw= -gopkg.in/gorp.v1 v1.7.2/go.mod h1:Wo3h+DBQZIxATwftsglhdD/62zRFPhGhTiu5jUJmCaw= -gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= -gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/vendor/github.com/steveyen/gtreap/.gitignore b/vendor/github.com/steveyen/gtreap/.gitignore deleted file mode 100644 index 94b2ac31b..000000000 --- a/vendor/github.com/steveyen/gtreap/.gitignore +++ /dev/null @@ -1,5 +0,0 @@ -#* -*~ -*.test -tmp - diff --git a/vendor/github.com/steveyen/gtreap/README.md b/vendor/github.com/steveyen/gtreap/README.md deleted file mode 100644 index 4cd8de7c7..000000000 --- a/vendor/github.com/steveyen/gtreap/README.md +++ /dev/null @@ -1,90 +0,0 @@ -gtreap ------- - -gtreap is an immutable treap implementation in the Go Language - -[![GoDoc](https://godoc.org/github.com/steveyen/gtreap?status.svg)](https://godoc.org/github.com/steveyen/gtreap) [![Build Status](https://drone.io/github.com/steveyen/gtreap/status.png)](https://drone.io/github.com/steveyen/gtreap/latest) [![Coverage Status](https://coveralls.io/repos/steveyen/gtreap/badge.png)](https://coveralls.io/r/steveyen/gtreap) - -Overview -======== - -gtreap implements an immutable 
treap data structure in golang. - -By treap, this data structure is both a heap and a binary search tree. - -By immutable, any updates/deletes to a treap will return a new treap -which can share internal nodes with the previous treap. All nodes in -this implementation are read-only after their creation. This allows -concurrent readers to operate safely with concurrent writers as -modifications only create new data structures and never modify -existing data structures. This is a simple approach to achieving MVCC -or multi-version concurrency control. - -By heap, items in the treap follow the heap-priority property, where a -parent node will have higher priority than its left and right children -nodes. - -By binary search tree, items are store lexigraphically, ordered by a -user-supplied Compare function. - -To get a probabilistic O(lg N) tree height, you should use a random -priority number during the Upsert() operation. - -LICENSE -======= - -MIT - -Example -======= - - import ( - "math/rand" - "github.com/steveyen/gtreap" - ) - - func stringCompare(a, b interface{}) int { - return bytes.Compare([]byte(a.(string)), []byte(b.(string))) - } - - t := gtreap.NewTreap(stringCompare) - t = t.Upsert("hi", rand.Int()) - t = t.Upsert("hola", rand.Int()) - t = t.Upsert("bye", rand.Int()) - t = t.Upsert("adios", rand.Int()) - - hi = t.Get("hi") - bye = t.Get("bye") - - // Some example Delete()'s... - t = t.Delete("bye") - nilValueHere = t.Get("bye") - t2 = t.Delete("hi") - nilValueHere2 = t2.Get("hi") - - // Since we still hold onto treap t, we can still access "hi". - hiStillExistsInTreapT = t.Get("hi") - - t.VisitAscend("cya", func(i Item) bool { - // This visitor callback will be invoked with every item - // from "cya" onwards. So: "hi", "hola". - // If we want to stop visiting, return false; - // otherwise a true return result means keep visiting items. - return true - }) - -Tips -==== - -The Upsert() method takes both an Item (an interface{}) and a heap -priority. 
Usually, that priority should be a random int -(math/rand.Int()) or perhaps even a hash of the item. However, if you -want to shuffle more commonly accessed items nearer to the top of the -treap for faster access, at the potential cost of not approaching a -probabilistic O(lg N) tree height, then you might tweak the priority. - -See also -======== - -For a simple, ordered, key-value storage or persistence library built -on immutable treaps, see: https://github.com/steveyen/gkvlite diff --git a/vendor/github.com/steveyen/gtreap/go.mod b/vendor/github.com/steveyen/gtreap/go.mod deleted file mode 100644 index 328430b21..000000000 --- a/vendor/github.com/steveyen/gtreap/go.mod +++ /dev/null @@ -1,3 +0,0 @@ -module github.com/steveyen/gtreap - -go 1.13 diff --git a/vendor/github.com/steveyen/gtreap/treap.go b/vendor/github.com/steveyen/gtreap/treap.go deleted file mode 100644 index f758ffe44..000000000 --- a/vendor/github.com/steveyen/gtreap/treap.go +++ /dev/null @@ -1,188 +0,0 @@ -package gtreap - -type Treap struct { - compare Compare - root *node -} - -// Compare returns an integer comparing the two items -// lexicographically. The result will be 0 if a==b, -1 if a < b, and -// +1 if a > b. -type Compare func(a, b interface{}) int - -// Item can be anything. 
-type Item interface{} - -type node struct { - item Item - priority int - left *node - right *node -} - -func NewTreap(c Compare) *Treap { - return &Treap{compare: c, root: nil} -} - -func (t *Treap) Min() Item { - n := t.root - if n == nil { - return nil - } - for n.left != nil { - n = n.left - } - return n.item -} - -func (t *Treap) Max() Item { - n := t.root - if n == nil { - return nil - } - for n.right != nil { - n = n.right - } - return n.item -} - -func (t *Treap) Get(target Item) Item { - n := t.root - for n != nil { - c := t.compare(target, n.item) - if c < 0 { - n = n.left - } else if c > 0 { - n = n.right - } else { - return n.item - } - } - return nil -} - -// Note: only the priority of the first insert of an item is used. -// Priorities from future updates on already existing items are -// ignored. To change the priority for an item, you need to do a -// Delete then an Upsert. -func (t *Treap) Upsert(item Item, itemPriority int) *Treap { - r := t.union(t.root, &node{item: item, priority: itemPriority}) - return &Treap{compare: t.compare, root: r} -} - -func (t *Treap) union(this *node, that *node) *node { - if this == nil { - return that - } - if that == nil { - return this - } - if this.priority > that.priority { - left, middle, right := t.split(that, this.item) - if middle == nil { - return &node{ - item: this.item, - priority: this.priority, - left: t.union(this.left, left), - right: t.union(this.right, right), - } - } - return &node{ - item: middle.item, - priority: this.priority, - left: t.union(this.left, left), - right: t.union(this.right, right), - } - } - // We don't use middle because the "that" has precendence. - left, _, right := t.split(this, that.item) - return &node{ - item: that.item, - priority: that.priority, - left: t.union(left, that.left), - right: t.union(right, that.right), - } -} - -// Splits a treap into two treaps based on a split item "s". -// The result tuple-3 means (left, X, right), where X is either... 
-// nil - meaning the item s was not in the original treap. -// non-nil - returning the node that had item s. -// The tuple-3's left result treap has items < s, -// and the tuple-3's right result treap has items > s. -func (t *Treap) split(n *node, s Item) (*node, *node, *node) { - if n == nil { - return nil, nil, nil - } - c := t.compare(s, n.item) - if c == 0 { - return n.left, n, n.right - } - if c < 0 { - left, middle, right := t.split(n.left, s) - return left, middle, &node{ - item: n.item, - priority: n.priority, - left: right, - right: n.right, - } - } - left, middle, right := t.split(n.right, s) - return &node{ - item: n.item, - priority: n.priority, - left: n.left, - right: left, - }, middle, right -} - -func (t *Treap) Delete(target Item) *Treap { - left, _, right := t.split(t.root, target) - return &Treap{compare: t.compare, root: t.join(left, right)} -} - -// All the items from this are < items from that. -func (t *Treap) join(this *node, that *node) *node { - if this == nil { - return that - } - if that == nil { - return this - } - if this.priority > that.priority { - return &node{ - item: this.item, - priority: this.priority, - left: this.left, - right: t.join(this.right, that), - } - } - return &node{ - item: that.item, - priority: that.priority, - left: t.join(this, that.left), - right: that.right, - } -} - -type ItemVisitor func(i Item) bool - -// Visit items greater-than-or-equal to the pivot. 
-func (t *Treap) VisitAscend(pivot Item, visitor ItemVisitor) { - t.visitAscend(t.root, pivot, visitor) -} - -func (t *Treap) visitAscend(n *node, pivot Item, visitor ItemVisitor) bool { - if n == nil { - return true - } - if t.compare(pivot, n.item) <= 0 { - if !t.visitAscend(n.left, pivot, visitor) { - return false - } - if !visitor(n.item) { - return false - } - } - return t.visitAscend(n.right, pivot, visitor) -} diff --git a/vendor/github.com/tinylib/msgp/LICENSE b/vendor/github.com/tinylib/msgp/LICENSE deleted file mode 100644 index 14d60424e..000000000 --- a/vendor/github.com/tinylib/msgp/LICENSE +++ /dev/null @@ -1,8 +0,0 @@ -Copyright (c) 2014 Philip Hofer -Portions Copyright (c) 2009 The Go Authors (license at http://golang.org) where indicated - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
\ No newline at end of file diff --git a/vendor/github.com/tinylib/msgp/msgp/advise_linux.go b/vendor/github.com/tinylib/msgp/msgp/advise_linux.go deleted file mode 100644 index 6c6bb37a5..000000000 --- a/vendor/github.com/tinylib/msgp/msgp/advise_linux.go +++ /dev/null @@ -1,24 +0,0 @@ -// +build linux,!appengine - -package msgp - -import ( - "os" - "syscall" -) - -func adviseRead(mem []byte) { - syscall.Madvise(mem, syscall.MADV_SEQUENTIAL|syscall.MADV_WILLNEED) -} - -func adviseWrite(mem []byte) { - syscall.Madvise(mem, syscall.MADV_SEQUENTIAL) -} - -func fallocate(f *os.File, sz int64) error { - err := syscall.Fallocate(int(f.Fd()), 0, 0, sz) - if err == syscall.ENOTSUP { - return f.Truncate(sz) - } - return err -} diff --git a/vendor/github.com/tinylib/msgp/msgp/advise_other.go b/vendor/github.com/tinylib/msgp/msgp/advise_other.go deleted file mode 100644 index da65ea541..000000000 --- a/vendor/github.com/tinylib/msgp/msgp/advise_other.go +++ /dev/null @@ -1,17 +0,0 @@ -// +build !linux appengine - -package msgp - -import ( - "os" -) - -// TODO: darwin, BSD support - -func adviseRead(mem []byte) {} - -func adviseWrite(mem []byte) {} - -func fallocate(f *os.File, sz int64) error { - return f.Truncate(sz) -} diff --git a/vendor/github.com/tinylib/msgp/msgp/circular.go b/vendor/github.com/tinylib/msgp/msgp/circular.go deleted file mode 100644 index a0434c7ea..000000000 --- a/vendor/github.com/tinylib/msgp/msgp/circular.go +++ /dev/null @@ -1,39 +0,0 @@ -package msgp - -type timer interface { - StartTimer() - StopTimer() -} - -// EndlessReader is an io.Reader -// that loops over the same data -// endlessly. It is used for benchmarking. -type EndlessReader struct { - tb timer - data []byte - offset int -} - -// NewEndlessReader returns a new endless reader -func NewEndlessReader(b []byte, tb timer) *EndlessReader { - return &EndlessReader{tb: tb, data: b, offset: 0} -} - -// Read implements io.Reader. 
In practice, it -// always returns (len(p), nil), although it -// fills the supplied slice while the benchmark -// timer is stopped. -func (c *EndlessReader) Read(p []byte) (int, error) { - c.tb.StopTimer() - var n int - l := len(p) - m := len(c.data) - for n < l { - nn := copy(p[n:], c.data[c.offset:]) - n += nn - c.offset += nn - c.offset %= m - } - c.tb.StartTimer() - return n, nil -} diff --git a/vendor/github.com/tinylib/msgp/msgp/defs.go b/vendor/github.com/tinylib/msgp/msgp/defs.go deleted file mode 100644 index c634eef1d..000000000 --- a/vendor/github.com/tinylib/msgp/msgp/defs.go +++ /dev/null @@ -1,142 +0,0 @@ -// This package is the support library for the msgp code generator (http://github.com/tinylib/msgp). -// -// This package defines the utilites used by the msgp code generator for encoding and decoding MessagePack -// from []byte and io.Reader/io.Writer types. Much of this package is devoted to helping the msgp code -// generator implement the Marshaler/Unmarshaler and Encodable/Decodable interfaces. -// -// This package defines four "families" of functions: -// - AppendXxxx() appends an object to a []byte in MessagePack encoding. -// - ReadXxxxBytes() reads an object from a []byte and returns the remaining bytes. -// - (*Writer).WriteXxxx() writes an object to the buffered *Writer type. -// - (*Reader).ReadXxxx() reads an object from a buffered *Reader type. -// -// Once a type has satisfied the `Encodable` and `Decodable` interfaces, -// it can be written and read from arbitrary `io.Writer`s and `io.Reader`s using -// msgp.Encode(io.Writer, msgp.Encodable) -// and -// msgp.Decode(io.Reader, msgp.Decodable) -// -// There are also methods for converting MessagePack to JSON without -// an explicit de-serialization step. 
-// -// For additional tips, tricks, and gotchas, please visit -// the wiki at http://github.com/tinylib/msgp -package msgp - -const last4 = 0x0f -const first4 = 0xf0 -const last5 = 0x1f -const first3 = 0xe0 -const last7 = 0x7f - -func isfixint(b byte) bool { - return b>>7 == 0 -} - -func isnfixint(b byte) bool { - return b&first3 == mnfixint -} - -func isfixmap(b byte) bool { - return b&first4 == mfixmap -} - -func isfixarray(b byte) bool { - return b&first4 == mfixarray -} - -func isfixstr(b byte) bool { - return b&first3 == mfixstr -} - -func wfixint(u uint8) byte { - return u & last7 -} - -func rfixint(b byte) uint8 { - return b -} - -func wnfixint(i int8) byte { - return byte(i) | mnfixint -} - -func rnfixint(b byte) int8 { - return int8(b) -} - -func rfixmap(b byte) uint8 { - return b & last4 -} - -func wfixmap(u uint8) byte { - return mfixmap | (u & last4) -} - -func rfixstr(b byte) uint8 { - return b & last5 -} - -func wfixstr(u uint8) byte { - return (u & last5) | mfixstr -} - -func rfixarray(b byte) uint8 { - return (b & last4) -} - -func wfixarray(u uint8) byte { - return (u & last4) | mfixarray -} - -// These are all the byte -// prefixes defined by the -// msgpack standard -const ( - // 0XXXXXXX - mfixint uint8 = 0x00 - - // 111XXXXX - mnfixint uint8 = 0xe0 - - // 1000XXXX - mfixmap uint8 = 0x80 - - // 1001XXXX - mfixarray uint8 = 0x90 - - // 101XXXXX - mfixstr uint8 = 0xa0 - - mnil uint8 = 0xc0 - mfalse uint8 = 0xc2 - mtrue uint8 = 0xc3 - mbin8 uint8 = 0xc4 - mbin16 uint8 = 0xc5 - mbin32 uint8 = 0xc6 - mext8 uint8 = 0xc7 - mext16 uint8 = 0xc8 - mext32 uint8 = 0xc9 - mfloat32 uint8 = 0xca - mfloat64 uint8 = 0xcb - muint8 uint8 = 0xcc - muint16 uint8 = 0xcd - muint32 uint8 = 0xce - muint64 uint8 = 0xcf - mint8 uint8 = 0xd0 - mint16 uint8 = 0xd1 - mint32 uint8 = 0xd2 - mint64 uint8 = 0xd3 - mfixext1 uint8 = 0xd4 - mfixext2 uint8 = 0xd5 - mfixext4 uint8 = 0xd6 - mfixext8 uint8 = 0xd7 - mfixext16 uint8 = 0xd8 - mstr8 uint8 = 0xd9 - mstr16 uint8 = 0xda - 
mstr32 uint8 = 0xdb - marray16 uint8 = 0xdc - marray32 uint8 = 0xdd - mmap16 uint8 = 0xde - mmap32 uint8 = 0xdf -) diff --git a/vendor/github.com/tinylib/msgp/msgp/edit.go b/vendor/github.com/tinylib/msgp/msgp/edit.go deleted file mode 100644 index b473a6f66..000000000 --- a/vendor/github.com/tinylib/msgp/msgp/edit.go +++ /dev/null @@ -1,242 +0,0 @@ -package msgp - -import ( - "math" -) - -// Locate returns a []byte pointing to the field -// in a messagepack map with the provided key. (The returned []byte -// points to a sub-slice of 'raw'; Locate does no allocations.) If the -// key doesn't exist in the map, a zero-length []byte will be returned. -func Locate(key string, raw []byte) []byte { - s, n := locate(raw, key) - return raw[s:n] -} - -// Replace takes a key ("key") in a messagepack map ("raw") -// and replaces its value with the one provided and returns -// the new []byte. The returned []byte may point to the same -// memory as "raw". Replace makes no effort to evaluate the validity -// of the contents of 'val'. It may use up to the full capacity of 'raw.' -// Replace returns 'nil' if the field doesn't exist or if the object in 'raw' -// is not a map. -func Replace(key string, raw []byte, val []byte) []byte { - start, end := locate(raw, key) - if start == end { - return nil - } - return replace(raw, start, end, val, true) -} - -// CopyReplace works similarly to Replace except that the returned -// byte slice does not point to the same memory as 'raw'. CopyReplace -// returns 'nil' if the field doesn't exist or 'raw' isn't a map. -func CopyReplace(key string, raw []byte, val []byte) []byte { - start, end := locate(raw, key) - if start == end { - return nil - } - return replace(raw, start, end, val, false) -} - -// Remove removes a key-value pair from 'raw'. It returns -// 'raw' unchanged if the key didn't exist. 
-func Remove(key string, raw []byte) []byte { - start, end := locateKV(raw, key) - if start == end { - return raw - } - raw = raw[:start+copy(raw[start:], raw[end:])] - return resizeMap(raw, -1) -} - -// HasKey returns whether the map in 'raw' has -// a field with key 'key' -func HasKey(key string, raw []byte) bool { - sz, bts, err := ReadMapHeaderBytes(raw) - if err != nil { - return false - } - var field []byte - for i := uint32(0); i < sz; i++ { - field, bts, err = ReadStringZC(bts) - if err != nil { - return false - } - if UnsafeString(field) == key { - return true - } - } - return false -} - -func replace(raw []byte, start int, end int, val []byte, inplace bool) []byte { - ll := end - start // length of segment to replace - lv := len(val) - - if inplace { - extra := lv - ll - - // fastest case: we're doing - // a 1:1 replacement - if extra == 0 { - copy(raw[start:], val) - return raw - - } else if extra < 0 { - // 'val' smaller than replaced value - // copy in place and shift back - - x := copy(raw[start:], val) - y := copy(raw[start+x:], raw[end:]) - return raw[:start+x+y] - - } else if extra < cap(raw)-len(raw) { - // 'val' less than (cap-len) extra bytes - // copy in place and shift forward - raw = raw[0 : len(raw)+extra] - // shift end forward - copy(raw[end+extra:], raw[end:]) - copy(raw[start:], val) - return raw - } - } - - // we have to allocate new space - out := make([]byte, len(raw)+len(val)-ll) - x := copy(out, raw[:start]) - y := copy(out[x:], val) - copy(out[x+y:], raw[end:]) - return out -} - -// locate does a naive O(n) search for the map key; returns start, end -// (returns 0,0 on error) -func locate(raw []byte, key string) (start int, end int) { - var ( - sz uint32 - bts []byte - field []byte - err error - ) - sz, bts, err = ReadMapHeaderBytes(raw) - if err != nil { - return - } - - // loop and locate field - for i := uint32(0); i < sz; i++ { - field, bts, err = ReadStringZC(bts) - if err != nil { - return 0, 0 - } - if UnsafeString(field) == 
key { - // start location - l := len(raw) - start = l - len(bts) - bts, err = Skip(bts) - if err != nil { - return 0, 0 - } - end = l - len(bts) - return - } - bts, err = Skip(bts) - if err != nil { - return 0, 0 - } - } - return 0, 0 -} - -// locate key AND value -func locateKV(raw []byte, key string) (start int, end int) { - var ( - sz uint32 - bts []byte - field []byte - err error - ) - sz, bts, err = ReadMapHeaderBytes(raw) - if err != nil { - return 0, 0 - } - - for i := uint32(0); i < sz; i++ { - tmp := len(bts) - field, bts, err = ReadStringZC(bts) - if err != nil { - return 0, 0 - } - if UnsafeString(field) == key { - start = len(raw) - tmp - bts, err = Skip(bts) - if err != nil { - return 0, 0 - } - end = len(raw) - len(bts) - return - } - bts, err = Skip(bts) - if err != nil { - return 0, 0 - } - } - return 0, 0 -} - -// delta is delta on map size -func resizeMap(raw []byte, delta int64) []byte { - var sz int64 - switch raw[0] { - case mmap16: - sz = int64(big.Uint16(raw[1:])) - if sz+delta <= math.MaxUint16 { - big.PutUint16(raw[1:], uint16(sz+delta)) - return raw - } - if cap(raw)-len(raw) >= 2 { - raw = raw[0 : len(raw)+2] - copy(raw[5:], raw[3:]) - raw[0] = mmap32 - big.PutUint32(raw[1:], uint32(sz+delta)) - return raw - } - n := make([]byte, 0, len(raw)+5) - n = AppendMapHeader(n, uint32(sz+delta)) - return append(n, raw[3:]...) - - case mmap32: - sz = int64(big.Uint32(raw[1:])) - big.PutUint32(raw[1:], uint32(sz+delta)) - return raw - - default: - sz = int64(rfixmap(raw[0])) - if sz+delta < 16 { - raw[0] = wfixmap(uint8(sz + delta)) - return raw - } else if sz+delta <= math.MaxUint16 { - if cap(raw)-len(raw) >= 2 { - raw = raw[0 : len(raw)+2] - copy(raw[3:], raw[1:]) - raw[0] = mmap16 - big.PutUint16(raw[1:], uint16(sz+delta)) - return raw - } - n := make([]byte, 0, len(raw)+5) - n = AppendMapHeader(n, uint32(sz+delta)) - return append(n, raw[1:]...) 
- } - if cap(raw)-len(raw) >= 4 { - raw = raw[0 : len(raw)+4] - copy(raw[5:], raw[1:]) - raw[0] = mmap32 - big.PutUint32(raw[1:], uint32(sz+delta)) - return raw - } - n := make([]byte, 0, len(raw)+5) - n = AppendMapHeader(n, uint32(sz+delta)) - return append(n, raw[1:]...) - } -} diff --git a/vendor/github.com/tinylib/msgp/msgp/elsize.go b/vendor/github.com/tinylib/msgp/msgp/elsize.go deleted file mode 100644 index 95762e7ee..000000000 --- a/vendor/github.com/tinylib/msgp/msgp/elsize.go +++ /dev/null @@ -1,99 +0,0 @@ -package msgp - -// size of every object on the wire, -// plus type information. gives us -// constant-time type information -// for traversing composite objects. -// -var sizes = [256]bytespec{ - mnil: {size: 1, extra: constsize, typ: NilType}, - mfalse: {size: 1, extra: constsize, typ: BoolType}, - mtrue: {size: 1, extra: constsize, typ: BoolType}, - mbin8: {size: 2, extra: extra8, typ: BinType}, - mbin16: {size: 3, extra: extra16, typ: BinType}, - mbin32: {size: 5, extra: extra32, typ: BinType}, - mext8: {size: 3, extra: extra8, typ: ExtensionType}, - mext16: {size: 4, extra: extra16, typ: ExtensionType}, - mext32: {size: 6, extra: extra32, typ: ExtensionType}, - mfloat32: {size: 5, extra: constsize, typ: Float32Type}, - mfloat64: {size: 9, extra: constsize, typ: Float64Type}, - muint8: {size: 2, extra: constsize, typ: UintType}, - muint16: {size: 3, extra: constsize, typ: UintType}, - muint32: {size: 5, extra: constsize, typ: UintType}, - muint64: {size: 9, extra: constsize, typ: UintType}, - mint8: {size: 2, extra: constsize, typ: IntType}, - mint16: {size: 3, extra: constsize, typ: IntType}, - mint32: {size: 5, extra: constsize, typ: IntType}, - mint64: {size: 9, extra: constsize, typ: IntType}, - mfixext1: {size: 3, extra: constsize, typ: ExtensionType}, - mfixext2: {size: 4, extra: constsize, typ: ExtensionType}, - mfixext4: {size: 6, extra: constsize, typ: ExtensionType}, - mfixext8: {size: 10, extra: constsize, typ: ExtensionType}, - 
mfixext16: {size: 18, extra: constsize, typ: ExtensionType}, - mstr8: {size: 2, extra: extra8, typ: StrType}, - mstr16: {size: 3, extra: extra16, typ: StrType}, - mstr32: {size: 5, extra: extra32, typ: StrType}, - marray16: {size: 3, extra: array16v, typ: ArrayType}, - marray32: {size: 5, extra: array32v, typ: ArrayType}, - mmap16: {size: 3, extra: map16v, typ: MapType}, - mmap32: {size: 5, extra: map32v, typ: MapType}, -} - -func init() { - // set up fixed fields - - // fixint - for i := mfixint; i < 0x80; i++ { - sizes[i] = bytespec{size: 1, extra: constsize, typ: IntType} - } - - // nfixint - for i := uint16(mnfixint); i < 0x100; i++ { - sizes[uint8(i)] = bytespec{size: 1, extra: constsize, typ: IntType} - } - - // fixstr gets constsize, - // since the prefix yields the size - for i := mfixstr; i < 0xc0; i++ { - sizes[i] = bytespec{size: 1 + rfixstr(i), extra: constsize, typ: StrType} - } - - // fixmap - for i := mfixmap; i < 0x90; i++ { - sizes[i] = bytespec{size: 1, extra: varmode(2 * rfixmap(i)), typ: MapType} - } - - // fixarray - for i := mfixarray; i < 0xa0; i++ { - sizes[i] = bytespec{size: 1, extra: varmode(rfixarray(i)), typ: ArrayType} - } -} - -// a valid bytespsec has -// non-zero 'size' and -// non-zero 'typ' -type bytespec struct { - size uint8 // prefix size information - extra varmode // extra size information - typ Type // type - _ byte // makes bytespec 4 bytes (yes, this matters) -} - -// size mode -// if positive, # elements for composites -type varmode int8 - -const ( - constsize varmode = 0 // constant size (size bytes + uint8(varmode) objects) - extra8 = -1 // has uint8(p[1]) extra bytes - extra16 = -2 // has be16(p[1:]) extra bytes - extra32 = -3 // has be32(p[1:]) extra bytes - map16v = -4 // use map16 - map32v = -5 // use map32 - array16v = -6 // use array16 - array32v = -7 // use array32 -) - -func getType(v byte) Type { - return sizes[v].typ -} diff --git a/vendor/github.com/tinylib/msgp/msgp/errors.go 
b/vendor/github.com/tinylib/msgp/msgp/errors.go deleted file mode 100644 index cc78a980c..000000000 --- a/vendor/github.com/tinylib/msgp/msgp/errors.go +++ /dev/null @@ -1,314 +0,0 @@ -package msgp - -import ( - "fmt" - "reflect" -) - -const resumableDefault = false - -var ( - // ErrShortBytes is returned when the - // slice being decoded is too short to - // contain the contents of the message - ErrShortBytes error = errShort{} - - // this error is only returned - // if we reach code that should - // be unreachable - fatal error = errFatal{} -) - -// Error is the interface satisfied -// by all of the errors that originate -// from this package. -type Error interface { - error - - // Resumable returns whether - // or not the error means that - // the stream of data is malformed - // and the information is unrecoverable. - Resumable() bool -} - -// contextError allows msgp Error instances to be enhanced with additional -// context about their origin. -type contextError interface { - Error - - // withContext must not modify the error instance - it must clone and - // return a new error with the context added. - withContext(ctx string) error -} - -// Cause returns the underlying cause of an error that has been wrapped -// with additional context. -func Cause(e error) error { - out := e - if e, ok := e.(errWrapped); ok && e.cause != nil { - out = e.cause - } - return out -} - -// Resumable returns whether or not the error means that the stream of data is -// malformed and the information is unrecoverable. -func Resumable(e error) bool { - if e, ok := e.(Error); ok { - return e.Resumable() - } - return resumableDefault -} - -// WrapError wraps an error with additional context that allows the part of the -// serialized type that caused the problem to be identified. Underlying errors -// can be retrieved using Cause() -// -// The input error is not modified - a new error should be returned. 
-// -// ErrShortBytes is not wrapped with any context due to backward compatibility -// issues with the public API. -// -func WrapError(err error, ctx ...interface{}) error { - switch e := err.(type) { - case errShort: - return e - case contextError: - return e.withContext(ctxString(ctx)) - default: - return errWrapped{cause: err, ctx: ctxString(ctx)} - } -} - -// ctxString converts the incoming interface{} slice into a single string. -func ctxString(ctx []interface{}) string { - out := "" - for idx, cv := range ctx { - if idx > 0 { - out += "/" - } - out += fmt.Sprintf("%v", cv) - } - return out -} - -func addCtx(ctx, add string) string { - if ctx != "" { - return add + "/" + ctx - } else { - return add - } -} - -// errWrapped allows arbitrary errors passed to WrapError to be enhanced with -// context and unwrapped with Cause() -type errWrapped struct { - cause error - ctx string -} - -func (e errWrapped) Error() string { - if e.ctx != "" { - return fmt.Sprintf("%s at %s", e.cause, e.ctx) - } else { - return e.cause.Error() - } -} - -func (e errWrapped) Resumable() bool { - if e, ok := e.cause.(Error); ok { - return e.Resumable() - } - return resumableDefault -} - -type errShort struct{} - -func (e errShort) Error() string { return "msgp: too few bytes left to read object" } -func (e errShort) Resumable() bool { return false } - -type errFatal struct { - ctx string -} - -func (f errFatal) Error() string { - out := "msgp: fatal decoding error (unreachable code)" - if f.ctx != "" { - out += " at " + f.ctx - } - return out -} - -func (f errFatal) Resumable() bool { return false } - -func (f errFatal) withContext(ctx string) error { f.ctx = addCtx(f.ctx, ctx); return f } - -// ArrayError is an error returned -// when decoding a fix-sized array -// of the wrong size -type ArrayError struct { - Wanted uint32 - Got uint32 - ctx string -} - -// Error implements the error interface -func (a ArrayError) Error() string { - out := fmt.Sprintf("msgp: wanted array of size %d; 
got %d", a.Wanted, a.Got) - if a.ctx != "" { - out += " at " + a.ctx - } - return out -} - -// Resumable is always 'true' for ArrayErrors -func (a ArrayError) Resumable() bool { return true } - -func (a ArrayError) withContext(ctx string) error { a.ctx = addCtx(a.ctx, ctx); return a } - -// IntOverflow is returned when a call -// would downcast an integer to a type -// with too few bits to hold its value. -type IntOverflow struct { - Value int64 // the value of the integer - FailedBitsize int // the bit size that the int64 could not fit into - ctx string -} - -// Error implements the error interface -func (i IntOverflow) Error() string { - str := fmt.Sprintf("msgp: %d overflows int%d", i.Value, i.FailedBitsize) - if i.ctx != "" { - str += " at " + i.ctx - } - return str -} - -// Resumable is always 'true' for overflows -func (i IntOverflow) Resumable() bool { return true } - -func (i IntOverflow) withContext(ctx string) error { i.ctx = addCtx(i.ctx, ctx); return i } - -// UintOverflow is returned when a call -// would downcast an unsigned integer to a type -// with too few bits to hold its value -type UintOverflow struct { - Value uint64 // value of the uint - FailedBitsize int // the bit size that couldn't fit the value - ctx string -} - -// Error implements the error interface -func (u UintOverflow) Error() string { - str := fmt.Sprintf("msgp: %d overflows uint%d", u.Value, u.FailedBitsize) - if u.ctx != "" { - str += " at " + u.ctx - } - return str -} - -// Resumable is always 'true' for overflows -func (u UintOverflow) Resumable() bool { return true } - -func (u UintOverflow) withContext(ctx string) error { u.ctx = addCtx(u.ctx, ctx); return u } - -// UintBelowZero is returned when a call -// would cast a signed integer below zero -// to an unsigned integer. 
-type UintBelowZero struct { - Value int64 // value of the incoming int - ctx string -} - -// Error implements the error interface -func (u UintBelowZero) Error() string { - str := fmt.Sprintf("msgp: attempted to cast int %d to unsigned", u.Value) - if u.ctx != "" { - str += " at " + u.ctx - } - return str -} - -// Resumable is always 'true' for overflows -func (u UintBelowZero) Resumable() bool { return true } - -func (u UintBelowZero) withContext(ctx string) error { - u.ctx = ctx - return u -} - -// A TypeError is returned when a particular -// decoding method is unsuitable for decoding -// a particular MessagePack value. -type TypeError struct { - Method Type // Type expected by method - Encoded Type // Type actually encoded - - ctx string -} - -// Error implements the error interface -func (t TypeError) Error() string { - out := fmt.Sprintf("msgp: attempted to decode type %q with method for %q", t.Encoded, t.Method) - if t.ctx != "" { - out += " at " + t.ctx - } - return out -} - -// Resumable returns 'true' for TypeErrors -func (t TypeError) Resumable() bool { return true } - -func (t TypeError) withContext(ctx string) error { t.ctx = addCtx(t.ctx, ctx); return t } - -// returns either InvalidPrefixError or -// TypeError depending on whether or not -// the prefix is recognized -func badPrefix(want Type, lead byte) error { - t := sizes[lead].typ - if t == InvalidType { - return InvalidPrefixError(lead) - } - return TypeError{Method: want, Encoded: t} -} - -// InvalidPrefixError is returned when a bad encoding -// uses a prefix that is not recognized in the MessagePack standard. -// This kind of error is unrecoverable. 
-type InvalidPrefixError byte - -// Error implements the error interface -func (i InvalidPrefixError) Error() string { - return fmt.Sprintf("msgp: unrecognized type prefix 0x%x", byte(i)) -} - -// Resumable returns 'false' for InvalidPrefixErrors -func (i InvalidPrefixError) Resumable() bool { return false } - -// ErrUnsupportedType is returned -// when a bad argument is supplied -// to a function that takes `interface{}`. -type ErrUnsupportedType struct { - T reflect.Type - - ctx string -} - -// Error implements error -func (e *ErrUnsupportedType) Error() string { - out := fmt.Sprintf("msgp: type %q not supported", e.T) - if e.ctx != "" { - out += " at " + e.ctx - } - return out -} - -// Resumable returns 'true' for ErrUnsupportedType -func (e *ErrUnsupportedType) Resumable() bool { return true } - -func (e *ErrUnsupportedType) withContext(ctx string) error { - o := *e - o.ctx = addCtx(o.ctx, ctx) - return &o -} diff --git a/vendor/github.com/tinylib/msgp/msgp/extension.go b/vendor/github.com/tinylib/msgp/msgp/extension.go deleted file mode 100644 index b2e110851..000000000 --- a/vendor/github.com/tinylib/msgp/msgp/extension.go +++ /dev/null @@ -1,549 +0,0 @@ -package msgp - -import ( - "fmt" - "math" -) - -const ( - // Complex64Extension is the extension number used for complex64 - Complex64Extension = 3 - - // Complex128Extension is the extension number used for complex128 - Complex128Extension = 4 - - // TimeExtension is the extension number used for time.Time - TimeExtension = 5 -) - -// our extensions live here -var extensionReg = make(map[int8]func() Extension) - -// RegisterExtension registers extensions so that they -// can be initialized and returned by methods that -// decode `interface{}` values. This should only -// be called during initialization. f() should return -// a newly-initialized zero value of the extension. 
Keep in -// mind that extensions 3, 4, and 5 are reserved for -// complex64, complex128, and time.Time, respectively, -// and that MessagePack reserves extension types from -127 to -1. -// -// For example, if you wanted to register a user-defined struct: -// -// msgp.RegisterExtension(10, func() msgp.Extension { &MyExtension{} }) -// -// RegisterExtension will panic if you call it multiple times -// with the same 'typ' argument, or if you use a reserved -// type (3, 4, or 5). -func RegisterExtension(typ int8, f func() Extension) { - switch typ { - case Complex64Extension, Complex128Extension, TimeExtension: - panic(fmt.Sprint("msgp: forbidden extension type:", typ)) - } - if _, ok := extensionReg[typ]; ok { - panic(fmt.Sprint("msgp: RegisterExtension() called with typ", typ, "more than once")) - } - extensionReg[typ] = f -} - -// ExtensionTypeError is an error type returned -// when there is a mis-match between an extension type -// and the type encoded on the wire -type ExtensionTypeError struct { - Got int8 - Want int8 -} - -// Error implements the error interface -func (e ExtensionTypeError) Error() string { - return fmt.Sprintf("msgp: error decoding extension: wanted type %d; got type %d", e.Want, e.Got) -} - -// Resumable returns 'true' for ExtensionTypeErrors -func (e ExtensionTypeError) Resumable() bool { return true } - -func errExt(got int8, wanted int8) error { - return ExtensionTypeError{Got: got, Want: wanted} -} - -// Extension is the interface fulfilled -// by types that want to define their -// own binary encoding. -type Extension interface { - // ExtensionType should return - // a int8 that identifies the concrete - // type of the extension. (Types <0 are - // officially reserved by the MessagePack - // specifications.) 
- ExtensionType() int8 - - // Len should return the length - // of the data to be encoded - Len() int - - // MarshalBinaryTo should copy - // the data into the supplied slice, - // assuming that the slice has length Len() - MarshalBinaryTo([]byte) error - - UnmarshalBinary([]byte) error -} - -// RawExtension implements the Extension interface -type RawExtension struct { - Data []byte - Type int8 -} - -// ExtensionType implements Extension.ExtensionType, and returns r.Type -func (r *RawExtension) ExtensionType() int8 { return r.Type } - -// Len implements Extension.Len, and returns len(r.Data) -func (r *RawExtension) Len() int { return len(r.Data) } - -// MarshalBinaryTo implements Extension.MarshalBinaryTo, -// and returns a copy of r.Data -func (r *RawExtension) MarshalBinaryTo(d []byte) error { - copy(d, r.Data) - return nil -} - -// UnmarshalBinary implements Extension.UnmarshalBinary, -// and sets r.Data to the contents of the provided slice -func (r *RawExtension) UnmarshalBinary(b []byte) error { - if cap(r.Data) >= len(b) { - r.Data = r.Data[0:len(b)] - } else { - r.Data = make([]byte, len(b)) - } - copy(r.Data, b) - return nil -} - -// WriteExtension writes an extension type to the writer -func (mw *Writer) WriteExtension(e Extension) error { - l := e.Len() - var err error - switch l { - case 0: - o, err := mw.require(3) - if err != nil { - return err - } - mw.buf[o] = mext8 - mw.buf[o+1] = 0 - mw.buf[o+2] = byte(e.ExtensionType()) - case 1: - o, err := mw.require(2) - if err != nil { - return err - } - mw.buf[o] = mfixext1 - mw.buf[o+1] = byte(e.ExtensionType()) - case 2: - o, err := mw.require(2) - if err != nil { - return err - } - mw.buf[o] = mfixext2 - mw.buf[o+1] = byte(e.ExtensionType()) - case 4: - o, err := mw.require(2) - if err != nil { - return err - } - mw.buf[o] = mfixext4 - mw.buf[o+1] = byte(e.ExtensionType()) - case 8: - o, err := mw.require(2) - if err != nil { - return err - } - mw.buf[o] = mfixext8 - mw.buf[o+1] = byte(e.ExtensionType()) 
- case 16: - o, err := mw.require(2) - if err != nil { - return err - } - mw.buf[o] = mfixext16 - mw.buf[o+1] = byte(e.ExtensionType()) - default: - switch { - case l < math.MaxUint8: - o, err := mw.require(3) - if err != nil { - return err - } - mw.buf[o] = mext8 - mw.buf[o+1] = byte(uint8(l)) - mw.buf[o+2] = byte(e.ExtensionType()) - case l < math.MaxUint16: - o, err := mw.require(4) - if err != nil { - return err - } - mw.buf[o] = mext16 - big.PutUint16(mw.buf[o+1:], uint16(l)) - mw.buf[o+3] = byte(e.ExtensionType()) - default: - o, err := mw.require(6) - if err != nil { - return err - } - mw.buf[o] = mext32 - big.PutUint32(mw.buf[o+1:], uint32(l)) - mw.buf[o+5] = byte(e.ExtensionType()) - } - } - // we can only write directly to the - // buffer if we're sure that it - // fits the object - if l <= mw.bufsize() { - o, err := mw.require(l) - if err != nil { - return err - } - return e.MarshalBinaryTo(mw.buf[o:]) - } - // here we create a new buffer - // just large enough for the body - // and save it as the write buffer - err = mw.flush() - if err != nil { - return err - } - buf := make([]byte, l) - err = e.MarshalBinaryTo(buf) - if err != nil { - return err - } - mw.buf = buf - mw.wloc = l - return nil -} - -// peek at the extension type, assuming the next -// kind to be read is Extension -func (m *Reader) peekExtensionType() (int8, error) { - p, err := m.R.Peek(2) - if err != nil { - return 0, err - } - spec := sizes[p[0]] - if spec.typ != ExtensionType { - return 0, badPrefix(ExtensionType, p[0]) - } - if spec.extra == constsize { - return int8(p[1]), nil - } - size := spec.size - p, err = m.R.Peek(int(size)) - if err != nil { - return 0, err - } - return int8(p[size-1]), nil -} - -// peekExtension peeks at the extension encoding type -// (must guarantee at least 1 byte in 'b') -func peekExtension(b []byte) (int8, error) { - spec := sizes[b[0]] - size := spec.size - if spec.typ != ExtensionType { - return 0, badPrefix(ExtensionType, b[0]) - } - if len(b) < 
int(size) { - return 0, ErrShortBytes - } - // for fixed extensions, - // the type information is in - // the second byte - if spec.extra == constsize { - return int8(b[1]), nil - } - // otherwise, it's in the last - // part of the prefix - return int8(b[size-1]), nil -} - -// ReadExtension reads the next object from the reader -// as an extension. ReadExtension will fail if the next -// object in the stream is not an extension, or if -// e.Type() is not the same as the wire type. -func (m *Reader) ReadExtension(e Extension) (err error) { - var p []byte - p, err = m.R.Peek(2) - if err != nil { - return - } - lead := p[0] - var read int - var off int - switch lead { - case mfixext1: - if int8(p[1]) != e.ExtensionType() { - err = errExt(int8(p[1]), e.ExtensionType()) - return - } - p, err = m.R.Peek(3) - if err != nil { - return - } - err = e.UnmarshalBinary(p[2:]) - if err == nil { - _, err = m.R.Skip(3) - } - return - - case mfixext2: - if int8(p[1]) != e.ExtensionType() { - err = errExt(int8(p[1]), e.ExtensionType()) - return - } - p, err = m.R.Peek(4) - if err != nil { - return - } - err = e.UnmarshalBinary(p[2:]) - if err == nil { - _, err = m.R.Skip(4) - } - return - - case mfixext4: - if int8(p[1]) != e.ExtensionType() { - err = errExt(int8(p[1]), e.ExtensionType()) - return - } - p, err = m.R.Peek(6) - if err != nil { - return - } - err = e.UnmarshalBinary(p[2:]) - if err == nil { - _, err = m.R.Skip(6) - } - return - - case mfixext8: - if int8(p[1]) != e.ExtensionType() { - err = errExt(int8(p[1]), e.ExtensionType()) - return - } - p, err = m.R.Peek(10) - if err != nil { - return - } - err = e.UnmarshalBinary(p[2:]) - if err == nil { - _, err = m.R.Skip(10) - } - return - - case mfixext16: - if int8(p[1]) != e.ExtensionType() { - err = errExt(int8(p[1]), e.ExtensionType()) - return - } - p, err = m.R.Peek(18) - if err != nil { - return - } - err = e.UnmarshalBinary(p[2:]) - if err == nil { - _, err = m.R.Skip(18) - } - return - - case mext8: - p, err = 
m.R.Peek(3) - if err != nil { - return - } - if int8(p[2]) != e.ExtensionType() { - err = errExt(int8(p[2]), e.ExtensionType()) - return - } - read = int(uint8(p[1])) - off = 3 - - case mext16: - p, err = m.R.Peek(4) - if err != nil { - return - } - if int8(p[3]) != e.ExtensionType() { - err = errExt(int8(p[3]), e.ExtensionType()) - return - } - read = int(big.Uint16(p[1:])) - off = 4 - - case mext32: - p, err = m.R.Peek(6) - if err != nil { - return - } - if int8(p[5]) != e.ExtensionType() { - err = errExt(int8(p[5]), e.ExtensionType()) - return - } - read = int(big.Uint32(p[1:])) - off = 6 - - default: - err = badPrefix(ExtensionType, lead) - return - } - - p, err = m.R.Peek(read + off) - if err != nil { - return - } - err = e.UnmarshalBinary(p[off:]) - if err == nil { - _, err = m.R.Skip(read + off) - } - return -} - -// AppendExtension appends a MessagePack extension to the provided slice -func AppendExtension(b []byte, e Extension) ([]byte, error) { - l := e.Len() - var o []byte - var n int - switch l { - case 0: - o, n = ensure(b, 3) - o[n] = mext8 - o[n+1] = 0 - o[n+2] = byte(e.ExtensionType()) - return o[:n+3], nil - case 1: - o, n = ensure(b, 3) - o[n] = mfixext1 - o[n+1] = byte(e.ExtensionType()) - n += 2 - case 2: - o, n = ensure(b, 4) - o[n] = mfixext2 - o[n+1] = byte(e.ExtensionType()) - n += 2 - case 4: - o, n = ensure(b, 6) - o[n] = mfixext4 - o[n+1] = byte(e.ExtensionType()) - n += 2 - case 8: - o, n = ensure(b, 10) - o[n] = mfixext8 - o[n+1] = byte(e.ExtensionType()) - n += 2 - case 16: - o, n = ensure(b, 18) - o[n] = mfixext16 - o[n+1] = byte(e.ExtensionType()) - n += 2 - default: - switch { - case l < math.MaxUint8: - o, n = ensure(b, l+3) - o[n] = mext8 - o[n+1] = byte(uint8(l)) - o[n+2] = byte(e.ExtensionType()) - n += 3 - case l < math.MaxUint16: - o, n = ensure(b, l+4) - o[n] = mext16 - big.PutUint16(o[n+1:], uint16(l)) - o[n+3] = byte(e.ExtensionType()) - n += 4 - default: - o, n = ensure(b, l+6) - o[n] = mext32 - big.PutUint32(o[n+1:], 
uint32(l)) - o[n+5] = byte(e.ExtensionType()) - n += 6 - } - } - return o, e.MarshalBinaryTo(o[n:]) -} - -// ReadExtensionBytes reads an extension from 'b' into 'e' -// and returns any remaining bytes. -// Possible errors: -// - ErrShortBytes ('b' not long enough) -// - ExtensionTypeError{} (wire type not the same as e.Type()) -// - TypeError{} (next object not an extension) -// - InvalidPrefixError -// - An umarshal error returned from e.UnmarshalBinary -func ReadExtensionBytes(b []byte, e Extension) ([]byte, error) { - l := len(b) - if l < 3 { - return b, ErrShortBytes - } - lead := b[0] - var ( - sz int // size of 'data' - off int // offset of 'data' - typ int8 - ) - switch lead { - case mfixext1: - typ = int8(b[1]) - sz = 1 - off = 2 - case mfixext2: - typ = int8(b[1]) - sz = 2 - off = 2 - case mfixext4: - typ = int8(b[1]) - sz = 4 - off = 2 - case mfixext8: - typ = int8(b[1]) - sz = 8 - off = 2 - case mfixext16: - typ = int8(b[1]) - sz = 16 - off = 2 - case mext8: - sz = int(uint8(b[1])) - typ = int8(b[2]) - off = 3 - if sz == 0 { - return b[3:], e.UnmarshalBinary(b[3:3]) - } - case mext16: - if l < 4 { - return b, ErrShortBytes - } - sz = int(big.Uint16(b[1:])) - typ = int8(b[3]) - off = 4 - case mext32: - if l < 6 { - return b, ErrShortBytes - } - sz = int(big.Uint32(b[1:])) - typ = int8(b[5]) - off = 6 - default: - return b, badPrefix(ExtensionType, lead) - } - - if typ != e.ExtensionType() { - return b, errExt(typ, e.ExtensionType()) - } - - // the data of the extension starts - // at 'off' and is 'sz' bytes long - if len(b[off:]) < sz { - return b, ErrShortBytes - } - tot := off + sz - return b[tot:], e.UnmarshalBinary(b[off:tot]) -} diff --git a/vendor/github.com/tinylib/msgp/msgp/file.go b/vendor/github.com/tinylib/msgp/msgp/file.go deleted file mode 100644 index 8e7370ebc..000000000 --- a/vendor/github.com/tinylib/msgp/msgp/file.go +++ /dev/null @@ -1,92 +0,0 @@ -// +build linux darwin dragonfly freebsd netbsd openbsd -// +build !appengine - -package 
msgp - -import ( - "os" - "syscall" -) - -// ReadFile reads a file into 'dst' using -// a read-only memory mapping. Consequently, -// the file must be mmap-able, and the -// Unmarshaler should never write to -// the source memory. (Methods generated -// by the msgp tool obey that constraint, but -// user-defined implementations may not.) -// -// Reading and writing through file mappings -// is only efficient for large files; small -// files are best read and written using -// the ordinary streaming interfaces. -// -func ReadFile(dst Unmarshaler, file *os.File) error { - stat, err := file.Stat() - if err != nil { - return err - } - data, err := syscall.Mmap(int(file.Fd()), 0, int(stat.Size()), syscall.PROT_READ, syscall.MAP_SHARED) - if err != nil { - return err - } - adviseRead(data) - _, err = dst.UnmarshalMsg(data) - uerr := syscall.Munmap(data) - if err == nil { - err = uerr - } - return err -} - -// MarshalSizer is the combination -// of the Marshaler and Sizer -// interfaces. -type MarshalSizer interface { - Marshaler - Sizer -} - -// WriteFile writes a file from 'src' using -// memory mapping. It overwrites the entire -// contents of the previous file. -// The mapping size is calculated -// using the `Msgsize()` method -// of 'src', so it must produce a result -// equal to or greater than the actual encoded -// size of the object. Otherwise, -// a fault (SIGBUS) will occur. -// -// Reading and writing through file mappings -// is only efficient for large files; small -// files are best read and written using -// the ordinary streaming interfaces. -// -// NOTE: The performance of this call -// is highly OS- and filesystem-dependent. -// Users should take care to test that this -// performs as expected in a production environment. -// (Linux users should run a kernel and filesystem -// that support fallocate(2) for the best results.) 
-func WriteFile(src MarshalSizer, file *os.File) error { - sz := src.Msgsize() - err := fallocate(file, int64(sz)) - if err != nil { - return err - } - data, err := syscall.Mmap(int(file.Fd()), 0, sz, syscall.PROT_READ|syscall.PROT_WRITE, syscall.MAP_SHARED) - if err != nil { - return err - } - adviseWrite(data) - chunk := data[:0] - chunk, err = src.MarshalMsg(chunk) - if err != nil { - return err - } - uerr := syscall.Munmap(data) - if uerr != nil { - return uerr - } - return file.Truncate(int64(len(chunk))) -} diff --git a/vendor/github.com/tinylib/msgp/msgp/file_port.go b/vendor/github.com/tinylib/msgp/msgp/file_port.go deleted file mode 100644 index 6e654dbdc..000000000 --- a/vendor/github.com/tinylib/msgp/msgp/file_port.go +++ /dev/null @@ -1,47 +0,0 @@ -// +build windows appengine - -package msgp - -import ( - "io/ioutil" - "os" -) - -// MarshalSizer is the combination -// of the Marshaler and Sizer -// interfaces. -type MarshalSizer interface { - Marshaler - Sizer -} - -func ReadFile(dst Unmarshaler, file *os.File) error { - if u, ok := dst.(Decodable); ok { - return u.DecodeMsg(NewReader(file)) - } - - data, err := ioutil.ReadAll(file) - if err != nil { - return err - } - _, err = dst.UnmarshalMsg(data) - return err -} - -func WriteFile(src MarshalSizer, file *os.File) error { - if e, ok := src.(Encodable); ok { - w := NewWriter(file) - err := e.EncodeMsg(w) - if err == nil { - err = w.Flush() - } - return err - } - - raw, err := src.MarshalMsg(nil) - if err != nil { - return err - } - _, err = file.Write(raw) - return err -} diff --git a/vendor/github.com/tinylib/msgp/msgp/integers.go b/vendor/github.com/tinylib/msgp/msgp/integers.go deleted file mode 100644 index f817d7759..000000000 --- a/vendor/github.com/tinylib/msgp/msgp/integers.go +++ /dev/null @@ -1,174 +0,0 @@ -package msgp - -/* ---------------------------------- - integer encoding utilities - (inline-able) - - TODO(tinylib): there are faster, - albeit non-portable solutions - to the code below. 
implement - byteswap? - ---------------------------------- */ - -func putMint64(b []byte, i int64) { - b[0] = mint64 - b[1] = byte(i >> 56) - b[2] = byte(i >> 48) - b[3] = byte(i >> 40) - b[4] = byte(i >> 32) - b[5] = byte(i >> 24) - b[6] = byte(i >> 16) - b[7] = byte(i >> 8) - b[8] = byte(i) -} - -func getMint64(b []byte) int64 { - return (int64(b[1]) << 56) | (int64(b[2]) << 48) | - (int64(b[3]) << 40) | (int64(b[4]) << 32) | - (int64(b[5]) << 24) | (int64(b[6]) << 16) | - (int64(b[7]) << 8) | (int64(b[8])) -} - -func putMint32(b []byte, i int32) { - b[0] = mint32 - b[1] = byte(i >> 24) - b[2] = byte(i >> 16) - b[3] = byte(i >> 8) - b[4] = byte(i) -} - -func getMint32(b []byte) int32 { - return (int32(b[1]) << 24) | (int32(b[2]) << 16) | (int32(b[3]) << 8) | (int32(b[4])) -} - -func putMint16(b []byte, i int16) { - b[0] = mint16 - b[1] = byte(i >> 8) - b[2] = byte(i) -} - -func getMint16(b []byte) (i int16) { - return (int16(b[1]) << 8) | int16(b[2]) -} - -func putMint8(b []byte, i int8) { - b[0] = mint8 - b[1] = byte(i) -} - -func getMint8(b []byte) (i int8) { - return int8(b[1]) -} - -func putMuint64(b []byte, u uint64) { - b[0] = muint64 - b[1] = byte(u >> 56) - b[2] = byte(u >> 48) - b[3] = byte(u >> 40) - b[4] = byte(u >> 32) - b[5] = byte(u >> 24) - b[6] = byte(u >> 16) - b[7] = byte(u >> 8) - b[8] = byte(u) -} - -func getMuint64(b []byte) uint64 { - return (uint64(b[1]) << 56) | (uint64(b[2]) << 48) | - (uint64(b[3]) << 40) | (uint64(b[4]) << 32) | - (uint64(b[5]) << 24) | (uint64(b[6]) << 16) | - (uint64(b[7]) << 8) | (uint64(b[8])) -} - -func putMuint32(b []byte, u uint32) { - b[0] = muint32 - b[1] = byte(u >> 24) - b[2] = byte(u >> 16) - b[3] = byte(u >> 8) - b[4] = byte(u) -} - -func getMuint32(b []byte) uint32 { - return (uint32(b[1]) << 24) | (uint32(b[2]) << 16) | (uint32(b[3]) << 8) | (uint32(b[4])) -} - -func putMuint16(b []byte, u uint16) { - b[0] = muint16 - b[1] = byte(u >> 8) - b[2] = byte(u) -} - -func getMuint16(b []byte) uint16 { - return 
(uint16(b[1]) << 8) | uint16(b[2]) -} - -func putMuint8(b []byte, u uint8) { - b[0] = muint8 - b[1] = byte(u) -} - -func getMuint8(b []byte) uint8 { - return uint8(b[1]) -} - -func getUnix(b []byte) (sec int64, nsec int32) { - sec = (int64(b[0]) << 56) | (int64(b[1]) << 48) | - (int64(b[2]) << 40) | (int64(b[3]) << 32) | - (int64(b[4]) << 24) | (int64(b[5]) << 16) | - (int64(b[6]) << 8) | (int64(b[7])) - - nsec = (int32(b[8]) << 24) | (int32(b[9]) << 16) | (int32(b[10]) << 8) | (int32(b[11])) - return -} - -func putUnix(b []byte, sec int64, nsec int32) { - b[0] = byte(sec >> 56) - b[1] = byte(sec >> 48) - b[2] = byte(sec >> 40) - b[3] = byte(sec >> 32) - b[4] = byte(sec >> 24) - b[5] = byte(sec >> 16) - b[6] = byte(sec >> 8) - b[7] = byte(sec) - b[8] = byte(nsec >> 24) - b[9] = byte(nsec >> 16) - b[10] = byte(nsec >> 8) - b[11] = byte(nsec) -} - -/* ----------------------------- - prefix utilities - ----------------------------- */ - -// write prefix and uint8 -func prefixu8(b []byte, pre byte, sz uint8) { - b[0] = pre - b[1] = byte(sz) -} - -// write prefix and big-endian uint16 -func prefixu16(b []byte, pre byte, sz uint16) { - b[0] = pre - b[1] = byte(sz >> 8) - b[2] = byte(sz) -} - -// write prefix and big-endian uint32 -func prefixu32(b []byte, pre byte, sz uint32) { - b[0] = pre - b[1] = byte(sz >> 24) - b[2] = byte(sz >> 16) - b[3] = byte(sz >> 8) - b[4] = byte(sz) -} - -func prefixu64(b []byte, pre byte, sz uint64) { - b[0] = pre - b[1] = byte(sz >> 56) - b[2] = byte(sz >> 48) - b[3] = byte(sz >> 40) - b[4] = byte(sz >> 32) - b[5] = byte(sz >> 24) - b[6] = byte(sz >> 16) - b[7] = byte(sz >> 8) - b[8] = byte(sz) -} diff --git a/vendor/github.com/tinylib/msgp/msgp/json.go b/vendor/github.com/tinylib/msgp/msgp/json.go deleted file mode 100644 index 77601e52c..000000000 --- a/vendor/github.com/tinylib/msgp/msgp/json.go +++ /dev/null @@ -1,568 +0,0 @@ -package msgp - -import ( - "bufio" - "encoding/base64" - "encoding/json" - "io" - "strconv" - "unicode/utf8" -) 
- -var ( - null = []byte("null") - hex = []byte("0123456789abcdef") -) - -var defuns [_maxtype]func(jsWriter, *Reader) (int, error) - -// note: there is an initialization loop if -// this isn't set up during init() -func init() { - // since none of these functions are inline-able, - // there is not much of a penalty to the indirect - // call. however, this is best expressed as a jump-table... - defuns = [_maxtype]func(jsWriter, *Reader) (int, error){ - StrType: rwString, - BinType: rwBytes, - MapType: rwMap, - ArrayType: rwArray, - Float64Type: rwFloat64, - Float32Type: rwFloat32, - BoolType: rwBool, - IntType: rwInt, - UintType: rwUint, - NilType: rwNil, - ExtensionType: rwExtension, - Complex64Type: rwExtension, - Complex128Type: rwExtension, - TimeType: rwTime, - } -} - -// this is the interface -// used to write json -type jsWriter interface { - io.Writer - io.ByteWriter - WriteString(string) (int, error) -} - -// CopyToJSON reads MessagePack from 'src' and copies it -// as JSON to 'dst' until EOF. -func CopyToJSON(dst io.Writer, src io.Reader) (n int64, err error) { - r := NewReader(src) - n, err = r.WriteToJSON(dst) - freeR(r) - return -} - -// WriteToJSON translates MessagePack from 'r' and writes it as -// JSON to 'w' until the underlying reader returns io.EOF. It returns -// the number of bytes written, and an error if it stopped before EOF. 
-func (r *Reader) WriteToJSON(w io.Writer) (n int64, err error) { - var j jsWriter - var bf *bufio.Writer - if jsw, ok := w.(jsWriter); ok { - j = jsw - } else { - bf = bufio.NewWriter(w) - j = bf - } - var nn int - for err == nil { - nn, err = rwNext(j, r) - n += int64(nn) - } - if err != io.EOF { - if bf != nil { - bf.Flush() - } - return - } - err = nil - if bf != nil { - err = bf.Flush() - } - return -} - -func rwNext(w jsWriter, src *Reader) (int, error) { - t, err := src.NextType() - if err != nil { - return 0, err - } - return defuns[t](w, src) -} - -func rwMap(dst jsWriter, src *Reader) (n int, err error) { - var comma bool - var sz uint32 - var field []byte - - sz, err = src.ReadMapHeader() - if err != nil { - return - } - - if sz == 0 { - return dst.WriteString("{}") - } - - err = dst.WriteByte('{') - if err != nil { - return - } - n++ - var nn int - for i := uint32(0); i < sz; i++ { - if comma { - err = dst.WriteByte(',') - if err != nil { - return - } - n++ - } - - field, err = src.ReadMapKeyPtr() - if err != nil { - return - } - nn, err = rwquoted(dst, field) - n += nn - if err != nil { - return - } - - err = dst.WriteByte(':') - if err != nil { - return - } - n++ - nn, err = rwNext(dst, src) - n += nn - if err != nil { - return - } - if !comma { - comma = true - } - } - - err = dst.WriteByte('}') - if err != nil { - return - } - n++ - return -} - -func rwArray(dst jsWriter, src *Reader) (n int, err error) { - err = dst.WriteByte('[') - if err != nil { - return - } - var sz uint32 - var nn int - sz, err = src.ReadArrayHeader() - if err != nil { - return - } - comma := false - for i := uint32(0); i < sz; i++ { - if comma { - err = dst.WriteByte(',') - if err != nil { - return - } - n++ - } - nn, err = rwNext(dst, src) - n += nn - if err != nil { - return - } - comma = true - } - - err = dst.WriteByte(']') - if err != nil { - return - } - n++ - return -} - -func rwNil(dst jsWriter, src *Reader) (int, error) { - err := src.ReadNil() - if err != nil { - 
return 0, err - } - return dst.Write(null) -} - -func rwFloat32(dst jsWriter, src *Reader) (int, error) { - f, err := src.ReadFloat32() - if err != nil { - return 0, err - } - src.scratch = strconv.AppendFloat(src.scratch[:0], float64(f), 'f', -1, 64) - return dst.Write(src.scratch) -} - -func rwFloat64(dst jsWriter, src *Reader) (int, error) { - f, err := src.ReadFloat64() - if err != nil { - return 0, err - } - src.scratch = strconv.AppendFloat(src.scratch[:0], f, 'f', -1, 32) - return dst.Write(src.scratch) -} - -func rwInt(dst jsWriter, src *Reader) (int, error) { - i, err := src.ReadInt64() - if err != nil { - return 0, err - } - src.scratch = strconv.AppendInt(src.scratch[:0], i, 10) - return dst.Write(src.scratch) -} - -func rwUint(dst jsWriter, src *Reader) (int, error) { - u, err := src.ReadUint64() - if err != nil { - return 0, err - } - src.scratch = strconv.AppendUint(src.scratch[:0], u, 10) - return dst.Write(src.scratch) -} - -func rwBool(dst jsWriter, src *Reader) (int, error) { - b, err := src.ReadBool() - if err != nil { - return 0, err - } - if b { - return dst.WriteString("true") - } - return dst.WriteString("false") -} - -func rwTime(dst jsWriter, src *Reader) (int, error) { - t, err := src.ReadTime() - if err != nil { - return 0, err - } - bts, err := t.MarshalJSON() - if err != nil { - return 0, err - } - return dst.Write(bts) -} - -func rwExtension(dst jsWriter, src *Reader) (n int, err error) { - et, err := src.peekExtensionType() - if err != nil { - return 0, err - } - - // registered extensions can override - // the JSON encoding - if j, ok := extensionReg[et]; ok { - var bts []byte - e := j() - err = src.ReadExtension(e) - if err != nil { - return - } - bts, err = json.Marshal(e) - if err != nil { - return - } - return dst.Write(bts) - } - - e := RawExtension{} - e.Type = et - err = src.ReadExtension(&e) - if err != nil { - return - } - - var nn int - err = dst.WriteByte('{') - if err != nil { - return - } - n++ - - nn, err = 
dst.WriteString(`"type:"`) - n += nn - if err != nil { - return - } - - src.scratch = strconv.AppendInt(src.scratch[0:0], int64(e.Type), 10) - nn, err = dst.Write(src.scratch) - n += nn - if err != nil { - return - } - - nn, err = dst.WriteString(`,"data":"`) - n += nn - if err != nil { - return - } - - enc := base64.NewEncoder(base64.StdEncoding, dst) - - nn, err = enc.Write(e.Data) - n += nn - if err != nil { - return - } - err = enc.Close() - if err != nil { - return - } - nn, err = dst.WriteString(`"}`) - n += nn - return -} - -func rwString(dst jsWriter, src *Reader) (n int, err error) { - var p []byte - p, err = src.R.Peek(1) - if err != nil { - return - } - lead := p[0] - var read int - - if isfixstr(lead) { - read = int(rfixstr(lead)) - src.R.Skip(1) - goto write - } - - switch lead { - case mstr8: - p, err = src.R.Next(2) - if err != nil { - return - } - read = int(uint8(p[1])) - case mstr16: - p, err = src.R.Next(3) - if err != nil { - return - } - read = int(big.Uint16(p[1:])) - case mstr32: - p, err = src.R.Next(5) - if err != nil { - return - } - read = int(big.Uint32(p[1:])) - default: - err = badPrefix(StrType, lead) - return - } -write: - p, err = src.R.Next(read) - if err != nil { - return - } - n, err = rwquoted(dst, p) - return -} - -func rwBytes(dst jsWriter, src *Reader) (n int, err error) { - var nn int - err = dst.WriteByte('"') - if err != nil { - return - } - n++ - src.scratch, err = src.ReadBytes(src.scratch[:0]) - if err != nil { - return - } - enc := base64.NewEncoder(base64.StdEncoding, dst) - nn, err = enc.Write(src.scratch) - n += nn - if err != nil { - return - } - err = enc.Close() - if err != nil { - return - } - err = dst.WriteByte('"') - if err != nil { - return - } - n++ - return -} - -// Below (c) The Go Authors, 2009-2014 -// Subject to the BSD-style license found at http://golang.org -// -// see: encoding/json/encode.go:(*encodeState).stringbytes() -func rwquoted(dst jsWriter, s []byte) (n int, err error) { - var nn int - err 
= dst.WriteByte('"') - if err != nil { - return - } - n++ - start := 0 - for i := 0; i < len(s); { - if b := s[i]; b < utf8.RuneSelf { - if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' { - i++ - continue - } - if start < i { - nn, err = dst.Write(s[start:i]) - n += nn - if err != nil { - return - } - } - switch b { - case '\\', '"': - err = dst.WriteByte('\\') - if err != nil { - return - } - n++ - err = dst.WriteByte(b) - if err != nil { - return - } - n++ - case '\n': - err = dst.WriteByte('\\') - if err != nil { - return - } - n++ - err = dst.WriteByte('n') - if err != nil { - return - } - n++ - case '\r': - err = dst.WriteByte('\\') - if err != nil { - return - } - n++ - err = dst.WriteByte('r') - if err != nil { - return - } - n++ - case '\t': - err = dst.WriteByte('\\') - if err != nil { - return - } - n++ - err = dst.WriteByte('t') - if err != nil { - return - } - n++ - default: - // This encodes bytes < 0x20 except for \t, \n and \r. - // It also escapes <, >, and & - // because they can lead to security holes when - // user-controlled strings are rendered into JSON - // and served to some browsers. - nn, err = dst.WriteString(`\u00`) - n += nn - if err != nil { - return - } - err = dst.WriteByte(hex[b>>4]) - if err != nil { - return - } - n++ - err = dst.WriteByte(hex[b&0xF]) - if err != nil { - return - } - n++ - } - i++ - start = i - continue - } - c, size := utf8.DecodeRune(s[i:]) - if c == utf8.RuneError && size == 1 { - if start < i { - nn, err = dst.Write(s[start:i]) - n += nn - if err != nil { - return - } - } - nn, err = dst.WriteString(`\ufffd`) - n += nn - if err != nil { - return - } - i += size - start = i - continue - } - // U+2028 is LINE SEPARATOR. - // U+2029 is PARAGRAPH SEPARATOR. - // They are both technically valid characters in JSON strings, - // but don't work in JSONP, which has to be evaluated as JavaScript, - // and can lead to security holes there. 
It is valid JSON to - // escape them, so we do so unconditionally. - // See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion. - if c == '\u2028' || c == '\u2029' { - if start < i { - nn, err = dst.Write(s[start:i]) - n += nn - if err != nil { - return - } - } - nn, err = dst.WriteString(`\u202`) - n += nn - if err != nil { - return - } - err = dst.WriteByte(hex[c&0xF]) - if err != nil { - return - } - n++ - i += size - start = i - continue - } - i += size - } - if start < len(s) { - nn, err = dst.Write(s[start:]) - n += nn - if err != nil { - return - } - } - err = dst.WriteByte('"') - if err != nil { - return - } - n++ - return -} diff --git a/vendor/github.com/tinylib/msgp/msgp/json_bytes.go b/vendor/github.com/tinylib/msgp/msgp/json_bytes.go deleted file mode 100644 index 438caf539..000000000 --- a/vendor/github.com/tinylib/msgp/msgp/json_bytes.go +++ /dev/null @@ -1,363 +0,0 @@ -package msgp - -import ( - "bufio" - "encoding/base64" - "encoding/json" - "io" - "strconv" - "time" -) - -var unfuns [_maxtype]func(jsWriter, []byte, []byte) ([]byte, []byte, error) - -func init() { - - // NOTE(pmh): this is best expressed as a jump table, - // but gc doesn't do that yet. revisit post-go1.5. - unfuns = [_maxtype]func(jsWriter, []byte, []byte) ([]byte, []byte, error){ - StrType: rwStringBytes, - BinType: rwBytesBytes, - MapType: rwMapBytes, - ArrayType: rwArrayBytes, - Float64Type: rwFloat64Bytes, - Float32Type: rwFloat32Bytes, - BoolType: rwBoolBytes, - IntType: rwIntBytes, - UintType: rwUintBytes, - NilType: rwNullBytes, - ExtensionType: rwExtensionBytes, - Complex64Type: rwExtensionBytes, - Complex128Type: rwExtensionBytes, - TimeType: rwTimeBytes, - } -} - -// UnmarshalAsJSON takes raw messagepack and writes -// it as JSON to 'w'. If an error is returned, the -// bytes not translated will also be returned. If -// no errors are encountered, the length of the returned -// slice will be zero. 
-func UnmarshalAsJSON(w io.Writer, msg []byte) ([]byte, error) { - var ( - scratch []byte - cast bool - dst jsWriter - err error - ) - if jsw, ok := w.(jsWriter); ok { - dst = jsw - cast = true - } else { - dst = bufio.NewWriterSize(w, 512) - } - for len(msg) > 0 && err == nil { - msg, scratch, err = writeNext(dst, msg, scratch) - } - if !cast && err == nil { - err = dst.(*bufio.Writer).Flush() - } - return msg, err -} - -func writeNext(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { - if len(msg) < 1 { - return msg, scratch, ErrShortBytes - } - t := getType(msg[0]) - if t == InvalidType { - return msg, scratch, InvalidPrefixError(msg[0]) - } - if t == ExtensionType { - et, err := peekExtension(msg) - if err != nil { - return nil, scratch, err - } - if et == TimeExtension { - t = TimeType - } - } - return unfuns[t](w, msg, scratch) -} - -func rwArrayBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { - sz, msg, err := ReadArrayHeaderBytes(msg) - if err != nil { - return msg, scratch, err - } - err = w.WriteByte('[') - if err != nil { - return msg, scratch, err - } - for i := uint32(0); i < sz; i++ { - if i != 0 { - err = w.WriteByte(',') - if err != nil { - return msg, scratch, err - } - } - msg, scratch, err = writeNext(w, msg, scratch) - if err != nil { - return msg, scratch, err - } - } - err = w.WriteByte(']') - return msg, scratch, err -} - -func rwMapBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { - sz, msg, err := ReadMapHeaderBytes(msg) - if err != nil { - return msg, scratch, err - } - err = w.WriteByte('{') - if err != nil { - return msg, scratch, err - } - for i := uint32(0); i < sz; i++ { - if i != 0 { - err = w.WriteByte(',') - if err != nil { - return msg, scratch, err - } - } - msg, scratch, err = rwMapKeyBytes(w, msg, scratch) - if err != nil { - return msg, scratch, err - } - err = w.WriteByte(':') - if err != nil { - return msg, scratch, err - } - msg, scratch, err = writeNext(w, msg, 
scratch) - if err != nil { - return msg, scratch, err - } - } - err = w.WriteByte('}') - return msg, scratch, err -} - -func rwMapKeyBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { - msg, scratch, err := rwStringBytes(w, msg, scratch) - if err != nil { - if tperr, ok := err.(TypeError); ok && tperr.Encoded == BinType { - return rwBytesBytes(w, msg, scratch) - } - } - return msg, scratch, err -} - -func rwStringBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { - str, msg, err := ReadStringZC(msg) - if err != nil { - return msg, scratch, err - } - _, err = rwquoted(w, str) - return msg, scratch, err -} - -func rwBytesBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { - bts, msg, err := ReadBytesZC(msg) - if err != nil { - return msg, scratch, err - } - l := base64.StdEncoding.EncodedLen(len(bts)) - if cap(scratch) >= l { - scratch = scratch[0:l] - } else { - scratch = make([]byte, l) - } - base64.StdEncoding.Encode(scratch, bts) - err = w.WriteByte('"') - if err != nil { - return msg, scratch, err - } - _, err = w.Write(scratch) - if err != nil { - return msg, scratch, err - } - err = w.WriteByte('"') - return msg, scratch, err -} - -func rwNullBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { - msg, err := ReadNilBytes(msg) - if err != nil { - return msg, scratch, err - } - _, err = w.Write(null) - return msg, scratch, err -} - -func rwBoolBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { - b, msg, err := ReadBoolBytes(msg) - if err != nil { - return msg, scratch, err - } - if b { - _, err = w.WriteString("true") - return msg, scratch, err - } - _, err = w.WriteString("false") - return msg, scratch, err -} - -func rwIntBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { - i, msg, err := ReadInt64Bytes(msg) - if err != nil { - return msg, scratch, err - } - scratch = strconv.AppendInt(scratch[0:0], i, 10) - _, err = w.Write(scratch) - 
return msg, scratch, err -} - -func rwUintBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { - u, msg, err := ReadUint64Bytes(msg) - if err != nil { - return msg, scratch, err - } - scratch = strconv.AppendUint(scratch[0:0], u, 10) - _, err = w.Write(scratch) - return msg, scratch, err -} - -func rwFloatBytes(w jsWriter, msg []byte, f64 bool, scratch []byte) ([]byte, []byte, error) { - var f float64 - var err error - var sz int - if f64 { - sz = 64 - f, msg, err = ReadFloat64Bytes(msg) - } else { - sz = 32 - var v float32 - v, msg, err = ReadFloat32Bytes(msg) - f = float64(v) - } - if err != nil { - return msg, scratch, err - } - scratch = strconv.AppendFloat(scratch, f, 'f', -1, sz) - _, err = w.Write(scratch) - return msg, scratch, err -} - -func rwFloat32Bytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { - var f float32 - var err error - f, msg, err = ReadFloat32Bytes(msg) - if err != nil { - return msg, scratch, err - } - scratch = strconv.AppendFloat(scratch[:0], float64(f), 'f', -1, 32) - _, err = w.Write(scratch) - return msg, scratch, err -} - -func rwFloat64Bytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { - var f float64 - var err error - f, msg, err = ReadFloat64Bytes(msg) - if err != nil { - return msg, scratch, err - } - scratch = strconv.AppendFloat(scratch[:0], f, 'f', -1, 64) - _, err = w.Write(scratch) - return msg, scratch, err -} - -func rwTimeBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { - var t time.Time - var err error - t, msg, err = ReadTimeBytes(msg) - if err != nil { - return msg, scratch, err - } - bts, err := t.MarshalJSON() - if err != nil { - return msg, scratch, err - } - _, err = w.Write(bts) - return msg, scratch, err -} - -func rwExtensionBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { - var err error - var et int8 - et, err = peekExtension(msg) - if err != nil { - return msg, scratch, err - } - - // if it's time.Time - 
if et == TimeExtension { - var tm time.Time - tm, msg, err = ReadTimeBytes(msg) - if err != nil { - return msg, scratch, err - } - bts, err := tm.MarshalJSON() - if err != nil { - return msg, scratch, err - } - _, err = w.Write(bts) - return msg, scratch, err - } - - // if the extension is registered, - // use its canonical JSON form - if f, ok := extensionReg[et]; ok { - e := f() - msg, err = ReadExtensionBytes(msg, e) - if err != nil { - return msg, scratch, err - } - bts, err := json.Marshal(e) - if err != nil { - return msg, scratch, err - } - _, err = w.Write(bts) - return msg, scratch, err - } - - // otherwise, write `{"type": , "data": ""}` - r := RawExtension{} - r.Type = et - msg, err = ReadExtensionBytes(msg, &r) - if err != nil { - return msg, scratch, err - } - scratch, err = writeExt(w, r, scratch) - return msg, scratch, err -} - -func writeExt(w jsWriter, r RawExtension, scratch []byte) ([]byte, error) { - _, err := w.WriteString(`{"type":`) - if err != nil { - return scratch, err - } - scratch = strconv.AppendInt(scratch[0:0], int64(r.Type), 10) - _, err = w.Write(scratch) - if err != nil { - return scratch, err - } - _, err = w.WriteString(`,"data":"`) - if err != nil { - return scratch, err - } - l := base64.StdEncoding.EncodedLen(len(r.Data)) - if cap(scratch) >= l { - scratch = scratch[0:l] - } else { - scratch = make([]byte, l) - } - base64.StdEncoding.Encode(scratch, r.Data) - _, err = w.Write(scratch) - if err != nil { - return scratch, err - } - _, err = w.WriteString(`"}`) - return scratch, err -} diff --git a/vendor/github.com/tinylib/msgp/msgp/number.go b/vendor/github.com/tinylib/msgp/msgp/number.go deleted file mode 100644 index ad07ef995..000000000 --- a/vendor/github.com/tinylib/msgp/msgp/number.go +++ /dev/null @@ -1,267 +0,0 @@ -package msgp - -import ( - "math" - "strconv" -) - -// The portable parts of the Number implementation - -// Number can be -// an int64, uint64, float32, -// or float64 internally. 
-// It can decode itself -// from any of the native -// messagepack number types. -// The zero-value of Number -// is Int(0). Using the equality -// operator with Number compares -// both the type and the value -// of the number. -type Number struct { - // internally, this - // is just a tagged union. - // the raw bits of the number - // are stored the same way regardless. - bits uint64 - typ Type -} - -// AsInt sets the number to an int64. -func (n *Number) AsInt(i int64) { - - // we always store int(0) - // as {0, InvalidType} in - // order to preserve - // the behavior of the == operator - if i == 0 { - n.typ = InvalidType - n.bits = 0 - return - } - - n.typ = IntType - n.bits = uint64(i) -} - -// AsUint sets the number to a uint64. -func (n *Number) AsUint(u uint64) { - n.typ = UintType - n.bits = u -} - -// AsFloat32 sets the value of the number -// to a float32. -func (n *Number) AsFloat32(f float32) { - n.typ = Float32Type - n.bits = uint64(math.Float32bits(f)) -} - -// AsFloat64 sets the value of the -// number to a float64. -func (n *Number) AsFloat64(f float64) { - n.typ = Float64Type - n.bits = math.Float64bits(f) -} - -// Int casts the number as an int64, and -// returns whether or not that was the -// underlying type. -func (n *Number) Int() (int64, bool) { - return int64(n.bits), n.typ == IntType || n.typ == InvalidType -} - -// Uint casts the number as a uint64, and returns -// whether or not that was the underlying type. -func (n *Number) Uint() (uint64, bool) { - return n.bits, n.typ == UintType -} - -// Float casts the number to a float64, and -// returns whether or not that was the underlying -// type (either a float64 or a float32). 
-func (n *Number) Float() (float64, bool) { - switch n.typ { - case Float32Type: - return float64(math.Float32frombits(uint32(n.bits))), true - case Float64Type: - return math.Float64frombits(n.bits), true - default: - return 0.0, false - } -} - -// Type will return one of: -// Float64Type, Float32Type, UintType, or IntType. -func (n *Number) Type() Type { - if n.typ == InvalidType { - return IntType - } - return n.typ -} - -// DecodeMsg implements msgp.Decodable -func (n *Number) DecodeMsg(r *Reader) error { - typ, err := r.NextType() - if err != nil { - return err - } - switch typ { - case Float32Type: - f, err := r.ReadFloat32() - if err != nil { - return err - } - n.AsFloat32(f) - return nil - case Float64Type: - f, err := r.ReadFloat64() - if err != nil { - return err - } - n.AsFloat64(f) - return nil - case IntType: - i, err := r.ReadInt64() - if err != nil { - return err - } - n.AsInt(i) - return nil - case UintType: - u, err := r.ReadUint64() - if err != nil { - return err - } - n.AsUint(u) - return nil - default: - return TypeError{Encoded: typ, Method: IntType} - } -} - -// UnmarshalMsg implements msgp.Unmarshaler -func (n *Number) UnmarshalMsg(b []byte) ([]byte, error) { - typ := NextType(b) - switch typ { - case IntType: - i, o, err := ReadInt64Bytes(b) - if err != nil { - return b, err - } - n.AsInt(i) - return o, nil - case UintType: - u, o, err := ReadUint64Bytes(b) - if err != nil { - return b, err - } - n.AsUint(u) - return o, nil - case Float64Type: - f, o, err := ReadFloat64Bytes(b) - if err != nil { - return b, err - } - n.AsFloat64(f) - return o, nil - case Float32Type: - f, o, err := ReadFloat32Bytes(b) - if err != nil { - return b, err - } - n.AsFloat32(f) - return o, nil - default: - return b, TypeError{Method: IntType, Encoded: typ} - } -} - -// MarshalMsg implements msgp.Marshaler -func (n *Number) MarshalMsg(b []byte) ([]byte, error) { - switch n.typ { - case IntType: - return AppendInt64(b, int64(n.bits)), nil - case UintType: - return 
AppendUint64(b, uint64(n.bits)), nil - case Float64Type: - return AppendFloat64(b, math.Float64frombits(n.bits)), nil - case Float32Type: - return AppendFloat32(b, math.Float32frombits(uint32(n.bits))), nil - default: - return AppendInt64(b, 0), nil - } -} - -// EncodeMsg implements msgp.Encodable -func (n *Number) EncodeMsg(w *Writer) error { - switch n.typ { - case IntType: - return w.WriteInt64(int64(n.bits)) - case UintType: - return w.WriteUint64(n.bits) - case Float64Type: - return w.WriteFloat64(math.Float64frombits(n.bits)) - case Float32Type: - return w.WriteFloat32(math.Float32frombits(uint32(n.bits))) - default: - return w.WriteInt64(0) - } -} - -// Msgsize implements msgp.Sizer -func (n *Number) Msgsize() int { - switch n.typ { - case Float32Type: - return Float32Size - case Float64Type: - return Float64Size - case IntType: - return Int64Size - case UintType: - return Uint64Size - default: - return 1 // fixint(0) - } -} - -// MarshalJSON implements json.Marshaler -func (n *Number) MarshalJSON() ([]byte, error) { - t := n.Type() - if t == InvalidType { - return []byte{'0'}, nil - } - out := make([]byte, 0, 32) - switch t { - case Float32Type, Float64Type: - f, _ := n.Float() - return strconv.AppendFloat(out, f, 'f', -1, 64), nil - case IntType: - i, _ := n.Int() - return strconv.AppendInt(out, i, 10), nil - case UintType: - u, _ := n.Uint() - return strconv.AppendUint(out, u, 10), nil - default: - panic("(*Number).typ is invalid") - } -} - -// String implements fmt.Stringer -func (n *Number) String() string { - switch n.typ { - case InvalidType: - return "0" - case Float32Type, Float64Type: - f, _ := n.Float() - return strconv.FormatFloat(f, 'f', -1, 64) - case IntType: - i, _ := n.Int() - return strconv.FormatInt(i, 10) - case UintType: - u, _ := n.Uint() - return strconv.FormatUint(u, 10) - default: - panic("(*Number).typ is invalid") - } -} diff --git a/vendor/github.com/tinylib/msgp/msgp/purego.go b/vendor/github.com/tinylib/msgp/msgp/purego.go 
deleted file mode 100644 index c828f7eca..000000000 --- a/vendor/github.com/tinylib/msgp/msgp/purego.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build purego appengine - -package msgp - -// let's just assume appengine -// uses 64-bit hardware... -const smallint = false - -func UnsafeString(b []byte) string { - return string(b) -} - -func UnsafeBytes(s string) []byte { - return []byte(s) -} diff --git a/vendor/github.com/tinylib/msgp/msgp/read.go b/vendor/github.com/tinylib/msgp/msgp/read.go deleted file mode 100644 index aa668c573..000000000 --- a/vendor/github.com/tinylib/msgp/msgp/read.go +++ /dev/null @@ -1,1358 +0,0 @@ -package msgp - -import ( - "io" - "math" - "sync" - "time" - - "github.com/philhofer/fwd" -) - -// where we keep old *Readers -var readerPool = sync.Pool{New: func() interface{} { return &Reader{} }} - -// Type is a MessagePack wire type, -// including this package's built-in -// extension types. -type Type byte - -// MessagePack Types -// -// The zero value of Type -// is InvalidType. -const ( - InvalidType Type = iota - - // MessagePack built-in types - - StrType - BinType - MapType - ArrayType - Float64Type - Float32Type - BoolType - IntType - UintType - NilType - ExtensionType - - // pseudo-types provided - // by extensions - - Complex64Type - Complex128Type - TimeType - - _maxtype -) - -// String implements fmt.Stringer -func (t Type) String() string { - switch t { - case StrType: - return "str" - case BinType: - return "bin" - case MapType: - return "map" - case ArrayType: - return "array" - case Float64Type: - return "float64" - case Float32Type: - return "float32" - case BoolType: - return "bool" - case UintType: - return "uint" - case IntType: - return "int" - case ExtensionType: - return "ext" - case NilType: - return "nil" - default: - return "" - } -} - -func freeR(m *Reader) { - readerPool.Put(m) -} - -// Unmarshaler is the interface fulfilled -// by objects that know how to unmarshal -// themselves from MessagePack. 
-// UnmarshalMsg unmarshals the object -// from binary, returing any leftover -// bytes and any errors encountered. -type Unmarshaler interface { - UnmarshalMsg([]byte) ([]byte, error) -} - -// Decodable is the interface fulfilled -// by objects that know how to read -// themselves from a *Reader. -type Decodable interface { - DecodeMsg(*Reader) error -} - -// Decode decodes 'd' from 'r'. -func Decode(r io.Reader, d Decodable) error { - rd := NewReader(r) - err := d.DecodeMsg(rd) - freeR(rd) - return err -} - -// NewReader returns a *Reader that -// reads from the provided reader. The -// reader will be buffered. -func NewReader(r io.Reader) *Reader { - p := readerPool.Get().(*Reader) - if p.R == nil { - p.R = fwd.NewReader(r) - } else { - p.R.Reset(r) - } - return p -} - -// NewReaderSize returns a *Reader with a buffer of the given size. -// (This is vastly preferable to passing the decoder a reader that is already buffered.) -func NewReaderSize(r io.Reader, sz int) *Reader { - return &Reader{R: fwd.NewReaderSize(r, sz)} -} - -// Reader wraps an io.Reader and provides -// methods to read MessagePack-encoded values -// from it. Readers are buffered. -type Reader struct { - // R is the buffered reader - // that the Reader uses - // to decode MessagePack. - // The Reader itself - // is stateless; all the - // buffering is done - // within R. - R *fwd.Reader - scratch []byte -} - -// Read implements `io.Reader` -func (m *Reader) Read(p []byte) (int, error) { - return m.R.Read(p) -} - -// CopyNext reads the next object from m without decoding it and writes it to w. -// It avoids unnecessary copies internally. -func (m *Reader) CopyNext(w io.Writer) (int64, error) { - sz, o, err := getNextSize(m.R) - if err != nil { - return 0, err - } - - var n int64 - // Opportunistic optimization: if we can fit the whole thing in the m.R - // buffer, then just get a pointer to that, and pass it to w.Write, - // avoiding an allocation. 
- if int(sz) <= m.R.BufferSize() { - var nn int - var buf []byte - buf, err = m.R.Next(int(sz)) - if err != nil { - if err == io.ErrUnexpectedEOF { - err = ErrShortBytes - } - return 0, err - } - nn, err = w.Write(buf) - n += int64(nn) - } else { - // Fall back to io.CopyN. - // May avoid allocating if w is a ReaderFrom (e.g. bytes.Buffer) - n, err = io.CopyN(w, m.R, int64(sz)) - if err == io.ErrUnexpectedEOF { - err = ErrShortBytes - } - } - if err != nil { - return n, err - } else if n < int64(sz) { - return n, io.ErrShortWrite - } - - // for maps and slices, read elements - for x := uintptr(0); x < o; x++ { - var n2 int64 - n2, err = m.CopyNext(w) - if err != nil { - return n, err - } - n += n2 - } - return n, nil -} - -// ReadFull implements `io.ReadFull` -func (m *Reader) ReadFull(p []byte) (int, error) { - return m.R.ReadFull(p) -} - -// Reset resets the underlying reader. -func (m *Reader) Reset(r io.Reader) { m.R.Reset(r) } - -// Buffered returns the number of bytes currently in the read buffer. -func (m *Reader) Buffered() int { return m.R.Buffered() } - -// BufferSize returns the capacity of the read buffer. -func (m *Reader) BufferSize() int { return m.R.BufferSize() } - -// NextType returns the next object type to be decoded. 
-func (m *Reader) NextType() (Type, error) { - p, err := m.R.Peek(1) - if err != nil { - return InvalidType, err - } - t := getType(p[0]) - if t == InvalidType { - return t, InvalidPrefixError(p[0]) - } - if t == ExtensionType { - v, err := m.peekExtensionType() - if err != nil { - return InvalidType, err - } - switch v { - case Complex64Extension: - return Complex64Type, nil - case Complex128Extension: - return Complex128Type, nil - case TimeExtension: - return TimeType, nil - } - } - return t, nil -} - -// IsNil returns whether or not -// the next byte is a null messagepack byte -func (m *Reader) IsNil() bool { - p, err := m.R.Peek(1) - return err == nil && p[0] == mnil -} - -// getNextSize returns the size of the next object on the wire. -// returns (obj size, obj elements, error) -// only maps and arrays have non-zero obj elements -// for maps and arrays, obj size does not include elements -// -// use uintptr b/c it's guaranteed to be large enough -// to hold whatever we can fit in memory. 
-func getNextSize(r *fwd.Reader) (uintptr, uintptr, error) { - b, err := r.Peek(1) - if err != nil { - return 0, 0, err - } - lead := b[0] - spec := &sizes[lead] - size, mode := spec.size, spec.extra - if size == 0 { - return 0, 0, InvalidPrefixError(lead) - } - if mode >= 0 { - return uintptr(size), uintptr(mode), nil - } - b, err = r.Peek(int(size)) - if err != nil { - return 0, 0, err - } - switch mode { - case extra8: - return uintptr(size) + uintptr(b[1]), 0, nil - case extra16: - return uintptr(size) + uintptr(big.Uint16(b[1:])), 0, nil - case extra32: - return uintptr(size) + uintptr(big.Uint32(b[1:])), 0, nil - case map16v: - return uintptr(size), 2 * uintptr(big.Uint16(b[1:])), nil - case map32v: - return uintptr(size), 2 * uintptr(big.Uint32(b[1:])), nil - case array16v: - return uintptr(size), uintptr(big.Uint16(b[1:])), nil - case array32v: - return uintptr(size), uintptr(big.Uint32(b[1:])), nil - default: - return 0, 0, fatal - } -} - -// Skip skips over the next object, regardless of -// its type. If it is an array or map, the whole array -// or map will be skipped. -func (m *Reader) Skip() error { - var ( - v uintptr // bytes - o uintptr // objects - err error - p []byte - ) - - // we can use the faster - // method if we have enough - // buffered data - if m.R.Buffered() >= 5 { - p, err = m.R.Peek(5) - if err != nil { - return err - } - v, o, err = getSize(p) - if err != nil { - return err - } - } else { - v, o, err = getNextSize(m.R) - if err != nil { - return err - } - } - - // 'v' is always non-zero - // if err == nil - _, err = m.R.Skip(int(v)) - if err != nil { - return err - } - - // for maps and slices, skip elements - for x := uintptr(0); x < o; x++ { - err = m.Skip() - if err != nil { - return err - } - } - return nil -} - -// ReadMapHeader reads the next object -// as a map header and returns the size -// of the map and the number of bytes written. -// It will return a TypeError{} if the next -// object is not a map. 
-func (m *Reader) ReadMapHeader() (sz uint32, err error) { - var p []byte - var lead byte - p, err = m.R.Peek(1) - if err != nil { - return - } - lead = p[0] - if isfixmap(lead) { - sz = uint32(rfixmap(lead)) - _, err = m.R.Skip(1) - return - } - switch lead { - case mmap16: - p, err = m.R.Next(3) - if err != nil { - return - } - sz = uint32(big.Uint16(p[1:])) - return - case mmap32: - p, err = m.R.Next(5) - if err != nil { - return - } - sz = big.Uint32(p[1:]) - return - default: - err = badPrefix(MapType, lead) - return - } -} - -// ReadMapKey reads either a 'str' or 'bin' field from -// the reader and returns the value as a []byte. It uses -// scratch for storage if it is large enough. -func (m *Reader) ReadMapKey(scratch []byte) ([]byte, error) { - out, err := m.ReadStringAsBytes(scratch) - if err != nil { - if tperr, ok := err.(TypeError); ok && tperr.Encoded == BinType { - return m.ReadBytes(scratch) - } - return nil, err - } - return out, nil -} - -// MapKeyPtr returns a []byte pointing to the contents -// of a valid map key. The key cannot be empty, and it -// must be shorter than the total buffer size of the -// *Reader. Additionally, the returned slice is only -// valid until the next *Reader method call. Users -// should exercise extreme care when using this -// method; writing into the returned slice may -// corrupt future reads. 
-func (m *Reader) ReadMapKeyPtr() ([]byte, error) { - p, err := m.R.Peek(1) - if err != nil { - return nil, err - } - lead := p[0] - var read int - if isfixstr(lead) { - read = int(rfixstr(lead)) - m.R.Skip(1) - goto fill - } - switch lead { - case mstr8, mbin8: - p, err = m.R.Next(2) - if err != nil { - return nil, err - } - read = int(p[1]) - case mstr16, mbin16: - p, err = m.R.Next(3) - if err != nil { - return nil, err - } - read = int(big.Uint16(p[1:])) - case mstr32, mbin32: - p, err = m.R.Next(5) - if err != nil { - return nil, err - } - read = int(big.Uint32(p[1:])) - default: - return nil, badPrefix(StrType, lead) - } -fill: - if read == 0 { - return nil, ErrShortBytes - } - return m.R.Next(read) -} - -// ReadArrayHeader reads the next object as an -// array header and returns the size of the array -// and the number of bytes read. -func (m *Reader) ReadArrayHeader() (sz uint32, err error) { - var lead byte - var p []byte - p, err = m.R.Peek(1) - if err != nil { - return - } - lead = p[0] - if isfixarray(lead) { - sz = uint32(rfixarray(lead)) - _, err = m.R.Skip(1) - return - } - switch lead { - case marray16: - p, err = m.R.Next(3) - if err != nil { - return - } - sz = uint32(big.Uint16(p[1:])) - return - - case marray32: - p, err = m.R.Next(5) - if err != nil { - return - } - sz = big.Uint32(p[1:]) - return - - default: - err = badPrefix(ArrayType, lead) - return - } -} - -// ReadNil reads a 'nil' MessagePack byte from the reader -func (m *Reader) ReadNil() error { - p, err := m.R.Peek(1) - if err != nil { - return err - } - if p[0] != mnil { - return badPrefix(NilType, p[0]) - } - _, err = m.R.Skip(1) - return err -} - -// ReadFloat64 reads a float64 from the reader. -// (If the value on the wire is encoded as a float32, -// it will be up-cast to a float64.) 
-func (m *Reader) ReadFloat64() (f float64, err error) { - var p []byte - p, err = m.R.Peek(9) - if err != nil { - // we'll allow a coversion from float32 to float64, - // since we don't lose any precision - if err == io.EOF && len(p) > 0 && p[0] == mfloat32 { - ef, err := m.ReadFloat32() - return float64(ef), err - } - return - } - if p[0] != mfloat64 { - // see above - if p[0] == mfloat32 { - ef, err := m.ReadFloat32() - return float64(ef), err - } - err = badPrefix(Float64Type, p[0]) - return - } - f = math.Float64frombits(getMuint64(p)) - _, err = m.R.Skip(9) - return -} - -// ReadFloat32 reads a float32 from the reader -func (m *Reader) ReadFloat32() (f float32, err error) { - var p []byte - p, err = m.R.Peek(5) - if err != nil { - return - } - if p[0] != mfloat32 { - err = badPrefix(Float32Type, p[0]) - return - } - f = math.Float32frombits(getMuint32(p)) - _, err = m.R.Skip(5) - return -} - -// ReadBool reads a bool from the reader -func (m *Reader) ReadBool() (b bool, err error) { - var p []byte - p, err = m.R.Peek(1) - if err != nil { - return - } - switch p[0] { - case mtrue: - b = true - case mfalse: - default: - err = badPrefix(BoolType, p[0]) - return - } - _, err = m.R.Skip(1) - return -} - -// ReadInt64 reads an int64 from the reader -func (m *Reader) ReadInt64() (i int64, err error) { - var p []byte - var lead byte - p, err = m.R.Peek(1) - if err != nil { - return - } - lead = p[0] - - if isfixint(lead) { - i = int64(rfixint(lead)) - _, err = m.R.Skip(1) - return - } else if isnfixint(lead) { - i = int64(rnfixint(lead)) - _, err = m.R.Skip(1) - return - } - - switch lead { - case mint8: - p, err = m.R.Next(2) - if err != nil { - return - } - i = int64(getMint8(p)) - return - - case muint8: - p, err = m.R.Next(2) - if err != nil { - return - } - i = int64(getMuint8(p)) - return - - case mint16: - p, err = m.R.Next(3) - if err != nil { - return - } - i = int64(getMint16(p)) - return - - case muint16: - p, err = m.R.Next(3) - if err != nil { - return - 
} - i = int64(getMuint16(p)) - return - - case mint32: - p, err = m.R.Next(5) - if err != nil { - return - } - i = int64(getMint32(p)) - return - - case muint32: - p, err = m.R.Next(5) - if err != nil { - return - } - i = int64(getMuint32(p)) - return - - case mint64: - p, err = m.R.Next(9) - if err != nil { - return - } - i = getMint64(p) - return - - case muint64: - p, err = m.R.Next(9) - if err != nil { - return - } - u := getMuint64(p) - if u > math.MaxInt64 { - err = UintOverflow{Value: u, FailedBitsize: 64} - return - } - i = int64(u) - return - - default: - err = badPrefix(IntType, lead) - return - } -} - -// ReadInt32 reads an int32 from the reader -func (m *Reader) ReadInt32() (i int32, err error) { - var in int64 - in, err = m.ReadInt64() - if in > math.MaxInt32 || in < math.MinInt32 { - err = IntOverflow{Value: in, FailedBitsize: 32} - return - } - i = int32(in) - return -} - -// ReadInt16 reads an int16 from the reader -func (m *Reader) ReadInt16() (i int16, err error) { - var in int64 - in, err = m.ReadInt64() - if in > math.MaxInt16 || in < math.MinInt16 { - err = IntOverflow{Value: in, FailedBitsize: 16} - return - } - i = int16(in) - return -} - -// ReadInt8 reads an int8 from the reader -func (m *Reader) ReadInt8() (i int8, err error) { - var in int64 - in, err = m.ReadInt64() - if in > math.MaxInt8 || in < math.MinInt8 { - err = IntOverflow{Value: in, FailedBitsize: 8} - return - } - i = int8(in) - return -} - -// ReadInt reads an int from the reader -func (m *Reader) ReadInt() (i int, err error) { - if smallint { - var in int32 - in, err = m.ReadInt32() - i = int(in) - return - } - var in int64 - in, err = m.ReadInt64() - i = int(in) - return -} - -// ReadUint64 reads a uint64 from the reader -func (m *Reader) ReadUint64() (u uint64, err error) { - var p []byte - var lead byte - p, err = m.R.Peek(1) - if err != nil { - return - } - lead = p[0] - if isfixint(lead) { - u = uint64(rfixint(lead)) - _, err = m.R.Skip(1) - return - } - switch lead { - 
case mint8: - p, err = m.R.Next(2) - if err != nil { - return - } - v := int64(getMint8(p)) - if v < 0 { - err = UintBelowZero{Value: v} - return - } - u = uint64(v) - return - - case muint8: - p, err = m.R.Next(2) - if err != nil { - return - } - u = uint64(getMuint8(p)) - return - - case mint16: - p, err = m.R.Next(3) - if err != nil { - return - } - v := int64(getMint16(p)) - if v < 0 { - err = UintBelowZero{Value: v} - return - } - u = uint64(v) - return - - case muint16: - p, err = m.R.Next(3) - if err != nil { - return - } - u = uint64(getMuint16(p)) - return - - case mint32: - p, err = m.R.Next(5) - if err != nil { - return - } - v := int64(getMint32(p)) - if v < 0 { - err = UintBelowZero{Value: v} - return - } - u = uint64(v) - return - - case muint32: - p, err = m.R.Next(5) - if err != nil { - return - } - u = uint64(getMuint32(p)) - return - - case mint64: - p, err = m.R.Next(9) - if err != nil { - return - } - v := int64(getMint64(p)) - if v < 0 { - err = UintBelowZero{Value: v} - return - } - u = uint64(v) - return - - case muint64: - p, err = m.R.Next(9) - if err != nil { - return - } - u = getMuint64(p) - return - - default: - if isnfixint(lead) { - err = UintBelowZero{Value: int64(rnfixint(lead))} - } else { - err = badPrefix(UintType, lead) - } - return - - } -} - -// ReadUint32 reads a uint32 from the reader -func (m *Reader) ReadUint32() (u uint32, err error) { - var in uint64 - in, err = m.ReadUint64() - if in > math.MaxUint32 { - err = UintOverflow{Value: in, FailedBitsize: 32} - return - } - u = uint32(in) - return -} - -// ReadUint16 reads a uint16 from the reader -func (m *Reader) ReadUint16() (u uint16, err error) { - var in uint64 - in, err = m.ReadUint64() - if in > math.MaxUint16 { - err = UintOverflow{Value: in, FailedBitsize: 16} - return - } - u = uint16(in) - return -} - -// ReadUint8 reads a uint8 from the reader -func (m *Reader) ReadUint8() (u uint8, err error) { - var in uint64 - in, err = m.ReadUint64() - if in > math.MaxUint8 { 
- err = UintOverflow{Value: in, FailedBitsize: 8} - return - } - u = uint8(in) - return -} - -// ReadUint reads a uint from the reader -func (m *Reader) ReadUint() (u uint, err error) { - if smallint { - var un uint32 - un, err = m.ReadUint32() - u = uint(un) - return - } - var un uint64 - un, err = m.ReadUint64() - u = uint(un) - return -} - -// ReadByte is analogous to ReadUint8. -// -// NOTE: this is *not* an implementation -// of io.ByteReader. -func (m *Reader) ReadByte() (b byte, err error) { - var in uint64 - in, err = m.ReadUint64() - if in > math.MaxUint8 { - err = UintOverflow{Value: in, FailedBitsize: 8} - return - } - b = byte(in) - return -} - -// ReadBytes reads a MessagePack 'bin' object -// from the reader and returns its value. It may -// use 'scratch' for storage if it is non-nil. -func (m *Reader) ReadBytes(scratch []byte) (b []byte, err error) { - var p []byte - var lead byte - p, err = m.R.Peek(2) - if err != nil { - return - } - lead = p[0] - var read int64 - switch lead { - case mbin8: - read = int64(p[1]) - m.R.Skip(2) - case mbin16: - p, err = m.R.Next(3) - if err != nil { - return - } - read = int64(big.Uint16(p[1:])) - case mbin32: - p, err = m.R.Next(5) - if err != nil { - return - } - read = int64(big.Uint32(p[1:])) - default: - err = badPrefix(BinType, lead) - return - } - if int64(cap(scratch)) < read { - b = make([]byte, read) - } else { - b = scratch[0:read] - } - _, err = m.R.ReadFull(b) - return -} - -// ReadBytesHeader reads the size header -// of a MessagePack 'bin' object. The user -// is responsible for dealing with the next -// 'sz' bytes from the reader in an application-specific -// way. 
-func (m *Reader) ReadBytesHeader() (sz uint32, err error) { - var p []byte - p, err = m.R.Peek(1) - if err != nil { - return - } - switch p[0] { - case mbin8: - p, err = m.R.Next(2) - if err != nil { - return - } - sz = uint32(p[1]) - return - case mbin16: - p, err = m.R.Next(3) - if err != nil { - return - } - sz = uint32(big.Uint16(p[1:])) - return - case mbin32: - p, err = m.R.Next(5) - if err != nil { - return - } - sz = uint32(big.Uint32(p[1:])) - return - default: - err = badPrefix(BinType, p[0]) - return - } -} - -// ReadExactBytes reads a MessagePack 'bin'-encoded -// object off of the wire into the provided slice. An -// ArrayError will be returned if the object is not -// exactly the length of the input slice. -func (m *Reader) ReadExactBytes(into []byte) error { - p, err := m.R.Peek(2) - if err != nil { - return err - } - lead := p[0] - var read int64 // bytes to read - var skip int // prefix size to skip - switch lead { - case mbin8: - read = int64(p[1]) - skip = 2 - case mbin16: - p, err = m.R.Peek(3) - if err != nil { - return err - } - read = int64(big.Uint16(p[1:])) - skip = 3 - case mbin32: - p, err = m.R.Peek(5) - if err != nil { - return err - } - read = int64(big.Uint32(p[1:])) - skip = 5 - default: - return badPrefix(BinType, lead) - } - if read != int64(len(into)) { - return ArrayError{Wanted: uint32(len(into)), Got: uint32(read)} - } - m.R.Skip(skip) - _, err = m.R.ReadFull(into) - return err -} - -// ReadStringAsBytes reads a MessagePack 'str' (utf-8) string -// and returns its value as bytes. It may use 'scratch' for storage -// if it is non-nil. 
-func (m *Reader) ReadStringAsBytes(scratch []byte) (b []byte, err error) { - var p []byte - var lead byte - p, err = m.R.Peek(1) - if err != nil { - return - } - lead = p[0] - var read int64 - - if isfixstr(lead) { - read = int64(rfixstr(lead)) - m.R.Skip(1) - goto fill - } - - switch lead { - case mstr8: - p, err = m.R.Next(2) - if err != nil { - return - } - read = int64(uint8(p[1])) - case mstr16: - p, err = m.R.Next(3) - if err != nil { - return - } - read = int64(big.Uint16(p[1:])) - case mstr32: - p, err = m.R.Next(5) - if err != nil { - return - } - read = int64(big.Uint32(p[1:])) - default: - err = badPrefix(StrType, lead) - return - } -fill: - if int64(cap(scratch)) < read { - b = make([]byte, read) - } else { - b = scratch[0:read] - } - _, err = m.R.ReadFull(b) - return -} - -// ReadStringHeader reads a string header -// off of the wire. The user is then responsible -// for dealing with the next 'sz' bytes from -// the reader in an application-specific manner. -func (m *Reader) ReadStringHeader() (sz uint32, err error) { - var p []byte - p, err = m.R.Peek(1) - if err != nil { - return - } - lead := p[0] - if isfixstr(lead) { - sz = uint32(rfixstr(lead)) - m.R.Skip(1) - return - } - switch lead { - case mstr8: - p, err = m.R.Next(2) - if err != nil { - return - } - sz = uint32(p[1]) - return - case mstr16: - p, err = m.R.Next(3) - if err != nil { - return - } - sz = uint32(big.Uint16(p[1:])) - return - case mstr32: - p, err = m.R.Next(5) - if err != nil { - return - } - sz = big.Uint32(p[1:]) - return - default: - err = badPrefix(StrType, lead) - return - } -} - -// ReadString reads a utf-8 string from the reader -func (m *Reader) ReadString() (s string, err error) { - var p []byte - var lead byte - var read int64 - p, err = m.R.Peek(1) - if err != nil { - return - } - lead = p[0] - - if isfixstr(lead) { - read = int64(rfixstr(lead)) - m.R.Skip(1) - goto fill - } - - switch lead { - case mstr8: - p, err = m.R.Next(2) - if err != nil { - return - } - read 
= int64(uint8(p[1])) - case mstr16: - p, err = m.R.Next(3) - if err != nil { - return - } - read = int64(big.Uint16(p[1:])) - case mstr32: - p, err = m.R.Next(5) - if err != nil { - return - } - read = int64(big.Uint32(p[1:])) - default: - err = badPrefix(StrType, lead) - return - } -fill: - if read == 0 { - s, err = "", nil - return - } - // reading into the memory - // that will become the string - // itself has vastly superior - // worst-case performance, because - // the reader buffer doesn't have - // to be large enough to hold the string. - // the idea here is to make it more - // difficult for someone malicious - // to cause the system to run out of - // memory by sending very large strings. - // - // NOTE: this works because the argument - // passed to (*fwd.Reader).ReadFull escapes - // to the heap; its argument may, in turn, - // be passed to the underlying reader, and - // thus escape analysis *must* conclude that - // 'out' escapes. - out := make([]byte, read) - _, err = m.R.ReadFull(out) - if err != nil { - return - } - s = UnsafeString(out) - return -} - -// ReadComplex64 reads a complex64 from the reader -func (m *Reader) ReadComplex64() (f complex64, err error) { - var p []byte - p, err = m.R.Peek(10) - if err != nil { - return - } - if p[0] != mfixext8 { - err = badPrefix(Complex64Type, p[0]) - return - } - if int8(p[1]) != Complex64Extension { - err = errExt(int8(p[1]), Complex64Extension) - return - } - f = complex(math.Float32frombits(big.Uint32(p[2:])), - math.Float32frombits(big.Uint32(p[6:]))) - _, err = m.R.Skip(10) - return -} - -// ReadComplex128 reads a complex128 from the reader -func (m *Reader) ReadComplex128() (f complex128, err error) { - var p []byte - p, err = m.R.Peek(18) - if err != nil { - return - } - if p[0] != mfixext16 { - err = badPrefix(Complex128Type, p[0]) - return - } - if int8(p[1]) != Complex128Extension { - err = errExt(int8(p[1]), Complex128Extension) - return - } - f = 
complex(math.Float64frombits(big.Uint64(p[2:])), - math.Float64frombits(big.Uint64(p[10:]))) - _, err = m.R.Skip(18) - return -} - -// ReadMapStrIntf reads a MessagePack map into a map[string]interface{}. -// (You must pass a non-nil map into the function.) -func (m *Reader) ReadMapStrIntf(mp map[string]interface{}) (err error) { - var sz uint32 - sz, err = m.ReadMapHeader() - if err != nil { - return - } - for key := range mp { - delete(mp, key) - } - for i := uint32(0); i < sz; i++ { - var key string - var val interface{} - key, err = m.ReadString() - if err != nil { - return - } - val, err = m.ReadIntf() - if err != nil { - return - } - mp[key] = val - } - return -} - -// ReadTime reads a time.Time object from the reader. -// The returned time's location will be set to time.Local. -func (m *Reader) ReadTime() (t time.Time, err error) { - var p []byte - p, err = m.R.Peek(15) - if err != nil { - return - } - if p[0] != mext8 || p[1] != 12 { - err = badPrefix(TimeType, p[0]) - return - } - if int8(p[2]) != TimeExtension { - err = errExt(int8(p[2]), TimeExtension) - return - } - sec, nsec := getUnix(p[3:]) - t = time.Unix(sec, int64(nsec)).Local() - _, err = m.R.Skip(15) - return -} - -// ReadIntf reads out the next object as a raw interface{}. -// Arrays are decoded as []interface{}, and maps are decoded -// as map[string]interface{}. Integers are decoded as int64 -// and unsigned integers are decoded as uint64. 
-func (m *Reader) ReadIntf() (i interface{}, err error) { - var t Type - t, err = m.NextType() - if err != nil { - return - } - switch t { - case BoolType: - i, err = m.ReadBool() - return - - case IntType: - i, err = m.ReadInt64() - return - - case UintType: - i, err = m.ReadUint64() - return - - case BinType: - i, err = m.ReadBytes(nil) - return - - case StrType: - i, err = m.ReadString() - return - - case Complex64Type: - i, err = m.ReadComplex64() - return - - case Complex128Type: - i, err = m.ReadComplex128() - return - - case TimeType: - i, err = m.ReadTime() - return - - case ExtensionType: - var t int8 - t, err = m.peekExtensionType() - if err != nil { - return - } - f, ok := extensionReg[t] - if ok { - e := f() - err = m.ReadExtension(e) - i = e - return - } - var e RawExtension - e.Type = t - err = m.ReadExtension(&e) - i = &e - return - - case MapType: - mp := make(map[string]interface{}) - err = m.ReadMapStrIntf(mp) - i = mp - return - - case NilType: - err = m.ReadNil() - i = nil - return - - case Float32Type: - i, err = m.ReadFloat32() - return - - case Float64Type: - i, err = m.ReadFloat64() - return - - case ArrayType: - var sz uint32 - sz, err = m.ReadArrayHeader() - - if err != nil { - return - } - out := make([]interface{}, int(sz)) - for j := range out { - out[j], err = m.ReadIntf() - if err != nil { - return - } - } - i = out - return - - default: - return nil, fatal // unreachable - } -} diff --git a/vendor/github.com/tinylib/msgp/msgp/read_bytes.go b/vendor/github.com/tinylib/msgp/msgp/read_bytes.go deleted file mode 100644 index e41997578..000000000 --- a/vendor/github.com/tinylib/msgp/msgp/read_bytes.go +++ /dev/null @@ -1,1197 +0,0 @@ -package msgp - -import ( - "bytes" - "encoding/binary" - "math" - "time" -) - -var big = binary.BigEndian - -// NextType returns the type of the next -// object in the slice. If the length -// of the input is zero, it returns -// InvalidType. 
-func NextType(b []byte) Type { - if len(b) == 0 { - return InvalidType - } - spec := sizes[b[0]] - t := spec.typ - if t == ExtensionType && len(b) > int(spec.size) { - var tp int8 - if spec.extra == constsize { - tp = int8(b[1]) - } else { - tp = int8(b[spec.size-1]) - } - switch tp { - case TimeExtension: - return TimeType - case Complex128Extension: - return Complex128Type - case Complex64Extension: - return Complex64Type - default: - return ExtensionType - } - } - return t -} - -// IsNil returns true if len(b)>0 and -// the leading byte is a 'nil' MessagePack -// byte; false otherwise -func IsNil(b []byte) bool { - if len(b) != 0 && b[0] == mnil { - return true - } - return false -} - -// Raw is raw MessagePack. -// Raw allows you to read and write -// data without interpreting its contents. -type Raw []byte - -// MarshalMsg implements msgp.Marshaler. -// It appends the raw contents of 'raw' -// to the provided byte slice. If 'raw' -// is 0 bytes, 'nil' will be appended instead. -func (r Raw) MarshalMsg(b []byte) ([]byte, error) { - i := len(r) - if i == 0 { - return AppendNil(b), nil - } - o, l := ensure(b, i) - copy(o[l:], []byte(r)) - return o, nil -} - -// UnmarshalMsg implements msgp.Unmarshaler. -// It sets the contents of *Raw to be the next -// object in the provided byte slice. -func (r *Raw) UnmarshalMsg(b []byte) ([]byte, error) { - l := len(b) - out, err := Skip(b) - if err != nil { - return b, err - } - rlen := l - len(out) - if IsNil(b[:rlen]) { - rlen = 0 - } - if cap(*r) < rlen { - *r = make(Raw, rlen) - } else { - *r = (*r)[0:rlen] - } - copy(*r, b[:rlen]) - return out, nil -} - -// EncodeMsg implements msgp.Encodable. -// It writes the raw bytes to the writer. -// If r is empty, it writes 'nil' instead. -func (r Raw) EncodeMsg(w *Writer) error { - if len(r) == 0 { - return w.WriteNil() - } - _, err := w.Write([]byte(r)) - return err -} - -// DecodeMsg implements msgp.Decodable. 
-// It sets the value of *Raw to be the -// next object on the wire. -func (r *Raw) DecodeMsg(f *Reader) error { - *r = (*r)[:0] - err := appendNext(f, (*[]byte)(r)) - if IsNil(*r) { - *r = (*r)[:0] - } - return err -} - -// Msgsize implements msgp.Sizer -func (r Raw) Msgsize() int { - l := len(r) - if l == 0 { - return 1 // for 'nil' - } - return l -} - -func appendNext(f *Reader, d *[]byte) error { - amt, o, err := getNextSize(f.R) - if err != nil { - return err - } - var i int - *d, i = ensure(*d, int(amt)) - _, err = f.R.ReadFull((*d)[i:]) - if err != nil { - return err - } - for o > 0 { - err = appendNext(f, d) - if err != nil { - return err - } - o-- - } - return nil -} - -// MarshalJSON implements json.Marshaler -func (r *Raw) MarshalJSON() ([]byte, error) { - var buf bytes.Buffer - _, err := UnmarshalAsJSON(&buf, []byte(*r)) - return buf.Bytes(), err -} - -// ReadMapHeaderBytes reads a map header size -// from 'b' and returns the remaining bytes. -// Possible errors: -// - ErrShortBytes (too few bytes) -// - TypeError{} (not a map) -func ReadMapHeaderBytes(b []byte) (sz uint32, o []byte, err error) { - l := len(b) - if l < 1 { - err = ErrShortBytes - return - } - - lead := b[0] - if isfixmap(lead) { - sz = uint32(rfixmap(lead)) - o = b[1:] - return - } - - switch lead { - case mmap16: - if l < 3 { - err = ErrShortBytes - return - } - sz = uint32(big.Uint16(b[1:])) - o = b[3:] - return - - case mmap32: - if l < 5 { - err = ErrShortBytes - return - } - sz = big.Uint32(b[1:]) - o = b[5:] - return - - default: - err = badPrefix(MapType, lead) - return - } -} - -// ReadMapKeyZC attempts to read a map key -// from 'b' and returns the key bytes and the remaining bytes -// Possible errors: -// - ErrShortBytes (too few bytes) -// - TypeError{} (not a str or bin) -func ReadMapKeyZC(b []byte) ([]byte, []byte, error) { - o, x, err := ReadStringZC(b) - if err != nil { - if tperr, ok := err.(TypeError); ok && tperr.Encoded == BinType { - return ReadBytesZC(b) - } - 
return nil, b, err - } - return o, x, nil -} - -// ReadArrayHeaderBytes attempts to read -// the array header size off of 'b' and return -// the size and remaining bytes. -// Possible errors: -// - ErrShortBytes (too few bytes) -// - TypeError{} (not an array) -func ReadArrayHeaderBytes(b []byte) (sz uint32, o []byte, err error) { - if len(b) < 1 { - return 0, nil, ErrShortBytes - } - lead := b[0] - if isfixarray(lead) { - sz = uint32(rfixarray(lead)) - o = b[1:] - return - } - - switch lead { - case marray16: - if len(b) < 3 { - err = ErrShortBytes - return - } - sz = uint32(big.Uint16(b[1:])) - o = b[3:] - return - - case marray32: - if len(b) < 5 { - err = ErrShortBytes - return - } - sz = big.Uint32(b[1:]) - o = b[5:] - return - - default: - err = badPrefix(ArrayType, lead) - return - } -} - -// ReadNilBytes tries to read a "nil" byte -// off of 'b' and return the remaining bytes. -// Possible errors: -// - ErrShortBytes (too few bytes) -// - TypeError{} (not a 'nil') -// - InvalidPrefixError -func ReadNilBytes(b []byte) ([]byte, error) { - if len(b) < 1 { - return nil, ErrShortBytes - } - if b[0] != mnil { - return b, badPrefix(NilType, b[0]) - } - return b[1:], nil -} - -// ReadFloat64Bytes tries to read a float64 -// from 'b' and return the value and the remaining bytes. -// Possible errors: -// - ErrShortBytes (too few bytes) -// - TypeError{} (not a float64) -func ReadFloat64Bytes(b []byte) (f float64, o []byte, err error) { - if len(b) < 9 { - if len(b) >= 5 && b[0] == mfloat32 { - var tf float32 - tf, o, err = ReadFloat32Bytes(b) - f = float64(tf) - return - } - err = ErrShortBytes - return - } - - if b[0] != mfloat64 { - if b[0] == mfloat32 { - var tf float32 - tf, o, err = ReadFloat32Bytes(b) - f = float64(tf) - return - } - err = badPrefix(Float64Type, b[0]) - return - } - - f = math.Float64frombits(getMuint64(b)) - o = b[9:] - return -} - -// ReadFloat32Bytes tries to read a float64 -// from 'b' and return the value and the remaining bytes. 
-// Possible errors: -// - ErrShortBytes (too few bytes) -// - TypeError{} (not a float32) -func ReadFloat32Bytes(b []byte) (f float32, o []byte, err error) { - if len(b) < 5 { - err = ErrShortBytes - return - } - - if b[0] != mfloat32 { - err = TypeError{Method: Float32Type, Encoded: getType(b[0])} - return - } - - f = math.Float32frombits(getMuint32(b)) - o = b[5:] - return -} - -// ReadBoolBytes tries to read a float64 -// from 'b' and return the value and the remaining bytes. -// Possible errors: -// - ErrShortBytes (too few bytes) -// - TypeError{} (not a bool) -func ReadBoolBytes(b []byte) (bool, []byte, error) { - if len(b) < 1 { - return false, b, ErrShortBytes - } - switch b[0] { - case mtrue: - return true, b[1:], nil - case mfalse: - return false, b[1:], nil - default: - return false, b, badPrefix(BoolType, b[0]) - } -} - -// ReadInt64Bytes tries to read an int64 -// from 'b' and return the value and the remaining bytes. -// Possible errors: -// - ErrShortBytes (too few bytes) -// - TypeError (not a int) -func ReadInt64Bytes(b []byte) (i int64, o []byte, err error) { - l := len(b) - if l < 1 { - return 0, nil, ErrShortBytes - } - - lead := b[0] - if isfixint(lead) { - i = int64(rfixint(lead)) - o = b[1:] - return - } - if isnfixint(lead) { - i = int64(rnfixint(lead)) - o = b[1:] - return - } - - switch lead { - case mint8: - if l < 2 { - err = ErrShortBytes - return - } - i = int64(getMint8(b)) - o = b[2:] - return - - case muint8: - if l < 2 { - err = ErrShortBytes - return - } - i = int64(getMuint8(b)) - o = b[2:] - return - - case mint16: - if l < 3 { - err = ErrShortBytes - return - } - i = int64(getMint16(b)) - o = b[3:] - return - - case muint16: - if l < 3 { - err = ErrShortBytes - return - } - i = int64(getMuint16(b)) - o = b[3:] - return - - case mint32: - if l < 5 { - err = ErrShortBytes - return - } - i = int64(getMint32(b)) - o = b[5:] - return - - case muint32: - if l < 5 { - err = ErrShortBytes - return - } - i = int64(getMuint32(b)) - o = 
b[5:] - return - - case mint64: - if l < 9 { - err = ErrShortBytes - return - } - i = int64(getMint64(b)) - o = b[9:] - return - - case muint64: - if l < 9 { - err = ErrShortBytes - return - } - u := getMuint64(b) - if u > math.MaxInt64 { - err = UintOverflow{Value: u, FailedBitsize: 64} - return - } - i = int64(u) - o = b[9:] - return - - default: - err = badPrefix(IntType, lead) - return - } -} - -// ReadInt32Bytes tries to read an int32 -// from 'b' and return the value and the remaining bytes. -// Possible errors: -// - ErrShortBytes (too few bytes) -// - TypeError{} (not a int) -// - IntOverflow{} (value doesn't fit in int32) -func ReadInt32Bytes(b []byte) (int32, []byte, error) { - i, o, err := ReadInt64Bytes(b) - if i > math.MaxInt32 || i < math.MinInt32 { - return 0, o, IntOverflow{Value: i, FailedBitsize: 32} - } - return int32(i), o, err -} - -// ReadInt16Bytes tries to read an int16 -// from 'b' and return the value and the remaining bytes. -// Possible errors: -// - ErrShortBytes (too few bytes) -// - TypeError{} (not a int) -// - IntOverflow{} (value doesn't fit in int16) -func ReadInt16Bytes(b []byte) (int16, []byte, error) { - i, o, err := ReadInt64Bytes(b) - if i > math.MaxInt16 || i < math.MinInt16 { - return 0, o, IntOverflow{Value: i, FailedBitsize: 16} - } - return int16(i), o, err -} - -// ReadInt8Bytes tries to read an int16 -// from 'b' and return the value and the remaining bytes. -// Possible errors: -// - ErrShortBytes (too few bytes) -// - TypeError{} (not a int) -// - IntOverflow{} (value doesn't fit in int8) -func ReadInt8Bytes(b []byte) (int8, []byte, error) { - i, o, err := ReadInt64Bytes(b) - if i > math.MaxInt8 || i < math.MinInt8 { - return 0, o, IntOverflow{Value: i, FailedBitsize: 8} - } - return int8(i), o, err -} - -// ReadIntBytes tries to read an int -// from 'b' and return the value and the remaining bytes. 
-// Possible errors: -// - ErrShortBytes (too few bytes) -// - TypeError{} (not a int) -// - IntOverflow{} (value doesn't fit in int; 32-bit platforms only) -func ReadIntBytes(b []byte) (int, []byte, error) { - if smallint { - i, b, err := ReadInt32Bytes(b) - return int(i), b, err - } - i, b, err := ReadInt64Bytes(b) - return int(i), b, err -} - -// ReadUint64Bytes tries to read a uint64 -// from 'b' and return the value and the remaining bytes. -// Possible errors: -// - ErrShortBytes (too few bytes) -// - TypeError{} (not a uint) -func ReadUint64Bytes(b []byte) (u uint64, o []byte, err error) { - l := len(b) - if l < 1 { - return 0, nil, ErrShortBytes - } - - lead := b[0] - if isfixint(lead) { - u = uint64(rfixint(lead)) - o = b[1:] - return - } - - switch lead { - case mint8: - if l < 2 { - err = ErrShortBytes - return - } - v := int64(getMint8(b)) - if v < 0 { - err = UintBelowZero{Value: v} - return - } - u = uint64(v) - o = b[2:] - return - - case muint8: - if l < 2 { - err = ErrShortBytes - return - } - u = uint64(getMuint8(b)) - o = b[2:] - return - - case mint16: - if l < 3 { - err = ErrShortBytes - return - } - v := int64(getMint16(b)) - if v < 0 { - err = UintBelowZero{Value: v} - return - } - u = uint64(v) - o = b[3:] - return - - case muint16: - if l < 3 { - err = ErrShortBytes - return - } - u = uint64(getMuint16(b)) - o = b[3:] - return - - case mint32: - if l < 5 { - err = ErrShortBytes - return - } - v := int64(getMint32(b)) - if v < 0 { - err = UintBelowZero{Value: v} - return - } - u = uint64(v) - o = b[5:] - return - - case muint32: - if l < 5 { - err = ErrShortBytes - return - } - u = uint64(getMuint32(b)) - o = b[5:] - return - - case mint64: - if l < 9 { - err = ErrShortBytes - return - } - v := int64(getMint64(b)) - if v < 0 { - err = UintBelowZero{Value: v} - return - } - u = uint64(v) - o = b[9:] - return - - case muint64: - if l < 9 { - err = ErrShortBytes - return - } - u = getMuint64(b) - o = b[9:] - return - - default: - if 
isnfixint(lead) { - err = UintBelowZero{Value: int64(rnfixint(lead))} - } else { - err = badPrefix(UintType, lead) - } - return - } -} - -// ReadUint32Bytes tries to read a uint32 -// from 'b' and return the value and the remaining bytes. -// Possible errors: -// - ErrShortBytes (too few bytes) -// - TypeError{} (not a uint) -// - UintOverflow{} (value too large for uint32) -func ReadUint32Bytes(b []byte) (uint32, []byte, error) { - v, o, err := ReadUint64Bytes(b) - if v > math.MaxUint32 { - return 0, nil, UintOverflow{Value: v, FailedBitsize: 32} - } - return uint32(v), o, err -} - -// ReadUint16Bytes tries to read a uint16 -// from 'b' and return the value and the remaining bytes. -// Possible errors: -// - ErrShortBytes (too few bytes) -// - TypeError{} (not a uint) -// - UintOverflow{} (value too large for uint16) -func ReadUint16Bytes(b []byte) (uint16, []byte, error) { - v, o, err := ReadUint64Bytes(b) - if v > math.MaxUint16 { - return 0, nil, UintOverflow{Value: v, FailedBitsize: 16} - } - return uint16(v), o, err -} - -// ReadUint8Bytes tries to read a uint8 -// from 'b' and return the value and the remaining bytes. -// Possible errors: -// - ErrShortBytes (too few bytes) -// - TypeError{} (not a uint) -// - UintOverflow{} (value too large for uint8) -func ReadUint8Bytes(b []byte) (uint8, []byte, error) { - v, o, err := ReadUint64Bytes(b) - if v > math.MaxUint8 { - return 0, nil, UintOverflow{Value: v, FailedBitsize: 8} - } - return uint8(v), o, err -} - -// ReadUintBytes tries to read a uint -// from 'b' and return the value and the remaining bytes. 
-// Possible errors: -// - ErrShortBytes (too few bytes) -// - TypeError{} (not a uint) -// - UintOverflow{} (value too large for uint; 32-bit platforms only) -func ReadUintBytes(b []byte) (uint, []byte, error) { - if smallint { - u, b, err := ReadUint32Bytes(b) - return uint(u), b, err - } - u, b, err := ReadUint64Bytes(b) - return uint(u), b, err -} - -// ReadByteBytes is analogous to ReadUint8Bytes -func ReadByteBytes(b []byte) (byte, []byte, error) { - return ReadUint8Bytes(b) -} - -// ReadBytesBytes reads a 'bin' object -// from 'b' and returns its vaue and -// the remaining bytes in 'b'. -// Possible errors: -// - ErrShortBytes (too few bytes) -// - TypeError{} (not a 'bin' object) -func ReadBytesBytes(b []byte, scratch []byte) (v []byte, o []byte, err error) { - return readBytesBytes(b, scratch, false) -} - -func readBytesBytes(b []byte, scratch []byte, zc bool) (v []byte, o []byte, err error) { - l := len(b) - if l < 1 { - return nil, nil, ErrShortBytes - } - - lead := b[0] - var read int - switch lead { - case mbin8: - if l < 2 { - err = ErrShortBytes - return - } - - read = int(b[1]) - b = b[2:] - - case mbin16: - if l < 3 { - err = ErrShortBytes - return - } - read = int(big.Uint16(b[1:])) - b = b[3:] - - case mbin32: - if l < 5 { - err = ErrShortBytes - return - } - read = int(big.Uint32(b[1:])) - b = b[5:] - - default: - err = badPrefix(BinType, lead) - return - } - - if len(b) < read { - err = ErrShortBytes - return - } - - // zero-copy - if zc { - v = b[0:read] - o = b[read:] - return - } - - if cap(scratch) >= read { - v = scratch[0:read] - } else { - v = make([]byte, read) - } - - o = b[copy(v, b):] - return -} - -// ReadBytesZC extracts the messagepack-encoded -// binary field without copying. The returned []byte -// points to the same memory as the input slice. 
-// Possible errors: -// - ErrShortBytes (b not long enough) -// - TypeError{} (object not 'bin') -func ReadBytesZC(b []byte) (v []byte, o []byte, err error) { - return readBytesBytes(b, nil, true) -} - -func ReadExactBytes(b []byte, into []byte) (o []byte, err error) { - l := len(b) - if l < 1 { - err = ErrShortBytes - return - } - - lead := b[0] - var read uint32 - var skip int - switch lead { - case mbin8: - if l < 2 { - err = ErrShortBytes - return - } - - read = uint32(b[1]) - skip = 2 - - case mbin16: - if l < 3 { - err = ErrShortBytes - return - } - read = uint32(big.Uint16(b[1:])) - skip = 3 - - case mbin32: - if l < 5 { - err = ErrShortBytes - return - } - read = uint32(big.Uint32(b[1:])) - skip = 5 - - default: - err = badPrefix(BinType, lead) - return - } - - if read != uint32(len(into)) { - err = ArrayError{Wanted: uint32(len(into)), Got: read} - return - } - - o = b[skip+copy(into, b[skip:]):] - return -} - -// ReadStringZC reads a messagepack string field -// without copying. The returned []byte points -// to the same memory as the input slice. 
-// Possible errors: -// - ErrShortBytes (b not long enough) -// - TypeError{} (object not 'str') -func ReadStringZC(b []byte) (v []byte, o []byte, err error) { - l := len(b) - if l < 1 { - return nil, nil, ErrShortBytes - } - - lead := b[0] - var read int - - if isfixstr(lead) { - read = int(rfixstr(lead)) - b = b[1:] - } else { - switch lead { - case mstr8: - if l < 2 { - err = ErrShortBytes - return - } - read = int(b[1]) - b = b[2:] - - case mstr16: - if l < 3 { - err = ErrShortBytes - return - } - read = int(big.Uint16(b[1:])) - b = b[3:] - - case mstr32: - if l < 5 { - err = ErrShortBytes - return - } - read = int(big.Uint32(b[1:])) - b = b[5:] - - default: - err = TypeError{Method: StrType, Encoded: getType(lead)} - return - } - } - - if len(b) < read { - err = ErrShortBytes - return - } - - v = b[0:read] - o = b[read:] - return -} - -// ReadStringBytes reads a 'str' object -// from 'b' and returns its value and the -// remaining bytes in 'b'. -// Possible errors: -// - ErrShortBytes (b not long enough) -// - TypeError{} (not 'str' type) -// - InvalidPrefixError -func ReadStringBytes(b []byte) (string, []byte, error) { - v, o, err := ReadStringZC(b) - return string(v), o, err -} - -// ReadStringAsBytes reads a 'str' object -// into a slice of bytes. 'v' is the value of -// the 'str' object, which may reside in memory -// pointed to by 'scratch.' 'o' is the remaining bytes -// in 'b.'' -// Possible errors: -// - ErrShortBytes (b not long enough) -// - TypeError{} (not 'str' type) -// - InvalidPrefixError (unknown type marker) -func ReadStringAsBytes(b []byte, scratch []byte) (v []byte, o []byte, err error) { - var tmp []byte - tmp, o, err = ReadStringZC(b) - v = append(scratch[:0], tmp...) - return -} - -// ReadComplex128Bytes reads a complex128 -// extension object from 'b' and returns the -// remaining bytes. 
-// Possible errors: -// - ErrShortBytes (not enough bytes in 'b') -// - TypeError{} (object not a complex128) -// - InvalidPrefixError -// - ExtensionTypeError{} (object an extension of the correct size, but not a complex128) -func ReadComplex128Bytes(b []byte) (c complex128, o []byte, err error) { - if len(b) < 18 { - err = ErrShortBytes - return - } - if b[0] != mfixext16 { - err = badPrefix(Complex128Type, b[0]) - return - } - if int8(b[1]) != Complex128Extension { - err = errExt(int8(b[1]), Complex128Extension) - return - } - c = complex(math.Float64frombits(big.Uint64(b[2:])), - math.Float64frombits(big.Uint64(b[10:]))) - o = b[18:] - return -} - -// ReadComplex64Bytes reads a complex64 -// extension object from 'b' and returns the -// remaining bytes. -// Possible errors: -// - ErrShortBytes (not enough bytes in 'b') -// - TypeError{} (object not a complex64) -// - ExtensionTypeError{} (object an extension of the correct size, but not a complex64) -func ReadComplex64Bytes(b []byte) (c complex64, o []byte, err error) { - if len(b) < 10 { - err = ErrShortBytes - return - } - if b[0] != mfixext8 { - err = badPrefix(Complex64Type, b[0]) - return - } - if b[1] != Complex64Extension { - err = errExt(int8(b[1]), Complex64Extension) - return - } - c = complex(math.Float32frombits(big.Uint32(b[2:])), - math.Float32frombits(big.Uint32(b[6:]))) - o = b[10:] - return -} - -// ReadTimeBytes reads a time.Time -// extension object from 'b' and returns the -// remaining bytes. 
-// Possible errors: -// - ErrShortBytes (not enough bytes in 'b') -// - TypeError{} (object not a complex64) -// - ExtensionTypeError{} (object an extension of the correct size, but not a time.Time) -func ReadTimeBytes(b []byte) (t time.Time, o []byte, err error) { - if len(b) < 15 { - err = ErrShortBytes - return - } - if b[0] != mext8 || b[1] != 12 { - err = badPrefix(TimeType, b[0]) - return - } - if int8(b[2]) != TimeExtension { - err = errExt(int8(b[2]), TimeExtension) - return - } - sec, nsec := getUnix(b[3:]) - t = time.Unix(sec, int64(nsec)).Local() - o = b[15:] - return -} - -// ReadMapStrIntfBytes reads a map[string]interface{} -// out of 'b' and returns the map and remaining bytes. -// If 'old' is non-nil, the values will be read into that map. -func ReadMapStrIntfBytes(b []byte, old map[string]interface{}) (v map[string]interface{}, o []byte, err error) { - var sz uint32 - o = b - sz, o, err = ReadMapHeaderBytes(o) - - if err != nil { - return - } - - if old != nil { - for key := range old { - delete(old, key) - } - v = old - } else { - v = make(map[string]interface{}, int(sz)) - } - - for z := uint32(0); z < sz; z++ { - if len(o) < 1 { - err = ErrShortBytes - return - } - var key []byte - key, o, err = ReadMapKeyZC(o) - if err != nil { - return - } - var val interface{} - val, o, err = ReadIntfBytes(o) - if err != nil { - return - } - v[string(key)] = val - } - return -} - -// ReadIntfBytes attempts to read -// the next object out of 'b' as a raw interface{} and -// return the remaining bytes. 
-func ReadIntfBytes(b []byte) (i interface{}, o []byte, err error) { - if len(b) < 1 { - err = ErrShortBytes - return - } - - k := NextType(b) - - switch k { - case MapType: - i, o, err = ReadMapStrIntfBytes(b, nil) - return - - case ArrayType: - var sz uint32 - sz, o, err = ReadArrayHeaderBytes(b) - if err != nil { - return - } - j := make([]interface{}, int(sz)) - i = j - for d := range j { - j[d], o, err = ReadIntfBytes(o) - if err != nil { - return - } - } - return - - case Float32Type: - i, o, err = ReadFloat32Bytes(b) - return - - case Float64Type: - i, o, err = ReadFloat64Bytes(b) - return - - case IntType: - i, o, err = ReadInt64Bytes(b) - return - - case UintType: - i, o, err = ReadUint64Bytes(b) - return - - case BoolType: - i, o, err = ReadBoolBytes(b) - return - - case TimeType: - i, o, err = ReadTimeBytes(b) - return - - case Complex64Type: - i, o, err = ReadComplex64Bytes(b) - return - - case Complex128Type: - i, o, err = ReadComplex128Bytes(b) - return - - case ExtensionType: - var t int8 - t, err = peekExtension(b) - if err != nil { - return - } - // use a user-defined extension, - // if it's been registered - f, ok := extensionReg[t] - if ok { - e := f() - o, err = ReadExtensionBytes(b, e) - i = e - return - } - // last resort is a raw extension - e := RawExtension{} - e.Type = int8(t) - o, err = ReadExtensionBytes(b, &e) - i = &e - return - - case NilType: - o, err = ReadNilBytes(b) - return - - case BinType: - i, o, err = ReadBytesBytes(b, nil) - return - - case StrType: - i, o, err = ReadStringBytes(b) - return - - default: - err = InvalidPrefixError(b[0]) - return - } -} - -// Skip skips the next object in 'b' and -// returns the remaining bytes. If the object -// is a map or array, all of its elements -// will be skipped. 
-// Possible Errors: -// - ErrShortBytes (not enough bytes in b) -// - InvalidPrefixError (bad encoding) -func Skip(b []byte) ([]byte, error) { - sz, asz, err := getSize(b) - if err != nil { - return b, err - } - if uintptr(len(b)) < sz { - return b, ErrShortBytes - } - b = b[sz:] - for asz > 0 { - b, err = Skip(b) - if err != nil { - return b, err - } - asz-- - } - return b, nil -} - -// returns (skip N bytes, skip M objects, error) -func getSize(b []byte) (uintptr, uintptr, error) { - l := len(b) - if l == 0 { - return 0, 0, ErrShortBytes - } - lead := b[0] - spec := &sizes[lead] // get type information - size, mode := spec.size, spec.extra - if size == 0 { - return 0, 0, InvalidPrefixError(lead) - } - if mode >= 0 { // fixed composites - return uintptr(size), uintptr(mode), nil - } - if l < int(size) { - return 0, 0, ErrShortBytes - } - switch mode { - case extra8: - return uintptr(size) + uintptr(b[1]), 0, nil - case extra16: - return uintptr(size) + uintptr(big.Uint16(b[1:])), 0, nil - case extra32: - return uintptr(size) + uintptr(big.Uint32(b[1:])), 0, nil - case map16v: - return uintptr(size), 2 * uintptr(big.Uint16(b[1:])), nil - case map32v: - return uintptr(size), 2 * uintptr(big.Uint32(b[1:])), nil - case array16v: - return uintptr(size), uintptr(big.Uint16(b[1:])), nil - case array32v: - return uintptr(size), uintptr(big.Uint32(b[1:])), nil - default: - return 0, 0, fatal - } -} diff --git a/vendor/github.com/tinylib/msgp/msgp/size.go b/vendor/github.com/tinylib/msgp/msgp/size.go deleted file mode 100644 index ce2f8b16f..000000000 --- a/vendor/github.com/tinylib/msgp/msgp/size.go +++ /dev/null @@ -1,38 +0,0 @@ -package msgp - -// The sizes provided -// are the worst-case -// encoded sizes for -// each type. For variable- -// length types ([]byte, string), -// the total encoded size is -// the prefix size plus the -// length of the object. 
-const ( - Int64Size = 9 - IntSize = Int64Size - UintSize = Int64Size - Int8Size = 2 - Int16Size = 3 - Int32Size = 5 - Uint8Size = 2 - ByteSize = Uint8Size - Uint16Size = 3 - Uint32Size = 5 - Uint64Size = Int64Size - Float64Size = 9 - Float32Size = 5 - Complex64Size = 10 - Complex128Size = 18 - - TimeSize = 15 - BoolSize = 1 - NilSize = 1 - - MapHeaderSize = 5 - ArrayHeaderSize = 5 - - BytesPrefixSize = 5 - StringPrefixSize = 5 - ExtensionPrefixSize = 6 -) diff --git a/vendor/github.com/tinylib/msgp/msgp/unsafe.go b/vendor/github.com/tinylib/msgp/msgp/unsafe.go deleted file mode 100644 index 3978b6ff6..000000000 --- a/vendor/github.com/tinylib/msgp/msgp/unsafe.go +++ /dev/null @@ -1,41 +0,0 @@ -// +build !purego,!appengine - -package msgp - -import ( - "reflect" - "unsafe" -) - -// NOTE: -// all of the definition in this file -// should be repeated in appengine.go, -// but without using unsafe - -const ( - // spec says int and uint are always - // the same size, but that int/uint - // size may not be machine word size - smallint = unsafe.Sizeof(int(0)) == 4 -) - -// UnsafeString returns the byte slice as a volatile string -// THIS SHOULD ONLY BE USED BY THE CODE GENERATOR. -// THIS IS EVIL CODE. -// YOU HAVE BEEN WARNED. -func UnsafeString(b []byte) string { - sh := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - return *(*string)(unsafe.Pointer(&reflect.StringHeader{Data: sh.Data, Len: sh.Len})) -} - -// UnsafeBytes returns the string as a byte slice -// THIS SHOULD ONLY BE USED BY THE CODE GENERATOR. -// THIS IS EVIL CODE. -// YOU HAVE BEEN WARNED. 
-func UnsafeBytes(s string) []byte { - return *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{ - Len: len(s), - Cap: len(s), - Data: (*(*reflect.StringHeader)(unsafe.Pointer(&s))).Data, - })) -} diff --git a/vendor/github.com/tinylib/msgp/msgp/write.go b/vendor/github.com/tinylib/msgp/msgp/write.go deleted file mode 100644 index fb1947c57..000000000 --- a/vendor/github.com/tinylib/msgp/msgp/write.go +++ /dev/null @@ -1,845 +0,0 @@ -package msgp - -import ( - "errors" - "fmt" - "io" - "math" - "reflect" - "sync" - "time" -) - -// Sizer is an interface implemented -// by types that can estimate their -// size when MessagePack encoded. -// This interface is optional, but -// encoding/marshaling implementations -// may use this as a way to pre-allocate -// memory for serialization. -type Sizer interface { - Msgsize() int -} - -var ( - // Nowhere is an io.Writer to nowhere - Nowhere io.Writer = nwhere{} - - btsType = reflect.TypeOf(([]byte)(nil)) - writerPool = sync.Pool{ - New: func() interface{} { - return &Writer{buf: make([]byte, 2048)} - }, - } -) - -func popWriter(w io.Writer) *Writer { - wr := writerPool.Get().(*Writer) - wr.Reset(w) - return wr -} - -func pushWriter(wr *Writer) { - wr.w = nil - wr.wloc = 0 - writerPool.Put(wr) -} - -// freeW frees a writer for use -// by other processes. It is not necessary -// to call freeW on a writer. However, maintaining -// a reference to a *Writer after calling freeW on -// it will cause undefined behavior. -func freeW(w *Writer) { pushWriter(w) } - -// Require ensures that cap(old)-len(old) >= extra. -func Require(old []byte, extra int) []byte { - l := len(old) - c := cap(old) - r := l + extra - if c >= r { - return old - } else if l == 0 { - return make([]byte, 0, extra) - } - // the new size is the greater - // of double the old capacity - // and the sum of the old length - // and the number of new bytes - // necessary. 
- c <<= 1 - if c < r { - c = r - } - n := make([]byte, l, c) - copy(n, old) - return n -} - -// nowhere writer -type nwhere struct{} - -func (n nwhere) Write(p []byte) (int, error) { return len(p), nil } - -// Marshaler is the interface implemented -// by types that know how to marshal themselves -// as MessagePack. MarshalMsg appends the marshalled -// form of the object to the provided -// byte slice, returning the extended -// slice and any errors encountered. -type Marshaler interface { - MarshalMsg([]byte) ([]byte, error) -} - -// Encodable is the interface implemented -// by types that know how to write themselves -// as MessagePack using a *msgp.Writer. -type Encodable interface { - EncodeMsg(*Writer) error -} - -// Writer is a buffered writer -// that can be used to write -// MessagePack objects to an io.Writer. -// You must call *Writer.Flush() in order -// to flush all of the buffered data -// to the underlying writer. -type Writer struct { - w io.Writer - buf []byte - wloc int -} - -// NewWriter returns a new *Writer. -func NewWriter(w io.Writer) *Writer { - if wr, ok := w.(*Writer); ok { - return wr - } - return popWriter(w) -} - -// NewWriterSize returns a writer with a custom buffer size. -func NewWriterSize(w io.Writer, sz int) *Writer { - // we must be able to require() 18 - // contiguous bytes, so that is the - // practical minimum buffer size - if sz < 18 { - sz = 18 - } - - return &Writer{ - w: w, - buf: make([]byte, sz), - } -} - -// Encode encodes an Encodable to an io.Writer. 
-func Encode(w io.Writer, e Encodable) error { - wr := NewWriter(w) - err := e.EncodeMsg(wr) - if err == nil { - err = wr.Flush() - } - freeW(wr) - return err -} - -func (mw *Writer) flush() error { - if mw.wloc == 0 { - return nil - } - n, err := mw.w.Write(mw.buf[:mw.wloc]) - if err != nil { - if n > 0 { - mw.wloc = copy(mw.buf, mw.buf[n:mw.wloc]) - } - return err - } - mw.wloc = 0 - return nil -} - -// Flush flushes all of the buffered -// data to the underlying writer. -func (mw *Writer) Flush() error { return mw.flush() } - -// Buffered returns the number bytes in the write buffer -func (mw *Writer) Buffered() int { return len(mw.buf) - mw.wloc } - -func (mw *Writer) avail() int { return len(mw.buf) - mw.wloc } - -func (mw *Writer) bufsize() int { return len(mw.buf) } - -// NOTE: this should only be called with -// a number that is guaranteed to be less than -// len(mw.buf). typically, it is called with a constant. -// -// NOTE: this is a hot code path -func (mw *Writer) require(n int) (int, error) { - c := len(mw.buf) - wl := mw.wloc - if c-wl < n { - if err := mw.flush(); err != nil { - return 0, err - } - wl = mw.wloc - } - mw.wloc += n - return wl, nil -} - -func (mw *Writer) Append(b ...byte) error { - if mw.avail() < len(b) { - err := mw.flush() - if err != nil { - return err - } - } - mw.wloc += copy(mw.buf[mw.wloc:], b) - return nil -} - -// push one byte onto the buffer -// -// NOTE: this is a hot code path -func (mw *Writer) push(b byte) error { - if mw.wloc == len(mw.buf) { - if err := mw.flush(); err != nil { - return err - } - } - mw.buf[mw.wloc] = b - mw.wloc++ - return nil -} - -func (mw *Writer) prefix8(b byte, u uint8) error { - const need = 2 - if len(mw.buf)-mw.wloc < need { - if err := mw.flush(); err != nil { - return err - } - } - prefixu8(mw.buf[mw.wloc:], b, u) - mw.wloc += need - return nil -} - -func (mw *Writer) prefix16(b byte, u uint16) error { - const need = 3 - if len(mw.buf)-mw.wloc < need { - if err := mw.flush(); err != nil { 
- return err - } - } - prefixu16(mw.buf[mw.wloc:], b, u) - mw.wloc += need - return nil -} - -func (mw *Writer) prefix32(b byte, u uint32) error { - const need = 5 - if len(mw.buf)-mw.wloc < need { - if err := mw.flush(); err != nil { - return err - } - } - prefixu32(mw.buf[mw.wloc:], b, u) - mw.wloc += need - return nil -} - -func (mw *Writer) prefix64(b byte, u uint64) error { - const need = 9 - if len(mw.buf)-mw.wloc < need { - if err := mw.flush(); err != nil { - return err - } - } - prefixu64(mw.buf[mw.wloc:], b, u) - mw.wloc += need - return nil -} - -// Write implements io.Writer, and writes -// data directly to the buffer. -func (mw *Writer) Write(p []byte) (int, error) { - l := len(p) - if mw.avail() < l { - if err := mw.flush(); err != nil { - return 0, err - } - if l > len(mw.buf) { - return mw.w.Write(p) - } - } - mw.wloc += copy(mw.buf[mw.wloc:], p) - return l, nil -} - -// implements io.WriteString -func (mw *Writer) writeString(s string) error { - l := len(s) - if mw.avail() < l { - if err := mw.flush(); err != nil { - return err - } - if l > len(mw.buf) { - _, err := io.WriteString(mw.w, s) - return err - } - } - mw.wloc += copy(mw.buf[mw.wloc:], s) - return nil -} - -// Reset changes the underlying writer used by the Writer -func (mw *Writer) Reset(w io.Writer) { - mw.buf = mw.buf[:cap(mw.buf)] - mw.w = w - mw.wloc = 0 -} - -// WriteMapHeader writes a map header of the given -// size to the writer -func (mw *Writer) WriteMapHeader(sz uint32) error { - switch { - case sz <= 15: - return mw.push(wfixmap(uint8(sz))) - case sz <= math.MaxUint16: - return mw.prefix16(mmap16, uint16(sz)) - default: - return mw.prefix32(mmap32, sz) - } -} - -// WriteArrayHeader writes an array header of the -// given size to the writer -func (mw *Writer) WriteArrayHeader(sz uint32) error { - switch { - case sz <= 15: - return mw.push(wfixarray(uint8(sz))) - case sz <= math.MaxUint16: - return mw.prefix16(marray16, uint16(sz)) - default: - return mw.prefix32(marray32, sz) 
- } -} - -// WriteNil writes a nil byte to the buffer -func (mw *Writer) WriteNil() error { - return mw.push(mnil) -} - -// WriteFloat64 writes a float64 to the writer -func (mw *Writer) WriteFloat64(f float64) error { - return mw.prefix64(mfloat64, math.Float64bits(f)) -} - -// WriteFloat32 writes a float32 to the writer -func (mw *Writer) WriteFloat32(f float32) error { - return mw.prefix32(mfloat32, math.Float32bits(f)) -} - -// WriteInt64 writes an int64 to the writer -func (mw *Writer) WriteInt64(i int64) error { - if i >= 0 { - switch { - case i <= math.MaxInt8: - return mw.push(wfixint(uint8(i))) - case i <= math.MaxInt16: - return mw.prefix16(mint16, uint16(i)) - case i <= math.MaxInt32: - return mw.prefix32(mint32, uint32(i)) - default: - return mw.prefix64(mint64, uint64(i)) - } - } - switch { - case i >= -32: - return mw.push(wnfixint(int8(i))) - case i >= math.MinInt8: - return mw.prefix8(mint8, uint8(i)) - case i >= math.MinInt16: - return mw.prefix16(mint16, uint16(i)) - case i >= math.MinInt32: - return mw.prefix32(mint32, uint32(i)) - default: - return mw.prefix64(mint64, uint64(i)) - } -} - -// WriteInt8 writes an int8 to the writer -func (mw *Writer) WriteInt8(i int8) error { return mw.WriteInt64(int64(i)) } - -// WriteInt16 writes an int16 to the writer -func (mw *Writer) WriteInt16(i int16) error { return mw.WriteInt64(int64(i)) } - -// WriteInt32 writes an int32 to the writer -func (mw *Writer) WriteInt32(i int32) error { return mw.WriteInt64(int64(i)) } - -// WriteInt writes an int to the writer -func (mw *Writer) WriteInt(i int) error { return mw.WriteInt64(int64(i)) } - -// WriteUint64 writes a uint64 to the writer -func (mw *Writer) WriteUint64(u uint64) error { - switch { - case u <= (1<<7)-1: - return mw.push(wfixint(uint8(u))) - case u <= math.MaxUint8: - return mw.prefix8(muint8, uint8(u)) - case u <= math.MaxUint16: - return mw.prefix16(muint16, uint16(u)) - case u <= math.MaxUint32: - return mw.prefix32(muint32, uint32(u)) - default: 
- return mw.prefix64(muint64, u) - } -} - -// WriteByte is analogous to WriteUint8 -func (mw *Writer) WriteByte(u byte) error { return mw.WriteUint8(uint8(u)) } - -// WriteUint8 writes a uint8 to the writer -func (mw *Writer) WriteUint8(u uint8) error { return mw.WriteUint64(uint64(u)) } - -// WriteUint16 writes a uint16 to the writer -func (mw *Writer) WriteUint16(u uint16) error { return mw.WriteUint64(uint64(u)) } - -// WriteUint32 writes a uint32 to the writer -func (mw *Writer) WriteUint32(u uint32) error { return mw.WriteUint64(uint64(u)) } - -// WriteUint writes a uint to the writer -func (mw *Writer) WriteUint(u uint) error { return mw.WriteUint64(uint64(u)) } - -// WriteBytes writes binary as 'bin' to the writer -func (mw *Writer) WriteBytes(b []byte) error { - sz := uint32(len(b)) - var err error - switch { - case sz <= math.MaxUint8: - err = mw.prefix8(mbin8, uint8(sz)) - case sz <= math.MaxUint16: - err = mw.prefix16(mbin16, uint16(sz)) - default: - err = mw.prefix32(mbin32, sz) - } - if err != nil { - return err - } - _, err = mw.Write(b) - return err -} - -// WriteBytesHeader writes just the size header -// of a MessagePack 'bin' object. The user is responsible -// for then writing 'sz' more bytes into the stream. -func (mw *Writer) WriteBytesHeader(sz uint32) error { - switch { - case sz <= math.MaxUint8: - return mw.prefix8(mbin8, uint8(sz)) - case sz <= math.MaxUint16: - return mw.prefix16(mbin16, uint16(sz)) - default: - return mw.prefix32(mbin32, sz) - } -} - -// WriteBool writes a bool to the writer -func (mw *Writer) WriteBool(b bool) error { - if b { - return mw.push(mtrue) - } - return mw.push(mfalse) -} - -// WriteString writes a messagepack string to the writer. 
-// (This is NOT an implementation of io.StringWriter) -func (mw *Writer) WriteString(s string) error { - sz := uint32(len(s)) - var err error - switch { - case sz <= 31: - err = mw.push(wfixstr(uint8(sz))) - case sz <= math.MaxUint8: - err = mw.prefix8(mstr8, uint8(sz)) - case sz <= math.MaxUint16: - err = mw.prefix16(mstr16, uint16(sz)) - default: - err = mw.prefix32(mstr32, sz) - } - if err != nil { - return err - } - return mw.writeString(s) -} - -// WriteStringHeader writes just the string size -// header of a MessagePack 'str' object. The user -// is responsible for writing 'sz' more valid UTF-8 -// bytes to the stream. -func (mw *Writer) WriteStringHeader(sz uint32) error { - switch { - case sz <= 31: - return mw.push(wfixstr(uint8(sz))) - case sz <= math.MaxUint8: - return mw.prefix8(mstr8, uint8(sz)) - case sz <= math.MaxUint16: - return mw.prefix16(mstr16, uint16(sz)) - default: - return mw.prefix32(mstr32, sz) - } -} - -// WriteStringFromBytes writes a 'str' object -// from a []byte. 
-func (mw *Writer) WriteStringFromBytes(str []byte) error { - sz := uint32(len(str)) - var err error - switch { - case sz <= 31: - err = mw.push(wfixstr(uint8(sz))) - case sz <= math.MaxUint8: - err = mw.prefix8(mstr8, uint8(sz)) - case sz <= math.MaxUint16: - err = mw.prefix16(mstr16, uint16(sz)) - default: - err = mw.prefix32(mstr32, sz) - } - if err != nil { - return err - } - _, err = mw.Write(str) - return err -} - -// WriteComplex64 writes a complex64 to the writer -func (mw *Writer) WriteComplex64(f complex64) error { - o, err := mw.require(10) - if err != nil { - return err - } - mw.buf[o] = mfixext8 - mw.buf[o+1] = Complex64Extension - big.PutUint32(mw.buf[o+2:], math.Float32bits(real(f))) - big.PutUint32(mw.buf[o+6:], math.Float32bits(imag(f))) - return nil -} - -// WriteComplex128 writes a complex128 to the writer -func (mw *Writer) WriteComplex128(f complex128) error { - o, err := mw.require(18) - if err != nil { - return err - } - mw.buf[o] = mfixext16 - mw.buf[o+1] = Complex128Extension - big.PutUint64(mw.buf[o+2:], math.Float64bits(real(f))) - big.PutUint64(mw.buf[o+10:], math.Float64bits(imag(f))) - return nil -} - -// WriteMapStrStr writes a map[string]string to the writer -func (mw *Writer) WriteMapStrStr(mp map[string]string) (err error) { - err = mw.WriteMapHeader(uint32(len(mp))) - if err != nil { - return - } - for key, val := range mp { - err = mw.WriteString(key) - if err != nil { - return - } - err = mw.WriteString(val) - if err != nil { - return - } - } - return nil -} - -// WriteMapStrIntf writes a map[string]interface to the writer -func (mw *Writer) WriteMapStrIntf(mp map[string]interface{}) (err error) { - err = mw.WriteMapHeader(uint32(len(mp))) - if err != nil { - return - } - for key, val := range mp { - err = mw.WriteString(key) - if err != nil { - return - } - err = mw.WriteIntf(val) - if err != nil { - return - } - } - return -} - -// WriteTime writes a time.Time object to the wire. 
-// -// Time is encoded as Unix time, which means that -// location (time zone) data is removed from the object. -// The encoded object itself is 12 bytes: 8 bytes for -// a big-endian 64-bit integer denoting seconds -// elapsed since "zero" Unix time, followed by 4 bytes -// for a big-endian 32-bit signed integer denoting -// the nanosecond offset of the time. This encoding -// is intended to ease portability across languages. -// (Note that this is *not* the standard time.Time -// binary encoding, because its implementation relies -// heavily on the internal representation used by the -// time package.) -func (mw *Writer) WriteTime(t time.Time) error { - t = t.UTC() - o, err := mw.require(15) - if err != nil { - return err - } - mw.buf[o] = mext8 - mw.buf[o+1] = 12 - mw.buf[o+2] = TimeExtension - putUnix(mw.buf[o+3:], t.Unix(), int32(t.Nanosecond())) - return nil -} - -// WriteIntf writes the concrete type of 'v'. -// WriteIntf will error if 'v' is not one of the following: -// - A bool, float, string, []byte, int, uint, or complex -// - A map of supported types (with string keys) -// - An array or slice of supported types -// - A pointer to a supported type -// - A type that satisfies the msgp.Encodable interface -// - A type that satisfies the msgp.Extension interface -func (mw *Writer) WriteIntf(v interface{}) error { - if v == nil { - return mw.WriteNil() - } - switch v := v.(type) { - - // preferred interfaces - - case Encodable: - return v.EncodeMsg(mw) - case Extension: - return mw.WriteExtension(v) - - // concrete types - - case bool: - return mw.WriteBool(v) - case float32: - return mw.WriteFloat32(v) - case float64: - return mw.WriteFloat64(v) - case complex64: - return mw.WriteComplex64(v) - case complex128: - return mw.WriteComplex128(v) - case uint8: - return mw.WriteUint8(v) - case uint16: - return mw.WriteUint16(v) - case uint32: - return mw.WriteUint32(v) - case uint64: - return mw.WriteUint64(v) - case uint: - return mw.WriteUint(v) - case int8: 
- return mw.WriteInt8(v) - case int16: - return mw.WriteInt16(v) - case int32: - return mw.WriteInt32(v) - case int64: - return mw.WriteInt64(v) - case int: - return mw.WriteInt(v) - case string: - return mw.WriteString(v) - case []byte: - return mw.WriteBytes(v) - case map[string]string: - return mw.WriteMapStrStr(v) - case map[string]interface{}: - return mw.WriteMapStrIntf(v) - case time.Time: - return mw.WriteTime(v) - } - - val := reflect.ValueOf(v) - if !isSupported(val.Kind()) || !val.IsValid() { - return fmt.Errorf("msgp: type %s not supported", val) - } - - switch val.Kind() { - case reflect.Ptr: - if val.IsNil() { - return mw.WriteNil() - } - return mw.WriteIntf(val.Elem().Interface()) - case reflect.Slice: - return mw.writeSlice(val) - case reflect.Map: - return mw.writeMap(val) - } - return &ErrUnsupportedType{T: val.Type()} -} - -func (mw *Writer) writeMap(v reflect.Value) (err error) { - if v.Type().Key().Kind() != reflect.String { - return errors.New("msgp: map keys must be strings") - } - ks := v.MapKeys() - err = mw.WriteMapHeader(uint32(len(ks))) - if err != nil { - return - } - for _, key := range ks { - val := v.MapIndex(key) - err = mw.WriteString(key.String()) - if err != nil { - return - } - err = mw.WriteIntf(val.Interface()) - if err != nil { - return - } - } - return -} - -func (mw *Writer) writeSlice(v reflect.Value) (err error) { - // is []byte - if v.Type().ConvertibleTo(btsType) { - return mw.WriteBytes(v.Bytes()) - } - - sz := uint32(v.Len()) - err = mw.WriteArrayHeader(sz) - if err != nil { - return - } - for i := uint32(0); i < sz; i++ { - err = mw.WriteIntf(v.Index(int(i)).Interface()) - if err != nil { - return - } - } - return -} - -func (mw *Writer) writeStruct(v reflect.Value) error { - if enc, ok := v.Interface().(Encodable); ok { - return enc.EncodeMsg(mw) - } - return fmt.Errorf("msgp: unsupported type: %s", v.Type()) -} - -func (mw *Writer) writeVal(v reflect.Value) error { - if !isSupported(v.Kind()) { - return 
fmt.Errorf("msgp: msgp/enc: type %q not supported", v.Type()) - } - - // shortcut for nil values - if v.IsNil() { - return mw.WriteNil() - } - switch v.Kind() { - case reflect.Bool: - return mw.WriteBool(v.Bool()) - - case reflect.Float32, reflect.Float64: - return mw.WriteFloat64(v.Float()) - - case reflect.Complex64, reflect.Complex128: - return mw.WriteComplex128(v.Complex()) - - case reflect.Int, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int8: - return mw.WriteInt64(v.Int()) - - case reflect.Interface, reflect.Ptr: - if v.IsNil() { - mw.WriteNil() - } - return mw.writeVal(v.Elem()) - - case reflect.Map: - return mw.writeMap(v) - - case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint8: - return mw.WriteUint64(v.Uint()) - - case reflect.String: - return mw.WriteString(v.String()) - - case reflect.Slice, reflect.Array: - return mw.writeSlice(v) - - case reflect.Struct: - return mw.writeStruct(v) - - } - return fmt.Errorf("msgp: msgp/enc: type %q not supported", v.Type()) -} - -// is the reflect.Kind encodable? -func isSupported(k reflect.Kind) bool { - switch k { - case reflect.Func, reflect.Chan, reflect.Invalid, reflect.UnsafePointer: - return false - default: - return true - } -} - -// GuessSize guesses the size of the underlying -// value of 'i'. If the underlying value is not -// a simple builtin (or []byte), GuessSize defaults -// to 512. 
-func GuessSize(i interface{}) int { - if i == nil { - return NilSize - } - - switch i := i.(type) { - case Sizer: - return i.Msgsize() - case Extension: - return ExtensionPrefixSize + i.Len() - case float64: - return Float64Size - case float32: - return Float32Size - case uint8, uint16, uint32, uint64, uint: - return UintSize - case int8, int16, int32, int64, int: - return IntSize - case []byte: - return BytesPrefixSize + len(i) - case string: - return StringPrefixSize + len(i) - case complex64: - return Complex64Size - case complex128: - return Complex128Size - case bool: - return BoolSize - case map[string]interface{}: - s := MapHeaderSize - for key, val := range i { - s += StringPrefixSize + len(key) + GuessSize(val) - } - return s - case map[string]string: - s := MapHeaderSize - for key, val := range i { - s += 2*StringPrefixSize + len(key) + len(val) - } - return s - default: - return 512 - } -} diff --git a/vendor/github.com/tinylib/msgp/msgp/write_bytes.go b/vendor/github.com/tinylib/msgp/msgp/write_bytes.go deleted file mode 100644 index eaa03c46e..000000000 --- a/vendor/github.com/tinylib/msgp/msgp/write_bytes.go +++ /dev/null @@ -1,411 +0,0 @@ -package msgp - -import ( - "math" - "reflect" - "time" -) - -// ensure 'sz' extra bytes in 'b' btw len(b) and cap(b) -func ensure(b []byte, sz int) ([]byte, int) { - l := len(b) - c := cap(b) - if c-l < sz { - o := make([]byte, (2*c)+sz) // exponential growth - n := copy(o, b) - return o[:n+sz], n - } - return b[:l+sz], l -} - -// AppendMapHeader appends a map header with the -// given size to the slice -func AppendMapHeader(b []byte, sz uint32) []byte { - switch { - case sz <= 15: - return append(b, wfixmap(uint8(sz))) - - case sz <= math.MaxUint16: - o, n := ensure(b, 3) - prefixu16(o[n:], mmap16, uint16(sz)) - return o - - default: - o, n := ensure(b, 5) - prefixu32(o[n:], mmap32, sz) - return o - } -} - -// AppendArrayHeader appends an array header with -// the given size to the slice -func AppendArrayHeader(b 
[]byte, sz uint32) []byte { - switch { - case sz <= 15: - return append(b, wfixarray(uint8(sz))) - - case sz <= math.MaxUint16: - o, n := ensure(b, 3) - prefixu16(o[n:], marray16, uint16(sz)) - return o - - default: - o, n := ensure(b, 5) - prefixu32(o[n:], marray32, sz) - return o - } -} - -// AppendNil appends a 'nil' byte to the slice -func AppendNil(b []byte) []byte { return append(b, mnil) } - -// AppendFloat64 appends a float64 to the slice -func AppendFloat64(b []byte, f float64) []byte { - o, n := ensure(b, Float64Size) - prefixu64(o[n:], mfloat64, math.Float64bits(f)) - return o -} - -// AppendFloat32 appends a float32 to the slice -func AppendFloat32(b []byte, f float32) []byte { - o, n := ensure(b, Float32Size) - prefixu32(o[n:], mfloat32, math.Float32bits(f)) - return o -} - -// AppendInt64 appends an int64 to the slice -func AppendInt64(b []byte, i int64) []byte { - if i >= 0 { - switch { - case i <= math.MaxInt8: - return append(b, wfixint(uint8(i))) - case i <= math.MaxInt16: - o, n := ensure(b, 3) - putMint16(o[n:], int16(i)) - return o - case i <= math.MaxInt32: - o, n := ensure(b, 5) - putMint32(o[n:], int32(i)) - return o - default: - o, n := ensure(b, 9) - putMint64(o[n:], i) - return o - } - } - switch { - case i >= -32: - return append(b, wnfixint(int8(i))) - case i >= math.MinInt8: - o, n := ensure(b, 2) - putMint8(o[n:], int8(i)) - return o - case i >= math.MinInt16: - o, n := ensure(b, 3) - putMint16(o[n:], int16(i)) - return o - case i >= math.MinInt32: - o, n := ensure(b, 5) - putMint32(o[n:], int32(i)) - return o - default: - o, n := ensure(b, 9) - putMint64(o[n:], i) - return o - } -} - -// AppendInt appends an int to the slice -func AppendInt(b []byte, i int) []byte { return AppendInt64(b, int64(i)) } - -// AppendInt8 appends an int8 to the slice -func AppendInt8(b []byte, i int8) []byte { return AppendInt64(b, int64(i)) } - -// AppendInt16 appends an int16 to the slice -func AppendInt16(b []byte, i int16) []byte { return 
AppendInt64(b, int64(i)) } - -// AppendInt32 appends an int32 to the slice -func AppendInt32(b []byte, i int32) []byte { return AppendInt64(b, int64(i)) } - -// AppendUint64 appends a uint64 to the slice -func AppendUint64(b []byte, u uint64) []byte { - switch { - case u <= (1<<7)-1: - return append(b, wfixint(uint8(u))) - - case u <= math.MaxUint8: - o, n := ensure(b, 2) - putMuint8(o[n:], uint8(u)) - return o - - case u <= math.MaxUint16: - o, n := ensure(b, 3) - putMuint16(o[n:], uint16(u)) - return o - - case u <= math.MaxUint32: - o, n := ensure(b, 5) - putMuint32(o[n:], uint32(u)) - return o - - default: - o, n := ensure(b, 9) - putMuint64(o[n:], u) - return o - - } -} - -// AppendUint appends a uint to the slice -func AppendUint(b []byte, u uint) []byte { return AppendUint64(b, uint64(u)) } - -// AppendUint8 appends a uint8 to the slice -func AppendUint8(b []byte, u uint8) []byte { return AppendUint64(b, uint64(u)) } - -// AppendByte is analogous to AppendUint8 -func AppendByte(b []byte, u byte) []byte { return AppendUint8(b, uint8(u)) } - -// AppendUint16 appends a uint16 to the slice -func AppendUint16(b []byte, u uint16) []byte { return AppendUint64(b, uint64(u)) } - -// AppendUint32 appends a uint32 to the slice -func AppendUint32(b []byte, u uint32) []byte { return AppendUint64(b, uint64(u)) } - -// AppendBytes appends bytes to the slice as MessagePack 'bin' data -func AppendBytes(b []byte, bts []byte) []byte { - sz := len(bts) - var o []byte - var n int - switch { - case sz <= math.MaxUint8: - o, n = ensure(b, 2+sz) - prefixu8(o[n:], mbin8, uint8(sz)) - n += 2 - case sz <= math.MaxUint16: - o, n = ensure(b, 3+sz) - prefixu16(o[n:], mbin16, uint16(sz)) - n += 3 - default: - o, n = ensure(b, 5+sz) - prefixu32(o[n:], mbin32, uint32(sz)) - n += 5 - } - return o[:n+copy(o[n:], bts)] -} - -// AppendBool appends a bool to the slice -func AppendBool(b []byte, t bool) []byte { - if t { - return append(b, mtrue) - } - return append(b, mfalse) -} - -// 
AppendString appends a string as a MessagePack 'str' to the slice -func AppendString(b []byte, s string) []byte { - sz := len(s) - var n int - var o []byte - switch { - case sz <= 31: - o, n = ensure(b, 1+sz) - o[n] = wfixstr(uint8(sz)) - n++ - case sz <= math.MaxUint8: - o, n = ensure(b, 2+sz) - prefixu8(o[n:], mstr8, uint8(sz)) - n += 2 - case sz <= math.MaxUint16: - o, n = ensure(b, 3+sz) - prefixu16(o[n:], mstr16, uint16(sz)) - n += 3 - default: - o, n = ensure(b, 5+sz) - prefixu32(o[n:], mstr32, uint32(sz)) - n += 5 - } - return o[:n+copy(o[n:], s)] -} - -// AppendStringFromBytes appends a []byte -// as a MessagePack 'str' to the slice 'b.' -func AppendStringFromBytes(b []byte, str []byte) []byte { - sz := len(str) - var n int - var o []byte - switch { - case sz <= 31: - o, n = ensure(b, 1+sz) - o[n] = wfixstr(uint8(sz)) - n++ - case sz <= math.MaxUint8: - o, n = ensure(b, 2+sz) - prefixu8(o[n:], mstr8, uint8(sz)) - n += 2 - case sz <= math.MaxUint16: - o, n = ensure(b, 3+sz) - prefixu16(o[n:], mstr16, uint16(sz)) - n += 3 - default: - o, n = ensure(b, 5+sz) - prefixu32(o[n:], mstr32, uint32(sz)) - n += 5 - } - return o[:n+copy(o[n:], str)] -} - -// AppendComplex64 appends a complex64 to the slice as a MessagePack extension -func AppendComplex64(b []byte, c complex64) []byte { - o, n := ensure(b, Complex64Size) - o[n] = mfixext8 - o[n+1] = Complex64Extension - big.PutUint32(o[n+2:], math.Float32bits(real(c))) - big.PutUint32(o[n+6:], math.Float32bits(imag(c))) - return o -} - -// AppendComplex128 appends a complex128 to the slice as a MessagePack extension -func AppendComplex128(b []byte, c complex128) []byte { - o, n := ensure(b, Complex128Size) - o[n] = mfixext16 - o[n+1] = Complex128Extension - big.PutUint64(o[n+2:], math.Float64bits(real(c))) - big.PutUint64(o[n+10:], math.Float64bits(imag(c))) - return o -} - -// AppendTime appends a time.Time to the slice as a MessagePack extension -func AppendTime(b []byte, t time.Time) []byte { - o, n := ensure(b, 
TimeSize) - t = t.UTC() - o[n] = mext8 - o[n+1] = 12 - o[n+2] = TimeExtension - putUnix(o[n+3:], t.Unix(), int32(t.Nanosecond())) - return o -} - -// AppendMapStrStr appends a map[string]string to the slice -// as a MessagePack map with 'str'-type keys and values -func AppendMapStrStr(b []byte, m map[string]string) []byte { - sz := uint32(len(m)) - b = AppendMapHeader(b, sz) - for key, val := range m { - b = AppendString(b, key) - b = AppendString(b, val) - } - return b -} - -// AppendMapStrIntf appends a map[string]interface{} to the slice -// as a MessagePack map with 'str'-type keys. -func AppendMapStrIntf(b []byte, m map[string]interface{}) ([]byte, error) { - sz := uint32(len(m)) - b = AppendMapHeader(b, sz) - var err error - for key, val := range m { - b = AppendString(b, key) - b, err = AppendIntf(b, val) - if err != nil { - return b, err - } - } - return b, nil -} - -// AppendIntf appends the concrete type of 'i' to the -// provided []byte. 'i' must be one of the following: -// - 'nil' -// - A bool, float, string, []byte, int, uint, or complex -// - A map[string]interface{} or map[string]string -// - A []T, where T is another supported type -// - A *T, where T is another supported type -// - A type that satisfieds the msgp.Marshaler interface -// - A type that satisfies the msgp.Extension interface -func AppendIntf(b []byte, i interface{}) ([]byte, error) { - if i == nil { - return AppendNil(b), nil - } - - // all the concrete types - // for which we have methods - switch i := i.(type) { - case Marshaler: - return i.MarshalMsg(b) - case Extension: - return AppendExtension(b, i) - case bool: - return AppendBool(b, i), nil - case float32: - return AppendFloat32(b, i), nil - case float64: - return AppendFloat64(b, i), nil - case complex64: - return AppendComplex64(b, i), nil - case complex128: - return AppendComplex128(b, i), nil - case string: - return AppendString(b, i), nil - case []byte: - return AppendBytes(b, i), nil - case int8: - return AppendInt8(b, 
i), nil - case int16: - return AppendInt16(b, i), nil - case int32: - return AppendInt32(b, i), nil - case int64: - return AppendInt64(b, i), nil - case int: - return AppendInt64(b, int64(i)), nil - case uint: - return AppendUint64(b, uint64(i)), nil - case uint8: - return AppendUint8(b, i), nil - case uint16: - return AppendUint16(b, i), nil - case uint32: - return AppendUint32(b, i), nil - case uint64: - return AppendUint64(b, i), nil - case time.Time: - return AppendTime(b, i), nil - case map[string]interface{}: - return AppendMapStrIntf(b, i) - case map[string]string: - return AppendMapStrStr(b, i), nil - case []interface{}: - b = AppendArrayHeader(b, uint32(len(i))) - var err error - for _, k := range i { - b, err = AppendIntf(b, k) - if err != nil { - return b, err - } - } - return b, nil - } - - var err error - v := reflect.ValueOf(i) - switch v.Kind() { - case reflect.Array, reflect.Slice: - l := v.Len() - b = AppendArrayHeader(b, uint32(l)) - for i := 0; i < l; i++ { - b, err = AppendIntf(b, v.Index(i).Interface()) - if err != nil { - return b, err - } - } - return b, nil - case reflect.Ptr: - if v.IsNil() { - return AppendNil(b), err - } - b, err = AppendIntf(b, v.Elem().Interface()) - return b, err - default: - return b, &ErrUnsupportedType{T: v.Type()} - } -} diff --git a/vendor/github.com/twmb/murmur3/.gitignore b/vendor/github.com/twmb/murmur3/.gitignore new file mode 100644 index 000000000..1377554eb --- /dev/null +++ b/vendor/github.com/twmb/murmur3/.gitignore @@ -0,0 +1 @@ +*.swp diff --git a/vendor/github.com/twmb/murmur3/.travis.yml b/vendor/github.com/twmb/murmur3/.travis.yml new file mode 100644 index 000000000..b9ffa7e49 --- /dev/null +++ b/vendor/github.com/twmb/murmur3/.travis.yml @@ -0,0 +1,10 @@ +sudo: false + +language: go + +go: + - "1.10" + - "1.11" + +notifications: + email: false diff --git a/vendor/github.com/twmb/murmur3/LICENSE b/vendor/github.com/twmb/murmur3/LICENSE new file mode 100644 index 000000000..e4a085c5b --- /dev/null 
+++ b/vendor/github.com/twmb/murmur3/LICENSE @@ -0,0 +1,49 @@ +Copyright 2013, Sébastien Paolacci. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of the library nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +Copyright 2018, Travis Bischel. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of the library nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/twmb/murmur3/README.md b/vendor/github.com/twmb/murmur3/README.md new file mode 100644 index 000000000..1917fe310 --- /dev/null +++ b/vendor/github.com/twmb/murmur3/README.md @@ -0,0 +1,129 @@ +murmur3 +======= + +Native Go implementation of Austin Appleby's third MurmurHash revision (aka +MurmurHash3). + +Includes assembly for amd64 for 64/128 bit hashes, seeding functions, +and string functions to avoid string to slice conversions. + +Hand rolled 32 bit assembly was removed during 1.11, but may be reintroduced +if the compiler slows down any more. As is, the compiler generates marginally +slower code (by one instruction in the hot loop). + +The reference algorithm has been slightly hacked as to support the streaming mode +required by Go's standard [Hash interface](http://golang.org/pkg/hash/#Hash). 
+ +Endianness +========== + +Unlike the canonical source, this library **always** reads bytes as little +endian numbers. This makes the hashes portable across architectures, although +does mean that hashing is a bit slower on big endian architectures. + +Safety +====== + +This library used to use `unsafe` to convert four bytes to a `uint32` and eight +bytes to a `uint64`, but Go 1.14 introduced checks around those types of +conversions that flagged that code as erroneous when hashing on unaligned +input. While the code would not be problematic on amd64, it could be +problematic on some architectures. + +As of Go 1.14, those conversions were removed at the expense of a very minor +performance hit. This hit affects all cpu architectures on for `Sum32`, and +non-amd64 architectures for `Sum64` and `Sum128`. For 64 and 128, custom +assembly exists for amd64 that preserves performance. + +Testing +======= + +[![Build Status](https://travis-ci.org/twmb/murmur3.svg?branch=master)](https://travis-ci.org/twmb/murmur3) + +Testing includes comparing random inputs against the [canonical +implementation](https://github.com/aappleby/smhasher/blob/master/src/MurmurHash3.cpp), +and testing length 0 through 17 inputs to force all branches. + +Because this code always reads input as little endian, testing against the +canonical source is skipped for big endian architectures. The canonical source +just converts bytes to numbers, meaning on big endian architectures, it will +use different numbers for its hashing. + +Documentation +============= + +[![GoDoc](https://godoc.org/github.com/twmb/murmur3?status.svg)](https://godoc.org/github.com/twmb/murmur3) + +Full documentation can be found on `godoc`. + +Benchmarks +========== + +Benchmarks below were run on an amd64 machine with _and_ without the custom +assembly. The following numbers are for Go 1.14.1 and are comparing against +[spaolacci/murmur3](https://github.com/spaolacci/murmur3). 
+ +You will notice that at small sizes, the other library is better. This is due +to this library converting to safe code for Go 1.14. At large sizes, this +library is nearly identical to the other. On amd64, the 64 bit and 128 bit +sums come out to ~9% faster. + +32 bit sums: + +``` +32Sizes/32-12 3.00GB/s ± 1% 2.12GB/s ±11% -29.24% (p=0.000 n=9+10) +32Sizes/64-12 3.61GB/s ± 3% 2.79GB/s ± 8% -22.62% (p=0.000 n=10+10) +32Sizes/128-12 3.47GB/s ± 8% 2.79GB/s ± 4% -19.47% (p=0.000 n=10+10) +32Sizes/256-12 3.66GB/s ± 4% 3.25GB/s ± 6% -11.09% (p=0.000 n=10+10) +32Sizes/512-12 3.78GB/s ± 3% 3.54GB/s ± 4% -6.30% (p=0.000 n=9+9) +32Sizes/1024-12 3.86GB/s ± 3% 3.69GB/s ± 5% -4.46% (p=0.000 n=10+10) +32Sizes/2048-12 3.85GB/s ± 3% 3.81GB/s ± 3% ~ (p=0.079 n=10+9) +32Sizes/4096-12 3.90GB/s ± 3% 3.82GB/s ± 2% -2.14% (p=0.029 n=10+10) +32Sizes/8192-12 3.82GB/s ± 3% 3.78GB/s ± 7% ~ (p=0.529 n=10+10) +``` + +64/128 bit sums, non-amd64: + +``` +64Sizes/32-12 2.34GB/s ± 5% 2.64GB/s ± 9% +12.87% (p=0.000 n=10+10) +64Sizes/64-12 3.62GB/s ± 5% 3.96GB/s ± 4% +9.41% (p=0.000 n=10+10) +64Sizes/128-12 5.12GB/s ± 3% 5.44GB/s ± 4% +6.09% (p=0.000 n=10+9) +64Sizes/256-12 6.35GB/s ± 2% 6.27GB/s ± 9% ~ (p=0.796 n=10+10) +64Sizes/512-12 6.58GB/s ± 7% 6.79GB/s ± 3% ~ (p=0.075 n=10+10) +64Sizes/1024-12 7.49GB/s ± 3% 7.55GB/s ± 9% ~ (p=0.393 n=10+10) +64Sizes/2048-12 8.06GB/s ± 2% 7.90GB/s ± 6% ~ (p=0.156 n=9+10) +64Sizes/4096-12 8.27GB/s ± 6% 8.22GB/s ± 5% ~ (p=0.631 n=10+10) +64Sizes/8192-12 8.35GB/s ± 4% 8.38GB/s ± 6% ~ (p=0.631 n=10+10) +128Sizes/32-12 2.27GB/s ± 2% 2.68GB/s ± 5% +18.00% (p=0.000 n=10+10) +128Sizes/64-12 3.55GB/s ± 2% 4.00GB/s ± 3% +12.47% (p=0.000 n=8+9) +128Sizes/128-12 5.09GB/s ± 1% 5.43GB/s ± 3% +6.65% (p=0.000 n=9+9) +128Sizes/256-12 6.33GB/s ± 3% 5.65GB/s ± 4% -10.79% (p=0.000 n=9+10) +128Sizes/512-12 6.78GB/s ± 3% 6.74GB/s ± 6% ~ (p=0.968 n=9+10) +128Sizes/1024-12 7.46GB/s ± 4% 7.56GB/s ± 4% ~ (p=0.222 n=9+9) +128Sizes/2048-12 7.99GB/s ± 4% 7.96GB/s ± 3% ~ (p=0.666 
n=9+9) +128Sizes/4096-12 8.20GB/s ± 2% 8.25GB/s ± 4% ~ (p=0.631 n=10+10) +128Sizes/8192-12 8.24GB/s ± 2% 8.26GB/s ± 5% ~ (p=0.673 n=8+9) +``` + +64/128 bit sums, amd64: + +``` +64Sizes/32-12 2.34GB/s ± 5% 4.36GB/s ± 3% +85.86% (p=0.000 n=10+10) +64Sizes/64-12 3.62GB/s ± 5% 6.27GB/s ± 3% +73.37% (p=0.000 n=10+9) +64Sizes/128-12 5.12GB/s ± 3% 7.70GB/s ± 6% +50.27% (p=0.000 n=10+10) +64Sizes/256-12 6.35GB/s ± 2% 8.61GB/s ± 3% +35.50% (p=0.000 n=10+10) +64Sizes/512-12 6.58GB/s ± 7% 8.59GB/s ± 4% +30.48% (p=0.000 n=10+9) +64Sizes/1024-12 7.49GB/s ± 3% 8.81GB/s ± 2% +17.66% (p=0.000 n=10+10) +64Sizes/2048-12 8.06GB/s ± 2% 8.90GB/s ± 4% +10.49% (p=0.000 n=9+10) +64Sizes/4096-12 8.27GB/s ± 6% 8.90GB/s ± 4% +7.54% (p=0.000 n=10+10) +64Sizes/8192-12 8.35GB/s ± 4% 9.00GB/s ± 3% +7.80% (p=0.000 n=10+9) +128Sizes/32-12 2.27GB/s ± 2% 4.29GB/s ± 9% +88.75% (p=0.000 n=10+10) +128Sizes/64-12 3.55GB/s ± 2% 6.10GB/s ± 8% +71.78% (p=0.000 n=8+10) +128Sizes/128-12 5.09GB/s ± 1% 7.62GB/s ± 9% +49.63% (p=0.000 n=9+10) +128Sizes/256-12 6.33GB/s ± 3% 8.65GB/s ± 3% +36.71% (p=0.000 n=9+10) +128Sizes/512-12 6.78GB/s ± 3% 8.39GB/s ± 6% +23.77% (p=0.000 n=9+10) +128Sizes/1024-12 7.46GB/s ± 4% 8.70GB/s ± 4% +16.70% (p=0.000 n=9+10) +128Sizes/2048-12 7.99GB/s ± 4% 8.73GB/s ± 8% +9.26% (p=0.003 n=9+10) +128Sizes/4096-12 8.20GB/s ± 2% 8.86GB/s ± 6% +8.00% (p=0.000 n=10+10) +128Sizes/8192-12 8.24GB/s ± 2% 9.01GB/s ± 3% +9.30% (p=0.000 n=8+10) +``` diff --git a/vendor/github.com/twmb/murmur3/murmur.go b/vendor/github.com/twmb/murmur3/murmur.go new file mode 100644 index 000000000..f30fc4361 --- /dev/null +++ b/vendor/github.com/twmb/murmur3/murmur.go @@ -0,0 +1,72 @@ +// Copyright 2013, Sébastien Paolacci. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package murmur3 provides an amd64 native (Go generic fallback) +// implementation of the murmur3 hash algorithm for strings and slices. 
+// +// Assembly is provided for amd64 go1.5+; pull requests are welcome for other +// architectures. +package murmur3 + +import ( + "reflect" + "unsafe" +) + +type bmixer interface { + bmix(p []byte) (tail []byte) + Size() (n int) + reset() +} + +type digest struct { + clen int // Digested input cumulative length. + tail []byte // 0 to Size()-1 bytes view of `buf'. + buf [16]byte // Expected (but not required) to be Size() large. + bmixer +} + +func (d *digest) BlockSize() int { return 1 } + +func (d *digest) Write(p []byte) (n int, err error) { + n = len(p) + d.clen += n + + if len(d.tail) > 0 { + // Stick back pending bytes. + nfree := d.Size() - len(d.tail) // nfree ∈ [1, d.Size()-1]. + if nfree < len(p) { + // One full block can be formed. + block := append(d.tail, p[:nfree]...) + p = p[nfree:] + _ = d.bmix(block) // No tail. + } else { + // Tail's buf is large enough to prevent reallocs. + p = append(d.tail, p...) + } + } + + d.tail = d.bmix(p) + + // Keep own copy of the 0 to Size()-1 pending bytes. + nn := copy(d.buf[:], d.tail) + d.tail = d.buf[:nn] + + return n, nil +} + +func (d *digest) Reset() { + d.clen = 0 + d.tail = nil + d.bmixer.reset() +} + +func strslice(slice []byte) string { + var str string + slicehdr := ((*reflect.SliceHeader)(unsafe.Pointer(&slice))) + strhdr := (*reflect.StringHeader)(unsafe.Pointer(&str)) + strhdr.Data = slicehdr.Data + strhdr.Len = slicehdr.Len + return str +} diff --git a/vendor/github.com/twmb/murmur3/murmur128.go b/vendor/github.com/twmb/murmur3/murmur128.go new file mode 100644 index 000000000..d02199f3e --- /dev/null +++ b/vendor/github.com/twmb/murmur3/murmur128.go @@ -0,0 +1,182 @@ +package murmur3 + +import ( + "hash" + "math/bits" +) + +const ( + c1_128 = 0x87c37b91114253d5 + c2_128 = 0x4cf5ad432745937f +) + +// Make sure interfaces are correctly implemented. 
+var ( + _ hash.Hash = new(digest128) + _ Hash128 = new(digest128) + _ bmixer = new(digest128) +) + +// Hash128 provides an interface for a streaming 128 bit hash. +type Hash128 interface { + hash.Hash + Sum128() (uint64, uint64) +} + +// digest128 represents a partial evaluation of a 128 bites hash. +type digest128 struct { + digest + seed1 uint64 + seed2 uint64 + h1 uint64 // Unfinalized running hash part 1. + h2 uint64 // Unfinalized running hash part 2. +} + +// SeedNew128 returns a Hash128 for streaming 128 bit sums with its internal +// digests initialized to seed1 and seed2. +// +// The canonical implementation allows one only uint32 seed; to imitate that +// behavior, use the same, uint32-max seed for seed1 and seed2. +func SeedNew128(seed1, seed2 uint64) Hash128 { + d := &digest128{seed1: seed1, seed2: seed2} + d.bmixer = d + d.Reset() + return d +} + +// New128 returns a Hash128 for streaming 128 bit sums. +func New128() Hash128 { + return SeedNew128(0, 0) +} + +func (d *digest128) Size() int { return 16 } + +func (d *digest128) reset() { d.h1, d.h2 = d.seed1, d.seed2 } + +func (d *digest128) Sum(b []byte) []byte { + h1, h2 := d.Sum128() + return append(b, + byte(h1>>56), byte(h1>>48), byte(h1>>40), byte(h1>>32), + byte(h1>>24), byte(h1>>16), byte(h1>>8), byte(h1), + + byte(h2>>56), byte(h2>>48), byte(h2>>40), byte(h2>>32), + byte(h2>>24), byte(h2>>16), byte(h2>>8), byte(h2), + ) +} + +func (d *digest128) bmix(p []byte) (tail []byte) { + h1, h2 := d.h1, d.h2 + + for len(p) >= 16 { + k1 := uint64(p[0]) | uint64(p[1])<<8 | uint64(p[2])<<16 | uint64(p[3])<<24 | uint64(p[4])<<32 | uint64(p[5])<<40 | uint64(p[6])<<48 | uint64(p[7])<<56 + k2 := uint64(p[8]) | uint64(p[9])<<8 | uint64(p[10])<<16 | uint64(p[11])<<24 | uint64(p[12])<<32 | uint64(p[13])<<40 | uint64(p[14])<<48 | uint64(p[15])<<56 + p = p[16:] + + k1 *= c1_128 + k1 = bits.RotateLeft64(k1, 31) + k1 *= c2_128 + h1 ^= k1 + + h1 = bits.RotateLeft64(h1, 27) + h1 += h2 + h1 = h1*5 + 0x52dce729 + + k2 *= 
c2_128 + k2 = bits.RotateLeft64(k2, 33) + k2 *= c1_128 + h2 ^= k2 + + h2 = bits.RotateLeft64(h2, 31) + h2 += h1 + h2 = h2*5 + 0x38495ab5 + } + d.h1, d.h2 = h1, h2 + return p +} + +func (d *digest128) Sum128() (h1, h2 uint64) { + + h1, h2 = d.h1, d.h2 + + var k1, k2 uint64 + switch len(d.tail) & 15 { + case 15: + k2 ^= uint64(d.tail[14]) << 48 + fallthrough + case 14: + k2 ^= uint64(d.tail[13]) << 40 + fallthrough + case 13: + k2 ^= uint64(d.tail[12]) << 32 + fallthrough + case 12: + k2 ^= uint64(d.tail[11]) << 24 + fallthrough + case 11: + k2 ^= uint64(d.tail[10]) << 16 + fallthrough + case 10: + k2 ^= uint64(d.tail[9]) << 8 + fallthrough + case 9: + k2 ^= uint64(d.tail[8]) << 0 + + k2 *= c2_128 + k2 = bits.RotateLeft64(k2, 33) + k2 *= c1_128 + h2 ^= k2 + + fallthrough + + case 8: + k1 ^= uint64(d.tail[7]) << 56 + fallthrough + case 7: + k1 ^= uint64(d.tail[6]) << 48 + fallthrough + case 6: + k1 ^= uint64(d.tail[5]) << 40 + fallthrough + case 5: + k1 ^= uint64(d.tail[4]) << 32 + fallthrough + case 4: + k1 ^= uint64(d.tail[3]) << 24 + fallthrough + case 3: + k1 ^= uint64(d.tail[2]) << 16 + fallthrough + case 2: + k1 ^= uint64(d.tail[1]) << 8 + fallthrough + case 1: + k1 ^= uint64(d.tail[0]) << 0 + k1 *= c1_128 + k1 = bits.RotateLeft64(k1, 31) + k1 *= c2_128 + h1 ^= k1 + } + + h1 ^= uint64(d.clen) + h2 ^= uint64(d.clen) + + h1 += h2 + h2 += h1 + + h1 = fmix64(h1) + h2 = fmix64(h2) + + h1 += h2 + h2 += h1 + + return h1, h2 +} + +func fmix64(k uint64) uint64 { + k ^= k >> 33 + k *= 0xff51afd7ed558ccd + k ^= k >> 33 + k *= 0xc4ceb9fe1a85ec53 + k ^= k >> 33 + return k +} diff --git a/vendor/github.com/twmb/murmur3/murmur128_amd64.s b/vendor/github.com/twmb/murmur3/murmur128_amd64.s new file mode 100644 index 000000000..5272b3225 --- /dev/null +++ b/vendor/github.com/twmb/murmur3/murmur128_amd64.s @@ -0,0 +1,246 @@ +// +build go1.5,amd64 + +// SeedSum128(seed1, seed2 uint64, data []byte) (h1 uint64, h2 uint64) +TEXT ·SeedSum128(SB), $0-56 + MOVQ seed1+0(FP), R12 + MOVQ 
seed2+8(FP), R13 + MOVQ data_base+16(FP), SI + MOVQ data_len+24(FP), R9 + LEAQ h1+40(FP), BX + JMP sum128internal<>(SB) + +// Sum128(data []byte) (h1 uint64, h2 uint64) +TEXT ·Sum128(SB), $0-40 + XORQ R12, R12 + XORQ R13, R13 + MOVQ data_base+0(FP), SI + MOVQ data_len+8(FP), R9 + LEAQ h1+24(FP), BX + JMP sum128internal<>(SB) + +// SeedStringSum128(seed1, seed2 uint64, data string) (h1 uint64, h2 uint64) +TEXT ·SeedStringSum128(SB), $0-48 + MOVQ seed1+0(FP), R12 + MOVQ seed2+8(FP), R13 + MOVQ data_base+16(FP), SI + MOVQ data_len+24(FP), R9 + LEAQ h1+32(FP), BX + JMP sum128internal<>(SB) + +// StringSum128(data string) (h1 uint64, h2 uint64) +TEXT ·StringSum128(SB), $0-32 + XORQ R12, R12 + XORQ R13, R13 + MOVQ data_base+0(FP), SI + MOVQ data_len+8(FP), R9 + LEAQ h1+16(FP), BX + JMP sum128internal<>(SB) + +// Expects: +// R12 == h1 uint64 seed +// R13 == h2 uint64 seed +// SI == &data +// R9 == len(data) +// BX == &[2]uint64 return +TEXT sum128internal<>(SB), $0 + MOVQ $0x87c37b91114253d5, R14 // c1 + MOVQ $0x4cf5ad432745937f, R15 // c2 + + MOVQ R9, CX + ANDQ $-16, CX // cx == data_len - (data_len % 16) + + // for r10 = 0; r10 < cx; r10 += 16 {... 
+ XORQ R10, R10 + +loop: + CMPQ R10, CX + JE tail + MOVQ (SI)(R10*1), AX + MOVQ 8(SI)(R10*1), DX + ADDQ $16, R10 + + IMULQ R14, AX + IMULQ R15, DX + + ROLQ $31, AX + ROLQ $33, DX + + IMULQ R15, AX + IMULQ R14, DX + + XORQ AX, R12 + ROLQ $27, R12 + ADDQ R13, R12 + XORQ DX, R13 + ROLQ $31, R13 + LEAQ 0x52dce729(R12)(R12*4), R12 + + ADDQ R12, R13 + LEAQ 0x38495ab5(R13)(R13*4), R13 + + JMP loop + +tail: + MOVQ R9, CX + ANDQ $0xf, CX + JZ finalize // if len % 16 == 0 + + XORQ AX, AX + + // poor man's binary tree jump table + SUBQ $8, CX + JZ tail8 + JG over8 + ADDQ $4, CX + JZ tail4 + JG over4 + ADDQ $2, CX + JL tail1 + JZ tail2 + JMP tail3 + +over4: + SUBQ $2, CX + JL tail5 + JZ tail6 + JMP tail7 + +over8: + SUBQ $4, CX + JZ tail12 + JG over12 + ADDQ $2, CX + JL tail9 + JZ tail10 + JMP tail11 + +over12: + SUBQ $2, CX + JL tail13 + JZ tail14 + +tail15: + MOVBQZX 14(SI)(R10*1), AX + SALQ $16, AX + +tail14: + MOVW 12(SI)(R10*1), AX + SALQ $32, AX + JMP tail12 + +tail13: + MOVBQZX 12(SI)(R10*1), AX + SALQ $32, AX + +tail12: + MOVL 8(SI)(R10*1), DX + ORQ DX, AX + JMP fintailhigh + +tail11: + MOVBQZX 10(SI)(R10*1), AX + SALQ $16, AX + +tail10: + MOVW 8(SI)(R10*1), AX + JMP fintailhigh + +tail9: + MOVB 8(SI)(R10*1), AL + +fintailhigh: + IMULQ R15, AX + ROLQ $33, AX + IMULQ R14, AX + XORQ AX, R13 + +tail8: + MOVQ (SI)(R10*1), AX + JMP fintaillow + +tail7: + MOVBQZX 6(SI)(R10*1), AX + SALQ $16, AX + +tail6: + MOVW 4(SI)(R10*1), AX + SALQ $32, AX + JMP tail4 + +tail5: + MOVBQZX 4(SI)(R10*1), AX + SALQ $32, AX + +tail4: + MOVL (SI)(R10*1), DX + ORQ DX, AX + JMP fintaillow + +tail3: + MOVBQZX 2(SI)(R10*1), AX + SALQ $16, AX + +tail2: + MOVW (SI)(R10*1), AX + JMP fintaillow + +tail1: + MOVB (SI)(R10*1), AL + +fintaillow: + IMULQ R14, AX + ROLQ $31, AX + IMULQ R15, AX + XORQ AX, R12 + +finalize: + XORQ R9, R12 + XORQ R9, R13 + + ADDQ R13, R12 + ADDQ R12, R13 + + // fmix128 (both interleaved) + MOVQ R12, DX + MOVQ R13, AX + + SHRQ $33, DX + SHRQ $33, AX + + XORQ DX, R12 + XORQ AX, 
R13 + + MOVQ $0xff51afd7ed558ccd, CX + + IMULQ CX, R12 + IMULQ CX, R13 + + MOVQ R12, DX + MOVQ R13, AX + + SHRQ $33, DX + SHRQ $33, AX + + XORQ DX, R12 + XORQ AX, R13 + + MOVQ $0xc4ceb9fe1a85ec53, CX + + IMULQ CX, R12 + IMULQ CX, R13 + + MOVQ R12, DX + MOVQ R13, AX + + SHRQ $33, DX + SHRQ $33, AX + + XORQ DX, R12 + XORQ AX, R13 + + ADDQ R13, R12 + ADDQ R12, R13 + + MOVQ R12, (BX) + MOVQ R13, 8(BX) + RET diff --git a/vendor/github.com/twmb/murmur3/murmur128_decl.go b/vendor/github.com/twmb/murmur3/murmur128_decl.go new file mode 100644 index 000000000..a380b444e --- /dev/null +++ b/vendor/github.com/twmb/murmur3/murmur128_decl.go @@ -0,0 +1,34 @@ +// +build go1.5,amd64 + +package murmur3 + +//go:noescape + +// Sum128 returns the murmur3 sum of data. It is equivalent to the following +// sequence (without the extra burden and the extra allocation): +// hasher := New128() +// hasher.Write(data) +// return hasher.Sum128() +func Sum128(data []byte) (h1 uint64, h2 uint64) + +//go:noescape + +// SeedSum128 returns the murmur3 sum of data with digests initialized to seed1 +// and seed2. +// +// The canonical implementation allows only one uint32 seed; to imitate that +// behavior, use the same, uint32-max seed for seed1 and seed2. +// +// This reads and processes the data in chunks of little endian uint64s; +// thus, the returned hashes are portable across architectures. +func SeedSum128(seed1, seed2 uint64, data []byte) (h1 uint64, h2 uint64) + +//go:noescape + +// StringSum128 is the string version of Sum128. +func StringSum128(data string) (h1 uint64, h2 uint64) + +//go:noescape + +// SeedStringSum128 is the string version of SeedSum128. 
+func SeedStringSum128(seed1, seed2 uint64, data string) (h1 uint64, h2 uint64) diff --git a/vendor/github.com/twmb/murmur3/murmur128_gen.go b/vendor/github.com/twmb/murmur3/murmur128_gen.go new file mode 100644 index 000000000..58425bce2 --- /dev/null +++ b/vendor/github.com/twmb/murmur3/murmur128_gen.go @@ -0,0 +1,135 @@ +// +build !go1.5 !amd64 + +package murmur3 + +import "math/bits" + +// SeedSum128 returns the murmur3 sum of data with digests initialized to seed1 +// and seed2. +// +// The canonical implementation allows only one uint32 seed; to imitate that +// behavior, use the same, uint32-max seed for seed1 and seed2. +// +// This reads and processes the data in chunks of little endian uint64s; +// thus, the returned hashes are portable across architectures. +func SeedSum128(seed1, seed2 uint64, data []byte) (h1 uint64, h2 uint64) { + return SeedStringSum128(seed1, seed2, strslice(data)) +} + +// Sum128 returns the murmur3 sum of data. It is equivalent to the following +// sequence (without the extra burden and the extra allocation): +// hasher := New128() +// hasher.Write(data) +// return hasher.Sum128() +func Sum128(data []byte) (h1 uint64, h2 uint64) { + return SeedStringSum128(0, 0, strslice(data)) +} + +// StringSum128 is the string version of Sum128. +func StringSum128(data string) (h1 uint64, h2 uint64) { + return SeedStringSum128(0, 0, data) +} + +// SeedStringSum128 is the string version of SeedSum128. 
+func SeedStringSum128(seed1, seed2 uint64, data string) (h1 uint64, h2 uint64) { + h1, h2 = seed1, seed2 + clen := len(data) + for len(data) >= 16 { + // yes, this is faster than using binary.LittleEndian.Uint64 + k1 := uint64(data[0]) | uint64(data[1])<<8 | uint64(data[2])<<16 | uint64(data[3])<<24 | uint64(data[4])<<32 | uint64(data[5])<<40 | uint64(data[6])<<48 | uint64(data[7])<<56 + k2 := uint64(data[8]) | uint64(data[9])<<8 | uint64(data[10])<<16 | uint64(data[11])<<24 | uint64(data[12])<<32 | uint64(data[13])<<40 | uint64(data[14])<<48 | uint64(data[15])<<56 + + data = data[16:] + + k1 *= c1_128 + k1 = bits.RotateLeft64(k1, 31) + k1 *= c2_128 + h1 ^= k1 + + h1 = bits.RotateLeft64(h1, 27) + h1 += h2 + h1 = h1*5 + 0x52dce729 + + k2 *= c2_128 + k2 = bits.RotateLeft64(k2, 33) + k2 *= c1_128 + h2 ^= k2 + + h2 = bits.RotateLeft64(h2, 31) + h2 += h1 + h2 = h2*5 + 0x38495ab5 + } + + var k1, k2 uint64 + switch len(data) { + case 15: + k2 ^= uint64(data[14]) << 48 + fallthrough + case 14: + k2 ^= uint64(data[13]) << 40 + fallthrough + case 13: + k2 ^= uint64(data[12]) << 32 + fallthrough + case 12: + k2 ^= uint64(data[11]) << 24 + fallthrough + case 11: + k2 ^= uint64(data[10]) << 16 + fallthrough + case 10: + k2 ^= uint64(data[9]) << 8 + fallthrough + case 9: + k2 ^= uint64(data[8]) << 0 + + k2 *= c2_128 + k2 = bits.RotateLeft64(k2, 33) + k2 *= c1_128 + h2 ^= k2 + + fallthrough + + case 8: + k1 ^= uint64(data[7]) << 56 + fallthrough + case 7: + k1 ^= uint64(data[6]) << 48 + fallthrough + case 6: + k1 ^= uint64(data[5]) << 40 + fallthrough + case 5: + k1 ^= uint64(data[4]) << 32 + fallthrough + case 4: + k1 ^= uint64(data[3]) << 24 + fallthrough + case 3: + k1 ^= uint64(data[2]) << 16 + fallthrough + case 2: + k1 ^= uint64(data[1]) << 8 + fallthrough + case 1: + k1 ^= uint64(data[0]) << 0 + k1 *= c1_128 + k1 = bits.RotateLeft64(k1, 31) + k1 *= c2_128 + h1 ^= k1 + } + + h1 ^= uint64(clen) + h2 ^= uint64(clen) + + h1 += h2 + h2 += h1 + + h1 = fmix64(h1) + h2 = 
fmix64(h2) + + h1 += h2 + h2 += h1 + + return h1, h2 +} diff --git a/vendor/github.com/twmb/murmur3/murmur32.go b/vendor/github.com/twmb/murmur3/murmur32.go new file mode 100644 index 000000000..f61b58f7f --- /dev/null +++ b/vendor/github.com/twmb/murmur3/murmur32.go @@ -0,0 +1,100 @@ +package murmur3 + +import ( + "hash" + "math/bits" +) + +// Make sure interfaces are correctly implemented. +var ( + _ hash.Hash = new(digest32) + _ hash.Hash32 = new(digest32) +) + +const ( + c1_32 uint32 = 0xcc9e2d51 + c2_32 uint32 = 0x1b873593 +) + +// digest32 represents a partial evaluation of a 32 bites hash. +type digest32 struct { + digest + seed uint32 + h1 uint32 // Unfinalized running hash. +} + +// SeedNew32 returns a hash.Hash32 for streaming 32 bit sums with its internal +// digest initialized to seed. +// +// This reads and processes the data in chunks of little endian uint32s; +// thus, the returned hash is portable across architectures. +func SeedNew32(seed uint32) hash.Hash32 { + d := &digest32{seed: seed} + d.bmixer = d + d.Reset() + return d +} + +// New32 returns a hash.Hash32 for streaming 32 bit sums. +func New32() hash.Hash32 { + return SeedNew32(0) +} + +func (d *digest32) Size() int { return 4 } + +func (d *digest32) reset() { d.h1 = d.seed } + +func (d *digest32) Sum(b []byte) []byte { + h := d.Sum32() + return append(b, byte(h>>24), byte(h>>16), byte(h>>8), byte(h)) +} + +// Digest as many blocks as possible. 
+func (d *digest32) bmix(p []byte) (tail []byte) { + h1 := d.h1 + + for len(p) >= 4 { + k1 := uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 | uint32(p[3])<<24 + p = p[4:] + + k1 *= c1_32 + k1 = bits.RotateLeft32(k1, 15) + k1 *= c2_32 + + h1 ^= k1 + h1 = bits.RotateLeft32(h1, 13) + h1 = h1*5 + 0xe6546b64 + } + d.h1 = h1 + return p +} + +func (d *digest32) Sum32() (h1 uint32) { + + h1 = d.h1 + var k1 uint32 + switch len(d.tail) & 3 { + case 3: + k1 ^= uint32(d.tail[2]) << 16 + fallthrough + case 2: + k1 ^= uint32(d.tail[1]) << 8 + fallthrough + case 1: + k1 ^= uint32(d.tail[0]) + k1 *= c1_32 + k1 = bits.RotateLeft32(k1, 15) + k1 *= c2_32 + h1 ^= k1 + } + + h1 ^= uint32(d.clen) + + h1 ^= h1 >> 16 + h1 *= 0x85ebca6b + h1 ^= h1 >> 13 + h1 *= 0xc2b2ae35 + h1 ^= h1 >> 16 + + return h1 +} diff --git a/vendor/github.com/twmb/murmur3/murmur32_gen.go b/vendor/github.com/twmb/murmur3/murmur32_gen.go new file mode 100644 index 000000000..49c713308 --- /dev/null +++ b/vendor/github.com/twmb/murmur3/murmur32_gen.go @@ -0,0 +1,69 @@ +package murmur3 + +import "math/bits" + +// SeedSum32 returns the murmur3 sum of data with the digest initialized to +// seed. +// +// This reads and processes the data in chunks of little endian uint32s; +// thus, the returned hash is portable across architectures. +func SeedSum32(seed uint32, data []byte) (h1 uint32) { + return SeedStringSum32(seed, strslice(data)) +} + +// Sum32 returns the murmur3 sum of data. It is equivalent to the following +// sequence (without the extra burden and the extra allocation): +// hasher := New32() +// hasher.Write(data) +// return hasher.Sum32() +func Sum32(data []byte) uint32 { + return SeedStringSum32(0, strslice(data)) +} + +// StringSum32 is the string version of Sum32. +func StringSum32(data string) uint32 { + return SeedStringSum32(0, data) +} + +// SeedStringSum32 is the string version of SeedSum32. 
+func SeedStringSum32(seed uint32, data string) (h1 uint32) { + h1 = seed + clen := uint32(len(data)) + for len(data) >= 4 { + k1 := uint32(data[0]) | uint32(data[1])<<8 | uint32(data[2])<<16 | uint32(data[3])<<24 + data = data[4:] + + k1 *= c1_32 + k1 = bits.RotateLeft32(k1, 15) + k1 *= c2_32 + + h1 ^= k1 + h1 = bits.RotateLeft32(h1, 13) + h1 = h1*5 + 0xe6546b64 + } + var k1 uint32 + switch len(data) { + case 3: + k1 ^= uint32(data[2]) << 16 + fallthrough + case 2: + k1 ^= uint32(data[1]) << 8 + fallthrough + case 1: + k1 ^= uint32(data[0]) + k1 *= c1_32 + k1 = bits.RotateLeft32(k1, 15) + k1 *= c2_32 + h1 ^= k1 + } + + h1 ^= uint32(clen) + + h1 ^= h1 >> 16 + h1 *= 0x85ebca6b + h1 ^= h1 >> 13 + h1 *= 0xc2b2ae35 + h1 ^= h1 >> 16 + + return h1 +} diff --git a/vendor/github.com/twmb/murmur3/murmur64.go b/vendor/github.com/twmb/murmur3/murmur64.go new file mode 100644 index 000000000..3dfd1d96f --- /dev/null +++ b/vendor/github.com/twmb/murmur3/murmur64.go @@ -0,0 +1,70 @@ +package murmur3 + +import ( + "hash" +) + +// Make sure interfaces are correctly implemented. +var ( + _ hash.Hash = new(digest64) + _ hash.Hash64 = new(digest64) + _ bmixer = new(digest64) +) + +// digest64 is half a digest128. +type digest64 digest128 + +// SeedNew64 returns a hash.Hash64 for streaming 64 bit sums. As the canonical +// implementation does not support Sum64, this uses SeedNew128(seed, seed) +func SeedNew64(seed uint64) hash.Hash64 { + return (*digest64)(SeedNew128(seed, seed).(*digest128)) +} + +// New64 returns a hash.Hash64 for streaming 64 bit sums. +func New64() hash.Hash64 { + return SeedNew64(0) +} + +func (d *digest64) Sum(b []byte) []byte { + h1 := d.Sum64() + return append(b, + byte(h1>>56), byte(h1>>48), byte(h1>>40), byte(h1>>32), + byte(h1>>24), byte(h1>>16), byte(h1>>8), byte(h1)) +} + +func (d *digest64) Sum64() uint64 { + h1, _ := (*digest128)(d).Sum128() + return h1 +} + +// Sum64 returns the murmur3 sum of data. 
It is equivalent to the following +// sequence (without the extra burden and the extra allocation): +// hasher := New64() +// hasher.Write(data) +// return hasher.Sum64() +func Sum64(data []byte) uint64 { + h1, _ := Sum128(data) + return h1 +} + +// SeedSum64 returns the murmur3 sum of data with the digest initialized to +// seed. +// +// Because the canonical implementation does not support SeedSum64, this uses +// SeedSum128(seed, seed, data). +func SeedSum64(seed uint64, data []byte) uint64 { + h1, _ := SeedSum128(seed, seed, data) + return h1 +} + +// StringSum64 is the string version of Sum64. +func StringSum64(data string) uint64 { + h1, _ := StringSum128(data) + return h1 +} + +// SeedStringSum64 is the string version of SeedSum64. +func SeedStringSum64(seed uint64, data string) uint64 { + h1, _ := SeedStringSum128(seed, seed, data) + return h1 +} diff --git a/vendor/github.com/uber-go/tally/glide.lock b/vendor/github.com/uber-go/tally/glide.lock deleted file mode 100644 index bb90fb08c..000000000 --- a/vendor/github.com/uber-go/tally/glide.lock +++ /dev/null @@ -1,74 +0,0 @@ -hash: 6c5a37d4f995175d7ab310d09b5866057c683536b0ae3d8f478f87943aa03be4 -updated: 2019-11-07T15:02:21.080076-05:00 -imports: -- name: github.com/beorn7/perks - version: 3a771d992973f24aa725d07868b467d1ddfceafb - subpackages: - - quantile -- name: github.com/cactus/go-statsd-client - version: 138b925ccdf617776955904ba7759fce64406cec - subpackages: - - statsd -- name: github.com/golang/protobuf - version: 14aad3d5ea4c323bcd7a2137e735da24a76e814c - subpackages: - - proto -- name: github.com/m3db/prometheus_client_golang - version: 8ae269d24972b8695572fa6b2e3718b5ea82d6b4 - subpackages: - - prometheus - - prometheus/promhttp -- name: github.com/m3db/prometheus_client_model - version: d3fff8420252ef63bffb96f689d1a85096c97321 - subpackages: - - go -- name: github.com/m3db/prometheus_common - version: d550673fc477123acb69017380567e8fafc765fc - subpackages: - - expfmt - - 
internal/bitbucket.org/ww/goautoneg - - model -- name: github.com/m3db/prometheus_procfs - version: 1878d9fbb537119d24b21ca07effd591627cd160 -- name: github.com/matttproud/golang_protobuf_extensions - version: c12348ce28de40eed0136aa2b644d0ee0650e56c - subpackages: - - pbutil -- name: github.com/pkg/errors - version: ba968bfe8b2f7e042a574c888954fccecfa385b4 -- name: go.uber.org/atomic - version: 9dc4df04d0d1c39369750a9f6c32c39560672089 -- name: gopkg.in/validator.v2 - version: 135c24b11c19e52befcae2ec3fca5d9b78c4e98e -- name: gopkg.in/yaml.v2 - version: 5420a8b6744d3b0345ab293f6fcba19c978f1183 -testImports: -- name: github.com/axw/gocov - version: 54b98cfcac0c63fb3f9bd8e7ad241b724d4e985b - subpackages: - - gocov -- name: github.com/davecgh/go-spew - version: 5215b55f46b2b919f50a1df0eaa5886afe4e3b3d - subpackages: - - spew -- name: github.com/golang/lint - version: c7bacac2b21ca01afa1dee0acf64df3ce047c28f - subpackages: - - golint -- name: github.com/mattn/goveralls - version: f4d273b02ce1b4e48acf3662b717aa987bfc4118 -- name: github.com/pborman/uuid - version: c55201b036063326c5b1b89ccfe45a184973d073 -- name: github.com/pmezard/go-difflib - version: d8ed2627bdf02c080bf22230dbb337003b7aba2d - subpackages: - - difflib -- name: github.com/stretchr/testify - version: d77da356e56a7428ad25149ca77381849a6a5232 - subpackages: - - assert - - require -- name: golang.org/x/tools - version: 3fe2afc9e626f32e91aff6eddb78b14743446865 - subpackages: - - cover diff --git a/vendor/github.com/uber-go/tally/glide.yaml b/vendor/github.com/uber-go/tally/glide.yaml deleted file mode 100644 index 8dd97e92e..000000000 --- a/vendor/github.com/uber-go/tally/glide.yaml +++ /dev/null @@ -1,44 +0,0 @@ -package: github.com/uber-go/tally -import: -- package: github.com/cactus/go-statsd-client - version: ~3.1.0 - subpackages: - - statsd -- package: github.com/m3db/prometheus_client_golang - version: ^0.8.1 - subpackages: - - prometheus -- package: github.com/m3db/prometheus_client_model - version: 
^0.1.0 -- package: github.com/m3db/prometheus_common - version: ^0.1.0 -- package: github.com/m3db/prometheus_procfs - version: ^0.8.1 -- package: go.uber.org/atomic - version: ^1 -- package: github.com/pkg/errors - version: ^0.8.1 -testImport: -- package: github.com/axw/gocov - version: 54b98cfcac0c63fb3f9bd8e7ad241b724d4e985b - subpackages: - - gocov -- package: github.com/mattn/goveralls - version: f4d273b02ce1b4e48acf3662b717aa987bfc4118 -- package: golang.org/x/tools - version: 3fe2afc9e626f32e91aff6eddb78b14743446865 - subpackages: - - cover -- package: github.com/golang/lint - version: c7bacac2b21ca01afa1dee0acf64df3ce047c28f - subpackages: - - golint -- package: github.com/pborman/uuid - version: c55201b036063326c5b1b89ccfe45a184973d073 -- package: github.com/stretchr/testify - version: d77da356e56a7428ad25149ca77381849a6a5232 - subpackages: - - assert - - require -- package: gopkg.in/validator.v2 -- package: gopkg.in/yaml.v2 diff --git a/vendor/github.com/uber-go/tally/.gitignore b/vendor/github.com/uber-go/tally/v4/.gitignore similarity index 100% rename from vendor/github.com/uber-go/tally/.gitignore rename to vendor/github.com/uber-go/tally/v4/.gitignore diff --git a/vendor/github.com/uber-go/tally/.travis.yml b/vendor/github.com/uber-go/tally/v4/.travis.yml similarity index 91% rename from vendor/github.com/uber-go/tally/.travis.yml rename to vendor/github.com/uber-go/tally/v4/.travis.yml index 1b1e8c16c..990e93bd8 100644 --- a/vendor/github.com/uber-go/tally/.travis.yml +++ b/vendor/github.com/uber-go/tally/v4/.travis.yml @@ -1,9 +1,9 @@ language: go sudo: false go: - - 1.13.x - 1.14.x - - tip + - 1.15.x + - 1.16.x env: global: - GO15VENDOREXPERIMENT=1 diff --git a/vendor/github.com/uber-go/tally/LICENSE b/vendor/github.com/uber-go/tally/v4/LICENSE similarity index 100% rename from vendor/github.com/uber-go/tally/LICENSE rename to vendor/github.com/uber-go/tally/v4/LICENSE diff --git a/vendor/github.com/uber-go/tally/Makefile 
b/vendor/github.com/uber-go/tally/v4/Makefile similarity index 86% rename from vendor/github.com/uber-go/tally/Makefile rename to vendor/github.com/uber-go/tally/v4/Makefile index 6b268caf5..52bfc508f 100644 --- a/vendor/github.com/uber-go/tally/Makefile +++ b/vendor/github.com/uber-go/tally/v4/Makefile @@ -2,7 +2,7 @@ export GO15VENDOREXPERIMENT=1 BENCH_FLAGS ?= -cpuprofile=cpu.pprof -memprofile=mem.pprof -benchmem PKGS ?= $(shell glide novendor) -PKG_FILES ?= *.go example/*.go m3 +PKG_FILES ?= *.go example/*.go m3/*.go m3/customtransports m3/thriftudp LINT_IGNORE = m3/thrift\|thirdparty LICENSE_IGNORE = thirdparty @@ -21,7 +21,7 @@ dependencies: go install ./vendor/github.com/golang/lint/golint .PHONY: lint -lint: +lint: gomodtidy @rm -rf lint.log @echo "Checking formatting..." @gofmt -d -s $(PKG_FILES) 2>&1 | grep -v '$(LINT_IGNORE)' | tee lint.log @@ -35,6 +35,15 @@ lint: @./check_license.sh | grep -v '$(LICENSE_IGNORE)' | tee -a lint.log @[ ! -s lint.log ] +.PHONY: gomodtidy +gomodtidy: go.mod go.sum + go mod tidy + @if ! 
git diff --quiet $^; then \ + echo "go mod tidy changed files:" && \ + git status --porcelain $^ && \ + false; \ + fi + .PHONY: test test: go test -race -v $(PKGS) diff --git a/vendor/github.com/uber-go/tally/README.md b/vendor/github.com/uber-go/tally/v4/README.md similarity index 99% rename from vendor/github.com/uber-go/tally/README.md rename to vendor/github.com/uber-go/tally/v4/README.md index d36254fe6..42bdcd3b9 100644 --- a/vendor/github.com/uber-go/tally/README.md +++ b/vendor/github.com/uber-go/tally/v4/README.md @@ -75,7 +75,7 @@ func newScope() (tally.Scope, io.Closer) { scope, closer := tally.NewRootScope(tally.ScopeOptions{ Prefix: "my-service", Tags: map[string]string{}, - Reporter: r, + Reporter: reporter, }, time.Second) return scope, closer diff --git a/vendor/github.com/uber-go/tally/check_license.sh b/vendor/github.com/uber-go/tally/v4/check_license.sh similarity index 100% rename from vendor/github.com/uber-go/tally/check_license.sh rename to vendor/github.com/uber-go/tally/v4/check_license.sh diff --git a/vendor/github.com/uber-go/tally/histogram.go b/vendor/github.com/uber-go/tally/v4/histogram.go similarity index 74% rename from vendor/github.com/uber-go/tally/histogram.go rename to vendor/github.com/uber-go/tally/v4/histogram.go index fe1d68002..41491d9bd 100644 --- a/vendor/github.com/uber-go/tally/histogram.go +++ b/vendor/github.com/uber-go/tally/v4/histogram.go @@ -1,4 +1,4 @@ -// Copyright (c) 2020 Uber Technologies, Inc. +// Copyright (c) 2021 Uber Technologies, Inc. 
// // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -35,6 +35,13 @@ var ( errBucketsCountNeedsGreaterThanZero = errors.New("n needs to be > 0") errBucketsStartNeedsGreaterThanZero = errors.New("start needs to be > 0") errBucketsFactorNeedsGreaterThanOne = errors.New("factor needs to be > 1") + + _singleBucket = bucketPair{ + lowerBoundDuration: time.Duration(math.MinInt64), + upperBoundDuration: time.Duration(math.MaxInt64), + lowerBoundValue: -math.MaxFloat64, + upperBoundValue: math.MaxFloat64, + } ) // ValueBuckets is a set of float64 values that implements Buckets. @@ -65,7 +72,7 @@ func (v ValueBuckets) String() string { // AsValues implements Buckets. func (v ValueBuckets) AsValues() []float64 { - return []float64(v) + return v } // AsDurations implements Buckets and returns time.Duration @@ -116,76 +123,135 @@ func (v DurationBuckets) AsValues() []float64 { // AsDurations implements Buckets. 
func (v DurationBuckets) AsDurations() []time.Duration { - return []time.Duration(v) + return v +} + +func bucketsEqual(x Buckets, y Buckets) bool { + switch b1 := x.(type) { + case DurationBuckets: + b2, ok := y.(DurationBuckets) + if !ok { + return false + } + if len(b1) != len(b2) { + return false + } + for i := 0; i < len(b1); i++ { + if b1[i] != b2[i] { + return false + } + } + case ValueBuckets: + b2, ok := y.(ValueBuckets) + if !ok { + return false + } + if len(b1) != len(b2) { + return false + } + for i := 0; i < len(b1); i++ { + if b1[i] != b2[i] { + return false + } + } + } + + return true +} + +func newBucketPair( + htype histogramType, + durations []time.Duration, + values []float64, + upperBoundIndex int, + prev BucketPair, +) bucketPair { + var pair bucketPair + + switch htype { + case durationHistogramType: + pair = bucketPair{ + lowerBoundDuration: prev.UpperBoundDuration(), + upperBoundDuration: durations[upperBoundIndex], + } + case valueHistogramType: + pair = bucketPair{ + lowerBoundValue: prev.UpperBoundValue(), + upperBoundValue: values[upperBoundIndex], + } + default: + // nop + } + + return pair } // BucketPairs creates a set of bucket pairs from a set // of buckets describing the lower and upper bounds for // each derived bucket. 
func BucketPairs(buckets Buckets) []BucketPair { + htype := valueHistogramType + if _, ok := buckets.(DurationBuckets); ok { + htype = durationHistogramType + } + if buckets == nil || buckets.Len() < 1 { - return []BucketPair{ - - bucketPair{ - lowerBoundValue: -math.MaxFloat64, - upperBoundValue: math.MaxFloat64, - lowerBoundDuration: time.Duration(math.MinInt64), - upperBoundDuration: time.Duration(math.MaxInt64), - }, - } + return []BucketPair{_singleBucket} } var ( - asValueBuckets = copyAndSortValues(buckets.AsValues()) - asDurationBuckets = copyAndSortDurations(buckets.AsDurations()) - pairs = make([]BucketPair, 0, buckets.Len()+2) + values []float64 + durations []time.Duration + pairs = make([]BucketPair, 0, buckets.Len()+2) + pair bucketPair ) - pairs = append(pairs, bucketPair{ - lowerBoundValue: -math.MaxFloat64, - upperBoundValue: asValueBuckets[0], - lowerBoundDuration: time.Duration(math.MinInt64), - upperBoundDuration: asDurationBuckets[0], - }) - - prevValueBucket, prevDurationBucket := - asValueBuckets[0], asDurationBuckets[0] + switch htype { + case durationHistogramType: + durations = copyAndSortDurations(buckets.AsDurations()) + pair.lowerBoundDuration = _singleBucket.lowerBoundDuration + pair.upperBoundDuration = durations[0] + case valueHistogramType: + values = copyAndSortValues(buckets.AsValues()) + pair.lowerBoundValue = _singleBucket.lowerBoundValue + pair.upperBoundValue = values[0] + default: + // n.b. This branch will never be executed because htype is only ever + // one of two values. 
+ panic("unsupported histogram type") + } + pairs = append(pairs, pair) for i := 1; i < buckets.Len(); i++ { - pairs = append(pairs, bucketPair{ - lowerBoundValue: prevValueBucket, - upperBoundValue: asValueBuckets[i], - lowerBoundDuration: prevDurationBucket, - upperBoundDuration: asDurationBuckets[i], - }) - prevValueBucket, prevDurationBucket = - asValueBuckets[i], asDurationBuckets[i] + pairs = append( + pairs, + newBucketPair(htype, durations, values, i, pairs[i-1]), + ) } - pairs = append(pairs, bucketPair{ - lowerBoundValue: prevValueBucket, - upperBoundValue: math.MaxFloat64, - lowerBoundDuration: prevDurationBucket, - upperBoundDuration: time.Duration(math.MaxInt64), - }) + switch htype { + case durationHistogramType: + pair.lowerBoundDuration = pairs[len(pairs)-1].UpperBoundDuration() + pair.upperBoundDuration = _singleBucket.upperBoundDuration + case valueHistogramType: + pair.lowerBoundValue = pairs[len(pairs)-1].UpperBoundValue() + pair.upperBoundValue = _singleBucket.upperBoundValue + } + pairs = append(pairs, pair) return pairs } func copyAndSortValues(values []float64) []float64 { valuesCopy := make([]float64, len(values)) - for i := range values { - valuesCopy[i] = values[i] - } + copy(valuesCopy, values) sort.Sort(ValueBuckets(valuesCopy)) return valuesCopy } func copyAndSortDurations(durations []time.Duration) []time.Duration { durationsCopy := make([]time.Duration, len(durations)) - for i := range durations { - durationsCopy[i] = durations[i] - } + copy(durationsCopy, durations) sort.Sort(DurationBuckets(durationsCopy)) return durationsCopy } @@ -222,7 +288,7 @@ func LinearValueBuckets(start, width float64, n int) (ValueBuckets, error) { for i := range buckets { buckets[i] = start + (float64(i) * width) } - return ValueBuckets(buckets), nil + return buckets, nil } // MustMakeLinearValueBuckets creates a set of linear value buckets @@ -244,7 +310,7 @@ func LinearDurationBuckets(start, width time.Duration, n int) (DurationBuckets, for i := range 
buckets { buckets[i] = start + (time.Duration(i) * width) } - return DurationBuckets(buckets), nil + return buckets, nil } // MustMakeLinearDurationBuckets creates a set of linear duration buckets. @@ -274,7 +340,7 @@ func ExponentialValueBuckets(start, factor float64, n int) (ValueBuckets, error) buckets[i] = curr curr *= factor } - return ValueBuckets(buckets), nil + return buckets, nil } // MustMakeExponentialValueBuckets creates a set of exponential value buckets @@ -304,7 +370,7 @@ func ExponentialDurationBuckets(start time.Duration, factor float64, n int) (Dur buckets[i] = curr curr = time.Duration(float64(curr) * factor) } - return DurationBuckets(buckets), nil + return buckets, nil } // MustMakeExponentialDurationBuckets creates a set of exponential value buckets diff --git a/vendor/github.com/uber-go/tally/v4/internal/identity/accumulator.go b/vendor/github.com/uber-go/tally/v4/internal/identity/accumulator.go new file mode 100644 index 000000000..83987c5c8 --- /dev/null +++ b/vendor/github.com/uber-go/tally/v4/internal/identity/accumulator.go @@ -0,0 +1,131 @@ +// Copyright (c) 2021 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package identity + +import ( + "math" + "time" + + "github.com/twmb/murmur3" +) + +const ( + _hashSeed uint64 = 23 + _hashFold uint64 = 31 +) + +// Accumulator is a commutative folding accumulator. +type Accumulator uint64 + +// NewAccumulator creates a new Accumulator with a default seed value. +// +// n.b. Here and elsewhere, we use nosplit to avoid stack size checks, which +// are unnecessary as memory width is bounded to each instance of `a` (a +// uint64) and, potentially, a single stack-local loop temporary while +// iterating. +func NewAccumulator() Accumulator { + return Accumulator(_hashSeed) +} + +// NewAccumulatorWithSeed creates a new Accumulator with the provided seed value. +func NewAccumulatorWithSeed(seed uint64) Accumulator { + return Accumulator(seed) +} + +// AddString hashes str and folds it into the accumulator. +func (a Accumulator) AddString(str string) Accumulator { + return a + Accumulator(murmur3.StringSum64(str)*_hashFold) +} + +// AddUint64 folds u64 into the accumulator. +func (a Accumulator) AddUint64(u64 uint64) Accumulator { + return a + Accumulator(u64*_hashFold) +} + +// Value returns the accumulated value. +func (a Accumulator) Value() uint64 { + return uint64(a) +} + +// Durations returns the accumulated identity of durs. +func Durations(durs []time.Duration) uint64 { + if len(durs) == 0 { + return 0 + } + + acc := NewAccumulator() + + // n.b. Wrapping due to overflow is okay here, since those values cannot be + // represented by int64. + for _, d := range durs { + acc = acc.AddUint64(uint64(d)) + } + + return acc.Value() +} + +// Int64s returns the accumulated identity of i64s. 
+func Int64s(i64s []int64) uint64 { + if len(i64s) == 0 { + return 0 + } + + acc := NewAccumulator() + + // n.b. Wrapping due to overflow is okay here, since those values cannot be + // represented by int64. + for _, i := range i64s { + acc = acc.AddUint64(uint64(i)) + } + + return acc.Value() +} + +// Float64s returns the accumulated identity of f64s. +func Float64s(f64s []float64) uint64 { + if len(f64s) == 0 { + return 0 + } + + // n.b. Wrapping due to overflow is okay here, since those values cannot be + // represented by int64. + acc := NewAccumulator() + + for _, f := range f64s { + acc = acc.AddUint64(math.Float64bits(f)) + } + + return acc.Value() +} + +// StringStringMap returns the accumulated identity of m. +func StringStringMap(m map[string]string) uint64 { + if len(m) == 0 { + return 0 + } + + acc := NewAccumulator() + for k, v := range m { + acc = acc.AddString(k + "=" + v) + } + + return acc.Value() +} diff --git a/vendor/github.com/uber-go/tally/key_gen.go b/vendor/github.com/uber-go/tally/v4/key_gen.go similarity index 98% rename from vendor/github.com/uber-go/tally/key_gen.go rename to vendor/github.com/uber-go/tally/v4/key_gen.go index 3ae1ae30e..9030e1c94 100644 --- a/vendor/github.com/uber-go/tally/key_gen.go +++ b/vendor/github.com/uber-go/tally/v4/key_gen.go @@ -1,4 +1,4 @@ -// Copyright (c) 2020 Uber Technologies, Inc. +// Copyright (c) 2021 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal diff --git a/vendor/github.com/uber-go/tally/pool.go b/vendor/github.com/uber-go/tally/v4/pool.go similarity index 97% rename from vendor/github.com/uber-go/tally/pool.go rename to vendor/github.com/uber-go/tally/v4/pool.go index c4003565c..4d8b1401b 100644 --- a/vendor/github.com/uber-go/tally/pool.go +++ b/vendor/github.com/uber-go/tally/v4/pool.go @@ -1,4 +1,4 @@ -// Copyright (c) 2020 Uber Technologies, Inc. 
+// Copyright (c) 2021 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal diff --git a/vendor/github.com/uber-go/tally/prometheus/README.md b/vendor/github.com/uber-go/tally/v4/prometheus/README.md similarity index 100% rename from vendor/github.com/uber-go/tally/prometheus/README.md rename to vendor/github.com/uber-go/tally/v4/prometheus/README.md diff --git a/vendor/github.com/uber-go/tally/prometheus/config.go b/vendor/github.com/uber-go/tally/v4/prometheus/config.go similarity index 98% rename from vendor/github.com/uber-go/tally/prometheus/config.go rename to vendor/github.com/uber-go/tally/v4/prometheus/config.go index 7537619c4..988525360 100644 --- a/vendor/github.com/uber-go/tally/prometheus/config.go +++ b/vendor/github.com/uber-go/tally/v4/prometheus/config.go @@ -1,4 +1,4 @@ -// Copyright (c) 2020 Uber Technologies, Inc. +// Copyright (c) 2021 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -28,7 +28,7 @@ import ( "os" "strings" - prom "github.com/m3db/prometheus_client_golang/prometheus" + prom "github.com/prometheus/client_golang/prometheus" ) // Configuration is a configuration for a Prometheus reporter. diff --git a/vendor/github.com/uber-go/tally/prometheus/reporter.go b/vendor/github.com/uber-go/tally/v4/prometheus/reporter.go similarity index 98% rename from vendor/github.com/uber-go/tally/prometheus/reporter.go rename to vendor/github.com/uber-go/tally/v4/prometheus/reporter.go index 1642decb6..9273977fb 100644 --- a/vendor/github.com/uber-go/tally/prometheus/reporter.go +++ b/vendor/github.com/uber-go/tally/v4/prometheus/reporter.go @@ -1,4 +1,4 @@ -// Copyright (c) 2020 Uber Technologies, Inc. +// Copyright (c) 2021 Uber Technologies, Inc. 
// // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -26,10 +26,10 @@ import ( "sync" "time" - prom "github.com/m3db/prometheus_client_golang/prometheus" - "github.com/m3db/prometheus_client_golang/prometheus/promhttp" "github.com/pkg/errors" - "github.com/uber-go/tally" + prom "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promhttp" + tally "github.com/uber-go/tally/v4" ) const ( @@ -163,8 +163,8 @@ type cachedMetric struct { counter prom.Counter gauge prom.Gauge reportTimer func(d time.Duration) - histogram prom.Histogram - summary prom.Summary + histogram prom.Observer + summary prom.Observer } func (m *cachedMetric) ReportCount(value int64) { @@ -220,6 +220,7 @@ func (m noopMetric) ReportSamples(value int64) {} func (m noopMetric) ValueBucket(lower, upper float64) tally.CachedHistogramBucket { return m } + func (m noopMetric) DurationBucket(lower, upper time.Duration) tally.CachedHistogramBucket { return m } diff --git a/vendor/github.com/uber-go/tally/prometheus/sanitize.go b/vendor/github.com/uber-go/tally/v4/prometheus/sanitize.go similarity index 62% rename from vendor/github.com/uber-go/tally/prometheus/sanitize.go rename to vendor/github.com/uber-go/tally/v4/prometheus/sanitize.go index 04ef0278f..6c3f7f15e 100644 --- a/vendor/github.com/uber-go/tally/prometheus/sanitize.go +++ b/vendor/github.com/uber-go/tally/v4/prometheus/sanitize.go @@ -1,4 +1,4 @@ -// Copyright (c) 2020 Uber Technologies, Inc. +// Copyright (c) 2021 Uber Technologies, Inc. 
// // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -21,24 +21,22 @@ package prometheus import ( - "github.com/uber-go/tally" + tally "github.com/uber-go/tally/v4" ) -var ( - // DefaultSanitizerOpts are the options for the default Prometheus sanitizer. - DefaultSanitizerOpts = tally.SanitizeOptions{ - NameCharacters: tally.ValidCharacters{ - Ranges: tally.AlphanumericRange, - Characters: tally.UnderscoreCharacters, - }, - KeyCharacters: tally.ValidCharacters{ - Ranges: tally.AlphanumericRange, - Characters: tally.UnderscoreCharacters, - }, - ValueCharacters: tally.ValidCharacters{ - Ranges: tally.AlphanumericRange, - Characters: tally.UnderscoreCharacters, - }, - ReplacementCharacter: tally.DefaultReplacementCharacter, - } -) +// DefaultSanitizerOpts are the options for the default Prometheus sanitizer. +var DefaultSanitizerOpts = tally.SanitizeOptions{ + NameCharacters: tally.ValidCharacters{ + Ranges: tally.AlphanumericRange, + Characters: tally.UnderscoreCharacters, + }, + KeyCharacters: tally.ValidCharacters{ + Ranges: tally.AlphanumericRange, + Characters: tally.UnderscoreCharacters, + }, + ValueCharacters: tally.ValidCharacters{ + Ranges: tally.AlphanumericRange, + Characters: tally.UnderscoreCharacters, + }, + ReplacementCharacter: tally.DefaultReplacementCharacter, +} diff --git a/vendor/github.com/uber-go/tally/reporter.go b/vendor/github.com/uber-go/tally/v4/reporter.go similarity index 98% rename from vendor/github.com/uber-go/tally/reporter.go rename to vendor/github.com/uber-go/tally/v4/reporter.go index 2333bf3db..22e0f2666 100644 --- a/vendor/github.com/uber-go/tally/reporter.go +++ b/vendor/github.com/uber-go/tally/v4/reporter.go @@ -1,4 +1,4 @@ -// Copyright (c) 2020 Uber Technologies, Inc. +// Copyright (c) 2021 Uber Technologies, Inc. 
// // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal diff --git a/vendor/github.com/uber-go/tally/sanitize.go b/vendor/github.com/uber-go/tally/v4/sanitize.go similarity index 99% rename from vendor/github.com/uber-go/tally/sanitize.go rename to vendor/github.com/uber-go/tally/v4/sanitize.go index 33d6a212c..911c734b5 100644 --- a/vendor/github.com/uber-go/tally/sanitize.go +++ b/vendor/github.com/uber-go/tally/v4/sanitize.go @@ -1,4 +1,4 @@ -// Copyright (c) 2020 Uber Technologies, Inc. +// Copyright (c) 2021 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal diff --git a/vendor/github.com/uber-go/tally/scope.go b/vendor/github.com/uber-go/tally/v4/scope.go similarity index 90% rename from vendor/github.com/uber-go/tally/scope.go rename to vendor/github.com/uber-go/tally/v4/scope.go index 66f3aa1c3..bd04db985 100644 --- a/vendor/github.com/uber-go/tally/scope.go +++ b/vendor/github.com/uber-go/tally/v4/scope.go @@ -1,4 +1,4 @@ -// Copyright (c) 2020 Uber Technologies, Inc. +// Copyright (c) 2021 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -24,6 +24,8 @@ import ( "io" "sync" "time" + + "go.uber.org/atomic" ) const ( @@ -68,7 +70,6 @@ type scope struct { sanitizer Sanitizer registry *scopeRegistry - status scopeStatus cm sync.RWMutex gm sync.RWMutex @@ -84,12 +85,12 @@ type scope struct { timers map[string]*timer // nb: deliberately skipping timersSlice as we report timers immediately, // no buffering is involved. 
-} -type scopeStatus struct { - sync.RWMutex - closed bool - quit chan struct{} + bucketCache *bucketCache + closed atomic.Bool + done chan struct{} + wg sync.WaitGroup + root bool } // ScopeOptions is a set of options to construct a scope. @@ -146,25 +147,23 @@ func newRootScope(opts ScopeOptions, interval time.Duration) *scope { } s := &scope{ - separator: sanitizer.Name(opts.Separator), - prefix: sanitizer.Name(opts.Prefix), - reporter: opts.Reporter, - cachedReporter: opts.CachedReporter, - baseReporter: baseReporter, - defaultBuckets: opts.DefaultBuckets, - sanitizer: sanitizer, - status: scopeStatus{ - closed: false, - quit: make(chan struct{}, 1), - }, - + baseReporter: baseReporter, + bucketCache: newBucketCache(), + cachedReporter: opts.CachedReporter, counters: make(map[string]*counter), countersSlice: make([]*counter, 0, _defaultInitialSliceSize), + defaultBuckets: opts.DefaultBuckets, + done: make(chan struct{}), gauges: make(map[string]*gauge), gaugesSlice: make([]*gauge, 0, _defaultInitialSliceSize), histograms: make(map[string]*histogram), histogramsSlice: make([]*histogram, 0, _defaultInitialSliceSize), + prefix: sanitizer.Name(opts.Prefix), + reporter: opts.Reporter, + sanitizer: sanitizer, + separator: sanitizer.Name(opts.Separator), timers: make(map[string]*timer), + root: true, } // NB(r): Take a copy of the tags on creation @@ -175,7 +174,11 @@ func newRootScope(opts ScopeOptions, interval time.Duration) *scope { s.registry = newScopeRegistry(s) if interval > 0 { - go s.reportLoop(interval) + s.wg.Add(1) + go func() { + defer s.wg.Done() + s.reportLoop(interval) + }() } return s @@ -235,27 +238,21 @@ func (s *scope) reportLoop(interval time.Duration) { select { case <-ticker.C: s.reportLoopRun() - case <-s.status.quit: + case <-s.done: return } } } func (s *scope) reportLoopRun() { - // Need to hold a status lock to ensure not to report - // and flush after a close - s.status.RLock() - defer s.status.RUnlock() - - if s.status.closed { + if 
s.closed.Load() { return } - s.reportRegistryWithLock() + s.reportRegistry() } -// reports current registry with scope status lock held -func (s *scope) reportRegistryWithLock() { +func (s *scope) reportRegistry() { if s.reporter != nil { s.registry.Report(s.reporter) s.reporter.Flush() @@ -382,6 +379,11 @@ func (s *scope) Histogram(name string, b Buckets) Histogram { b = s.defaultBuckets } + htype := valueHistogramType + if _, ok := b.(DurationBuckets); ok { + htype = durationHistogramType + } + s.hm.Lock() defer s.hm.Unlock() @@ -397,7 +399,12 @@ func (s *scope) Histogram(name string, b Buckets) Histogram { } h := newHistogram( - s.fullyQualifiedName(name), s.tags, s.reporter, b, cachedHistogram, + htype, + s.fullyQualifiedName(name), + s.tags, + s.reporter, + s.bucketCache.Get(htype, b), + cachedHistogram, ) s.histograms[name] = h s.histogramsSlice = append(s.histogramsSlice, h) @@ -495,25 +502,54 @@ func (s *scope) Snapshot() Snapshot { } func (s *scope) Close() error { - s.status.Lock() - defer s.status.Unlock() - - // don't wait to close more than once (panic on double close of - // s.status.quit) - if s.status.closed { + // n.b. Once this flag is set, the next scope report will remove it from + // the registry and clear its metrics. 
+ if !s.closed.CAS(false, true) { return nil } - s.status.closed = true - close(s.status.quit) - s.reportRegistryWithLock() + close(s.done) - if closer, ok := s.baseReporter.(io.Closer); ok { - return closer.Close() + if s.root { + s.reportRegistry() + if closer, ok := s.baseReporter.(io.Closer); ok { + return closer.Close() + } } + return nil } +func (s *scope) clearMetrics() { + s.cm.Lock() + s.gm.Lock() + s.tm.Lock() + s.hm.Lock() + defer s.cm.Unlock() + defer s.gm.Unlock() + defer s.tm.Unlock() + defer s.hm.Unlock() + + for k := range s.counters { + delete(s.counters, k) + } + s.countersSlice = nil + + for k := range s.gauges { + delete(s.gauges, k) + } + s.gaugesSlice = nil + + for k := range s.timers { + delete(s.timers, k) + } + + for k := range s.histograms { + delete(s.histograms, k) + } + s.histogramsSlice = nil +} + // NB(prateek): We assume concatenation of sanitized inputs is // sanitized. If that stops being true, then we need to sanitize the // output of this function. diff --git a/vendor/github.com/uber-go/tally/scope_registry.go b/vendor/github.com/uber-go/tally/v4/scope_registry.go similarity index 74% rename from vendor/github.com/uber-go/tally/scope_registry.go rename to vendor/github.com/uber-go/tally/v4/scope_registry.go index 6ed5eb4fd..a6929e013 100644 --- a/vendor/github.com/uber-go/tally/scope_registry.go +++ b/vendor/github.com/uber-go/tally/v4/scope_registry.go @@ -1,4 +1,4 @@ -// Copyright (c) 2020 Uber Technologies, Inc. +// Copyright (c) 2021 Uber Technologies, Inc. 
// // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -20,17 +20,21 @@ package tally -import "sync" +import ( + "sync" +) var scopeRegistryKey = keyForPrefixedStringMaps type scopeRegistry struct { mu sync.RWMutex + root *scope subscopes map[string]*scope } func newScopeRegistry(root *scope) *scopeRegistry { r := &scopeRegistry{ + root: root, subscopes: make(map[string]*scope), } r.subscopes[scopeRegistryKey(root.prefix, root.tags)] = root @@ -38,20 +42,33 @@ func newScopeRegistry(root *scope) *scopeRegistry { } func (r *scopeRegistry) Report(reporter StatsReporter) { + defer r.purgeIfRootClosed() r.mu.RLock() defer r.mu.RUnlock() - for _, s := range r.subscopes { + for name, s := range r.subscopes { s.report(reporter) + + if s.closed.Load() { + r.removeWithRLock(name) + s.clearMetrics() + } } } func (r *scopeRegistry) CachedReport() { + defer r.purgeIfRootClosed() + r.mu.RLock() defer r.mu.RUnlock() - for _, s := range r.subscopes { + for name, s := range r.subscopes { s.cachedReport() + + if s.closed.Load() { + r.removeWithRLock(name) + s.clearMetrics() + } } } @@ -65,6 +82,10 @@ func (r *scopeRegistry) ForEachScope(f func(*scope)) { } func (r *scopeRegistry) Subscope(parent *scope, prefix string, tags map[string]string) *scope { + if r.root.closed.Load() || parent.closed.Load() { + return NoopScope.(*scope) + } + key := scopeRegistryKey(prefix, parent.tags, tags) r.mu.RLock() @@ -102,6 +123,8 @@ func (r *scopeRegistry) Subscope(parent *scope, prefix string, tags map[string]s histograms: make(map[string]*histogram), histogramsSlice: make([]*histogram, 0, _defaultInitialSliceSize), timers: make(map[string]*timer), + bucketCache: parent.bucketCache, + done: make(chan struct{}), } r.subscopes[key] = subscope return subscope @@ -111,3 +134,28 @@ func (r *scopeRegistry) lockedLookup(key string) (*scope, bool) { ss, ok := r.subscopes[key] return ss, ok } + 
+func (r *scopeRegistry) purgeIfRootClosed() { + if !r.root.closed.Load() { + return + } + + r.mu.Lock() + defer r.mu.Unlock() + + for k, s := range r.subscopes { + _ = s.Close() + s.clearMetrics() + delete(r.subscopes, k) + } +} + +func (r *scopeRegistry) removeWithRLock(key string) { + // n.b. This function must lock the registry for writing and return it to an + // RLocked state prior to exiting. Defer order is important (LIFO). + r.mu.RUnlock() + defer r.mu.RLock() + r.mu.Lock() + defer r.mu.Unlock() + delete(r.subscopes, key) +} diff --git a/vendor/github.com/uber-go/tally/stats.go b/vendor/github.com/uber-go/tally/v4/stats.go similarity index 68% rename from vendor/github.com/uber-go/tally/stats.go rename to vendor/github.com/uber-go/tally/v4/stats.go index 247ee465b..453d3e25e 100644 --- a/vendor/github.com/uber-go/tally/stats.go +++ b/vendor/github.com/uber-go/tally/v4/stats.go @@ -1,4 +1,4 @@ -// Copyright (c) 2020 Uber Technologies, Inc. +// Copyright (c) 2021 Uber Technologies, Inc. 
// // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -21,11 +21,14 @@ package tally import ( + "fmt" "math" "sort" "sync" "sync/atomic" "time" + + "github.com/uber-go/tally/v4/internal/identity" ) var ( @@ -232,7 +235,7 @@ func (r *timerNoReporterSink) ReportHistogramValueSamples( name string, tags map[string]string, buckets Buckets, - bucketLowerBound, + bucketLowerBound float64, bucketUpperBound float64, samples int64, ) { @@ -242,7 +245,7 @@ func (r *timerNoReporterSink) ReportHistogramDurationSamples( name string, tags map[string]string, buckets Buckets, - bucketLowerBound, + bucketLowerBound time.Duration, bucketUpperBound time.Duration, samples int64, ) { @@ -255,15 +258,19 @@ func (r *timerNoReporterSink) Capabilities() Capabilities { func (r *timerNoReporterSink) Flush() { } +type sampleCounter struct { + counter *counter + cachedBucket CachedHistogramBucket +} + type histogram struct { - htype histogramType - name string - tags map[string]string - reporter StatsReporter - specification Buckets - buckets []histogramBucket - lookupByValue []float64 - lookupByDuration []int + htype histogramType + name string + tags map[string]string + reporter StatsReporter + specification Buckets + buckets []histogramBucket + samples []sampleCounter } type histogramType int @@ -274,96 +281,119 @@ const ( ) func newHistogram( + htype histogramType, name string, tags map[string]string, reporter StatsReporter, - buckets Buckets, + storage bucketStorage, cachedHistogram CachedHistogram, ) *histogram { - htype := valueHistogramType - if _, ok := buckets.(DurationBuckets); ok { - htype = durationHistogramType - } - - pairs := BucketPairs(buckets) - h := &histogram{ - htype: htype, - name: name, - tags: tags, - reporter: reporter, - specification: buckets, - buckets: make([]histogramBucket, 0, len(pairs)), - lookupByValue: make([]float64, 0, len(pairs)), - 
lookupByDuration: make([]int, 0, len(pairs)), + htype: htype, + name: name, + tags: tags, + reporter: reporter, + specification: storage.buckets, + buckets: storage.hbuckets, + samples: make([]sampleCounter, len(storage.hbuckets)), } - for _, pair := range pairs { - h.addBucket(newHistogramBucket(h, - pair.LowerBoundValue(), pair.UpperBoundValue(), - pair.LowerBoundDuration(), pair.UpperBoundDuration(), - cachedHistogram)) + for i := range h.samples { + h.samples[i].counter = newCounter(nil) + + if cachedHistogram != nil { + switch htype { + case durationHistogramType: + h.samples[i].cachedBucket = cachedHistogram.DurationBucket( + durationLowerBound(storage.hbuckets, i), + storage.hbuckets[i].durationUpperBound, + ) + case valueHistogramType: + h.samples[i].cachedBucket = cachedHistogram.ValueBucket( + valueLowerBound(storage.hbuckets, i), + storage.hbuckets[i].valueUpperBound, + ) + } + } } return h } -func (h *histogram) addBucket(b histogramBucket) { - h.buckets = append(h.buckets, b) - h.lookupByValue = append(h.lookupByValue, b.valueUpperBound) - h.lookupByDuration = append(h.lookupByDuration, int(b.durationUpperBound)) -} - func (h *histogram) report(name string, tags map[string]string, r StatsReporter) { for i := range h.buckets { - samples := h.buckets[i].samples.value() + samples := h.samples[i].counter.value() if samples == 0 { continue } + switch h.htype { case valueHistogramType: - r.ReportHistogramValueSamples(name, tags, h.specification, - h.buckets[i].valueLowerBound, h.buckets[i].valueUpperBound, - samples) + r.ReportHistogramValueSamples( + name, + tags, + h.specification, + valueLowerBound(h.buckets, i), + h.buckets[i].valueUpperBound, + samples, + ) case durationHistogramType: - r.ReportHistogramDurationSamples(name, tags, h.specification, - h.buckets[i].durationLowerBound, h.buckets[i].durationUpperBound, - samples) + r.ReportHistogramDurationSamples( + name, + tags, + h.specification, + durationLowerBound(h.buckets, i), + 
h.buckets[i].durationUpperBound, + samples, + ) } } } func (h *histogram) cachedReport() { for i := range h.buckets { - samples := h.buckets[i].samples.value() + samples := h.samples[i].counter.value() if samples == 0 { continue } + switch h.htype { case valueHistogramType: - h.buckets[i].cachedValueBucket.ReportSamples(samples) + h.samples[i].cachedBucket.ReportSamples(samples) case durationHistogramType: - h.buckets[i].cachedDurationBucket.ReportSamples(samples) + h.samples[i].cachedBucket.ReportSamples(samples) } } } func (h *histogram) RecordValue(value float64) { + if h.htype != valueHistogramType { + return + } + // Find the highest inclusive of the bucket upper bound // and emit directly to it. Since we use BucketPairs to derive // buckets there will always be an inclusive bucket as // we always have a math.MaxFloat64 bucket. - idx := sort.SearchFloat64s(h.lookupByValue, value) - h.buckets[idx].samples.Inc(1) + idx := sort.Search(len(h.buckets), func(i int) bool { + return h.buckets[i].valueUpperBound >= value + }) + h.samples[idx].counter.Inc(1) } func (h *histogram) RecordDuration(value time.Duration) { + if h.htype != durationHistogramType { + return + } + // Find the highest inclusive of the bucket upper bound // and emit directly to it. Since we use BucketPairs to derive // buckets there will always be an inclusive bucket as // we always have a math.MaxInt64 bucket. 
- idx := sort.SearchInts(h.lookupByDuration, int(value)) - h.buckets[idx].samples.Inc(1) + idx := sort.Search(len(h.buckets), func(i int) bool { + return h.buckets[i].durationUpperBound >= value + }) + h.samples[idx].counter.Inc(1) } func (h *histogram) Start() Stopwatch { @@ -376,66 +406,112 @@ func (h *histogram) RecordStopwatch(stopwatchStart time.Time) { } func (h *histogram) snapshotValues() map[float64]int64 { - if h.htype == durationHistogramType { + if h.htype != valueHistogramType { return nil } vals := make(map[float64]int64, len(h.buckets)) for i := range h.buckets { - vals[h.buckets[i].valueUpperBound] = h.buckets[i].samples.snapshot() + vals[h.buckets[i].valueUpperBound] = h.samples[i].counter.snapshot() } return vals } func (h *histogram) snapshotDurations() map[time.Duration]int64 { - if h.htype == valueHistogramType { + if h.htype != durationHistogramType { return nil } durations := make(map[time.Duration]int64, len(h.buckets)) for i := range h.buckets { - durations[h.buckets[i].durationUpperBound] = h.buckets[i].samples.snapshot() + durations[h.buckets[i].durationUpperBound] = h.samples[i].counter.snapshot() } return durations } type histogramBucket struct { - h *histogram - samples *counter - valueLowerBound float64 valueUpperBound float64 - durationLowerBound time.Duration durationUpperBound time.Duration cachedValueBucket CachedHistogramBucket cachedDurationBucket CachedHistogramBucket } -func newHistogramBucket( - h *histogram, - valueLowerBound, - valueUpperBound float64, - durationLowerBound, - durationUpperBound time.Duration, - cachedHistogram CachedHistogram, -) histogramBucket { - bucket := histogramBucket{ - samples: newCounter(nil), - valueLowerBound: valueLowerBound, - valueUpperBound: valueUpperBound, - durationLowerBound: durationLowerBound, - durationUpperBound: durationUpperBound, +func durationLowerBound(buckets []histogramBucket, i int) time.Duration { + if i <= 0 { + return time.Duration(math.MinInt64) + } + return 
buckets[i-1].durationUpperBound +} + +func valueLowerBound(buckets []histogramBucket, i int) float64 { + if i <= 0 { + return -math.MaxFloat64 + } + return buckets[i-1].valueUpperBound +} + +type bucketStorage struct { + buckets Buckets + hbuckets []histogramBucket +} + +func newBucketStorage( + htype histogramType, + buckets Buckets, +) bucketStorage { + var ( + pairs = BucketPairs(buckets) + storage = bucketStorage{ + buckets: buckets, + hbuckets: make([]histogramBucket, 0, len(pairs)), + } + ) + + for _, pair := range pairs { + storage.hbuckets = append(storage.hbuckets, histogramBucket{ + valueUpperBound: pair.UpperBoundValue(), + durationUpperBound: pair.UpperBoundDuration(), + }) } - if cachedHistogram != nil { - bucket.cachedValueBucket = cachedHistogram.ValueBucket( - bucket.valueLowerBound, bucket.valueUpperBound, - ) - bucket.cachedDurationBucket = cachedHistogram.DurationBucket( - bucket.durationLowerBound, bucket.durationUpperBound, - ) + + return storage +} + +type bucketCache struct { + mtx sync.RWMutex + cache map[uint64]bucketStorage +} + +func newBucketCache() *bucketCache { + return &bucketCache{ + cache: make(map[uint64]bucketStorage), } - return bucket +} + +func (c *bucketCache) Get( + htype histogramType, + buckets Buckets, +) bucketStorage { + id := getBucketsIdentity(buckets) + + c.mtx.RLock() + storage, ok := c.cache[id] + if !ok { + c.mtx.RUnlock() + c.mtx.Lock() + storage = newBucketStorage(htype, buckets) + c.cache[id] = storage + c.mtx.Unlock() + } else { + c.mtx.RUnlock() + if !bucketsEqual(buckets, storage.buckets) { + storage = newBucketStorage(htype, buckets) + } + } + + return storage } // NullStatsReporter is an implementation of StatsReporter than simply does nothing. 
@@ -443,10 +519,13 @@ var NullStatsReporter StatsReporter = nullStatsReporter{} func (r nullStatsReporter) ReportCounter(name string, tags map[string]string, value int64) { } + func (r nullStatsReporter) ReportGauge(name string, tags map[string]string, value float64) { } + func (r nullStatsReporter) ReportTimer(name string, tags map[string]string, interval time.Duration) { } + func (r nullStatsReporter) ReportHistogramValueSamples( name string, tags map[string]string, @@ -466,10 +545,23 @@ func (r nullStatsReporter) ReportHistogramDurationSamples( samples int64, ) { } + func (r nullStatsReporter) Capabilities() Capabilities { return capabilitiesNone } + func (r nullStatsReporter) Flush() { } type nullStatsReporter struct{} + +func getBucketsIdentity(buckets Buckets) uint64 { + switch b := buckets.(type) { + case DurationBuckets: + return identity.Durations(b.AsDurations()) + case ValueBuckets: + return identity.Float64s(b.AsValues()) + default: + panic(fmt.Sprintf("unexpected bucket type: %T", b)) + } +} diff --git a/vendor/github.com/uber-go/tally/types.go b/vendor/github.com/uber-go/tally/v4/types.go similarity index 99% rename from vendor/github.com/uber-go/tally/types.go rename to vendor/github.com/uber-go/tally/v4/types.go index 76d3749e2..1a15971d9 100644 --- a/vendor/github.com/uber-go/tally/types.go +++ b/vendor/github.com/uber-go/tally/v4/types.go @@ -1,4 +1,4 @@ -// Copyright (c) 2020 Uber Technologies, Inc. +// Copyright (c) 2021 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal diff --git a/vendor/github.com/uber-go/tally/v4/version.go b/vendor/github.com/uber-go/tally/v4/version.go new file mode 100644 index 000000000..264b8e054 --- /dev/null +++ b/vendor/github.com/uber-go/tally/v4/version.go @@ -0,0 +1,24 @@ +// Copyright (c) 2021 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package tally + +// Version is the current version of the library. +const Version = "4.0.0" diff --git a/vendor/github.com/willf/bitset/Makefile b/vendor/github.com/willf/bitset/Makefile deleted file mode 100644 index ad71f6a4a..000000000 --- a/vendor/github.com/willf/bitset/Makefile +++ /dev/null @@ -1,197 +0,0 @@ -# MAKEFILE -# -# @author Nicola Asuni -# @link https://github.com/willf/bitset -# ------------------------------------------------------------------------------ - -# List special make targets that are not associated with files -.PHONY: help all test format fmtcheck vet lint coverage cyclo ineffassign misspell structcheck varcheck errcheck gosimple astscan qa deps clean nuke - -# Use bash as shell (Note: Ubuntu now uses dash which doesn't support PIPESTATUS). 
-SHELL=/bin/bash - -# CVS path (path to the parent dir containing the project) -CVSPATH=github.com/willf - -# Project owner -OWNER=willf - -# Project vendor -VENDOR=willf - -# Project name -PROJECT=bitset - -# Project version -VERSION=$(shell cat VERSION) - -# Name of RPM or DEB package -PKGNAME=${VENDOR}-${PROJECT} - -# Current directory -CURRENTDIR=$(shell pwd) - -# GO lang path -ifneq ($(GOPATH),) - ifeq ($(findstring $(GOPATH),$(CURRENTDIR)),) - # the defined GOPATH is not valid - GOPATH= - endif -endif -ifeq ($(GOPATH),) - # extract the GOPATH - GOPATH=$(firstword $(subst /src/, ,$(CURRENTDIR))) -endif - -# --- MAKE TARGETS --- - -# Display general help about this command -help: - @echo "" - @echo "$(PROJECT) Makefile." - @echo "GOPATH=$(GOPATH)" - @echo "The following commands are available:" - @echo "" - @echo " make qa : Run all the tests" - @echo " make test : Run the unit tests" - @echo "" - @echo " make format : Format the source code" - @echo " make fmtcheck : Check if the source code has been formatted" - @echo " make vet : Check for suspicious constructs" - @echo " make lint : Check for style errors" - @echo " make coverage : Generate the coverage report" - @echo " make cyclo : Generate the cyclomatic complexity report" - @echo " make ineffassign : Detect ineffectual assignments" - @echo " make misspell : Detect commonly misspelled words in source files" - @echo " make structcheck : Find unused struct fields" - @echo " make varcheck : Find unused global variables and constants" - @echo " make errcheck : Check that error return values are used" - @echo " make gosimple : Suggest code simplifications" - @echo " make astscan : GO AST scanner" - @echo "" - @echo " make docs : Generate source code documentation" - @echo "" - @echo " make deps : Get the dependencies" - @echo " make clean : Remove any build artifact" - @echo " make nuke : Deletes any intermediate file" - @echo "" - -# Alias for help target -all: help - -# Run the unit tests -test: - @mkdir -p 
target/test - @mkdir -p target/report - GOPATH=$(GOPATH) \ - go test \ - -covermode=atomic \ - -bench=. \ - -race \ - -cpuprofile=target/report/cpu.out \ - -memprofile=target/report/mem.out \ - -mutexprofile=target/report/mutex.out \ - -coverprofile=target/report/coverage.out \ - -v ./... | \ - tee >(PATH=$(GOPATH)/bin:$(PATH) go-junit-report > target/test/report.xml); \ - test $${PIPESTATUS[0]} -eq 0 - -# Format the source code -format: - @find . -type f -name "*.go" -exec gofmt -s -w {} \; - -# Check if the source code has been formatted -fmtcheck: - @mkdir -p target - @find . -type f -name "*.go" -exec gofmt -s -d {} \; | tee target/format.diff - @test ! -s target/format.diff || { echo "ERROR: the source code has not been formatted - please use 'make format' or 'gofmt'"; exit 1; } - -# Check for syntax errors -vet: - GOPATH=$(GOPATH) go vet . - -# Check for style errors -lint: - GOPATH=$(GOPATH) PATH=$(GOPATH)/bin:$(PATH) golint . - -# Generate the coverage report -coverage: - @mkdir -p target/report - GOPATH=$(GOPATH) \ - go tool cover -html=target/report/coverage.out -o target/report/coverage.html - -# Report cyclomatic complexity -cyclo: - @mkdir -p target/report - GOPATH=$(GOPATH) gocyclo -avg ./ | tee target/report/cyclo.txt ; test $${PIPESTATUS[0]} -eq 0 - -# Detect ineffectual assignments -ineffassign: - @mkdir -p target/report - GOPATH=$(GOPATH) ineffassign ./ | tee target/report/ineffassign.txt ; test $${PIPESTATUS[0]} -eq 0 - -# Detect commonly misspelled words in source files -misspell: - @mkdir -p target/report - GOPATH=$(GOPATH) misspell -error ./ | tee target/report/misspell.txt ; test $${PIPESTATUS[0]} -eq 0 - -# Find unused struct fields -structcheck: - @mkdir -p target/report - GOPATH=$(GOPATH) structcheck -a ./ | tee target/report/structcheck.txt - -# Find unused global variables and constants -varcheck: - @mkdir -p target/report - GOPATH=$(GOPATH) varcheck -e ./ | tee target/report/varcheck.txt - -# Check that error return values are used 
-errcheck: - @mkdir -p target/report - GOPATH=$(GOPATH) errcheck ./ | tee target/report/errcheck.txt - -# Suggest code simplifications -gosimple: - @mkdir -p target/report - GOPATH=$(GOPATH) gosimple ./ | tee target/report/gosimple.txt - -# AST scanner -astscan: - @mkdir -p target/report - GOPATH=$(GOPATH) gosec . | tee target/report/astscan.txt ; test $${PIPESTATUS[0]} -eq 0 || true - -# Generate source docs -docs: - @mkdir -p target/docs - nohup sh -c 'GOPATH=$(GOPATH) godoc -http=127.0.0.1:6060' > target/godoc_server.log 2>&1 & - wget --directory-prefix=target/docs/ --execute robots=off --retry-connrefused --recursive --no-parent --adjust-extension --page-requisites --convert-links http://127.0.0.1:6060/pkg/github.com/${VENDOR}/${PROJECT}/ ; kill -9 `lsof -ti :6060` - @echo ''${PKGNAME}' Documentation ...' > target/docs/index.html - -# Alias to run all quality-assurance checks -qa: fmtcheck test vet lint coverage cyclo ineffassign misspell structcheck varcheck errcheck gosimple astscan - -# --- INSTALL --- - -# Get the dependencies -deps: - GOPATH=$(GOPATH) go get ./... - GOPATH=$(GOPATH) go get golang.org/x/lint/golint - GOPATH=$(GOPATH) go get github.com/jstemmer/go-junit-report - GOPATH=$(GOPATH) go get github.com/axw/gocov/gocov - GOPATH=$(GOPATH) go get github.com/fzipp/gocyclo - GOPATH=$(GOPATH) go get github.com/gordonklaus/ineffassign - GOPATH=$(GOPATH) go get github.com/client9/misspell/cmd/misspell - GOPATH=$(GOPATH) go get github.com/opennota/check/cmd/structcheck - GOPATH=$(GOPATH) go get github.com/opennota/check/cmd/varcheck - GOPATH=$(GOPATH) go get github.com/kisielk/errcheck - GOPATH=$(GOPATH) go get honnef.co/go/tools/cmd/gosimple - GOPATH=$(GOPATH) go get github.com/securego/gosec/cmd/gosec/... - -# Remove any build artifact -clean: - GOPATH=$(GOPATH) go clean ./... - -# Deletes any intermediate file -nuke: - rm -rf ./target - GOPATH=$(GOPATH) go clean -i ./... 
diff --git a/vendor/go.etcd.io/bbolt/.gitignore b/vendor/go.etcd.io/bbolt/.gitignore deleted file mode 100644 index 3bcd8cbaf..000000000 --- a/vendor/go.etcd.io/bbolt/.gitignore +++ /dev/null @@ -1,5 +0,0 @@ -*.prof -*.test -*.swp -/bin/ -cover.out diff --git a/vendor/go.etcd.io/bbolt/.travis.yml b/vendor/go.etcd.io/bbolt/.travis.yml deleted file mode 100644 index 257dfdfee..000000000 --- a/vendor/go.etcd.io/bbolt/.travis.yml +++ /dev/null @@ -1,17 +0,0 @@ -language: go -go_import_path: go.etcd.io/bbolt - -sudo: false - -go: -- 1.12 - -before_install: -- go get -v honnef.co/go/tools/... -- go get -v github.com/kisielk/errcheck - -script: -- make fmt -- make test -- make race -# - make errcheck diff --git a/vendor/go.etcd.io/bbolt/LICENSE b/vendor/go.etcd.io/bbolt/LICENSE deleted file mode 100644 index 004e77fe5..000000000 --- a/vendor/go.etcd.io/bbolt/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2013 Ben Johnson - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/go.etcd.io/bbolt/Makefile b/vendor/go.etcd.io/bbolt/Makefile deleted file mode 100644 index 2968aaa61..000000000 --- a/vendor/go.etcd.io/bbolt/Makefile +++ /dev/null @@ -1,38 +0,0 @@ -BRANCH=`git rev-parse --abbrev-ref HEAD` -COMMIT=`git rev-parse --short HEAD` -GOLDFLAGS="-X main.branch $(BRANCH) -X main.commit $(COMMIT)" - -default: build - -race: - @TEST_FREELIST_TYPE=hashmap go test -v -race -test.run="TestSimulate_(100op|1000op)" - @echo "array freelist test" - @TEST_FREELIST_TYPE=array go test -v -race -test.run="TestSimulate_(100op|1000op)" - -fmt: - !(gofmt -l -s -d $(shell find . -name \*.go) | grep '[a-z]') - -# go get honnef.co/go/tools/simple -gosimple: - gosimple ./... - -# go get honnef.co/go/tools/unused -unused: - unused ./... 
- -# go get github.com/kisielk/errcheck -errcheck: - @errcheck -ignorepkg=bytes -ignore=os:Remove go.etcd.io/bbolt - -test: - TEST_FREELIST_TYPE=hashmap go test -timeout 20m -v -coverprofile cover.out -covermode atomic - # Note: gets "program not an importable package" in out of path builds - TEST_FREELIST_TYPE=hashmap go test -v ./cmd/bbolt - - @echo "array freelist test" - - @TEST_FREELIST_TYPE=array go test -timeout 20m -v -coverprofile cover.out -covermode atomic - # Note: gets "program not an importable package" in out of path builds - @TEST_FREELIST_TYPE=array go test -v ./cmd/bbolt - -.PHONY: race fmt errcheck test gosimple unused diff --git a/vendor/go.etcd.io/bbolt/README.md b/vendor/go.etcd.io/bbolt/README.md deleted file mode 100644 index c9e64b1a6..000000000 --- a/vendor/go.etcd.io/bbolt/README.md +++ /dev/null @@ -1,957 +0,0 @@ -bbolt -===== - -[![Go Report Card](https://goreportcard.com/badge/github.com/etcd-io/bbolt?style=flat-square)](https://goreportcard.com/report/github.com/etcd-io/bbolt) -[![Coverage](https://codecov.io/gh/etcd-io/bbolt/branch/master/graph/badge.svg)](https://codecov.io/gh/etcd-io/bbolt) -[![Build Status Travis](https://img.shields.io/travis/etcd-io/bboltlabs.svg?style=flat-square&&branch=master)](https://travis-ci.com/etcd-io/bbolt) -[![Godoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://godoc.org/github.com/etcd-io/bbolt) -[![Releases](https://img.shields.io/github/release/etcd-io/bbolt/all.svg?style=flat-square)](https://github.com/etcd-io/bbolt/releases) -[![LICENSE](https://img.shields.io/github/license/etcd-io/bbolt.svg?style=flat-square)](https://github.com/etcd-io/bbolt/blob/master/LICENSE) - -bbolt is a fork of [Ben Johnson's][gh_ben] [Bolt][bolt] key/value -store. The purpose of this fork is to provide the Go community with an active -maintenance and development target for Bolt; the goal is improved reliability -and stability. 
bbolt includes bug fixes, performance enhancements, and features -not found in Bolt while preserving backwards compatibility with the Bolt API. - -Bolt is a pure Go key/value store inspired by [Howard Chu's][hyc_symas] -[LMDB project][lmdb]. The goal of the project is to provide a simple, -fast, and reliable database for projects that don't require a full database -server such as Postgres or MySQL. - -Since Bolt is meant to be used as such a low-level piece of functionality, -simplicity is key. The API will be small and only focus on getting values -and setting values. That's it. - -[gh_ben]: https://github.com/benbjohnson -[bolt]: https://github.com/boltdb/bolt -[hyc_symas]: https://twitter.com/hyc_symas -[lmdb]: http://symas.com/mdb/ - -## Project Status - -Bolt is stable, the API is fixed, and the file format is fixed. Full unit -test coverage and randomized black box testing are used to ensure database -consistency and thread safety. Bolt is currently used in high-load production -environments serving databases as large as 1TB. Many companies such as -Shopify and Heroku use Bolt-backed services every day. - -## Project versioning - -bbolt uses [semantic versioning](http://semver.org). -API should not change between patch and minor releases. -New minor versions may add additional features to the API. 
- -## Table of Contents - - - [Getting Started](#getting-started) - - [Installing](#installing) - - [Opening a database](#opening-a-database) - - [Transactions](#transactions) - - [Read-write transactions](#read-write-transactions) - - [Read-only transactions](#read-only-transactions) - - [Batch read-write transactions](#batch-read-write-transactions) - - [Managing transactions manually](#managing-transactions-manually) - - [Using buckets](#using-buckets) - - [Using key/value pairs](#using-keyvalue-pairs) - - [Autoincrementing integer for the bucket](#autoincrementing-integer-for-the-bucket) - - [Iterating over keys](#iterating-over-keys) - - [Prefix scans](#prefix-scans) - - [Range scans](#range-scans) - - [ForEach()](#foreach) - - [Nested buckets](#nested-buckets) - - [Database backups](#database-backups) - - [Statistics](#statistics) - - [Read-Only Mode](#read-only-mode) - - [Mobile Use (iOS/Android)](#mobile-use-iosandroid) - - [Resources](#resources) - - [Comparison with other databases](#comparison-with-other-databases) - - [Postgres, MySQL, & other relational databases](#postgres-mysql--other-relational-databases) - - [LevelDB, RocksDB](#leveldb-rocksdb) - - [LMDB](#lmdb) - - [Caveats & Limitations](#caveats--limitations) - - [Reading the Source](#reading-the-source) - - [Other Projects Using Bolt](#other-projects-using-bolt) - -## Getting Started - -### Installing - -To start using Bolt, install Go and run `go get`: - -```sh -$ go get go.etcd.io/bbolt/... -``` - -This will retrieve the library and install the `bolt` command line utility into -your `$GOBIN` path. - - -### Importing bbolt - -To use bbolt as an embedded key-value store, import as: - -```go -import bolt "go.etcd.io/bbolt" - -db, err := bolt.Open(path, 0666, nil) -if err != nil { - return err -} -defer db.Close() -``` - - -### Opening a database - -The top-level object in Bolt is a `DB`. It is represented as a single file on -your disk and represents a consistent snapshot of your data. 
- -To open your database, simply use the `bolt.Open()` function: - -```go -package main - -import ( - "log" - - bolt "go.etcd.io/bbolt" -) - -func main() { - // Open the my.db data file in your current directory. - // It will be created if it doesn't exist. - db, err := bolt.Open("my.db", 0600, nil) - if err != nil { - log.Fatal(err) - } - defer db.Close() - - ... -} -``` - -Please note that Bolt obtains a file lock on the data file so multiple processes -cannot open the same database at the same time. Opening an already open Bolt -database will cause it to hang until the other process closes it. To prevent -an indefinite wait you can pass a timeout option to the `Open()` function: - -```go -db, err := bolt.Open("my.db", 0600, &bolt.Options{Timeout: 1 * time.Second}) -``` - - -### Transactions - -Bolt allows only one read-write transaction at a time but allows as many -read-only transactions as you want at a time. Each transaction has a consistent -view of the data as it existed when the transaction started. - -Individual transactions and all objects created from them (e.g. buckets, keys) -are not thread safe. To work with data in multiple goroutines you must start -a transaction for each one or use locking to ensure only one goroutine accesses -a transaction at a time. Creating transaction from the `DB` is thread safe. - -Transactions should not depend on one another and generally shouldn't be opened -simultaneously in the same goroutine. This can cause a deadlock as the read-write -transaction needs to periodically re-map the data file but it cannot do so while -any read-only transaction is open. Even a nested read-only transaction can cause -a deadlock, as the child transaction can block the parent transaction from releasing -its resources. - -#### Read-write transactions - -To start a read-write transaction, you can use the `DB.Update()` function: - -```go -err := db.Update(func(tx *bolt.Tx) error { - ... 
- return nil -}) -``` - -Inside the closure, you have a consistent view of the database. You commit the -transaction by returning `nil` at the end. You can also rollback the transaction -at any point by returning an error. All database operations are allowed inside -a read-write transaction. - -Always check the return error as it will report any disk failures that can cause -your transaction to not complete. If you return an error within your closure -it will be passed through. - - -#### Read-only transactions - -To start a read-only transaction, you can use the `DB.View()` function: - -```go -err := db.View(func(tx *bolt.Tx) error { - ... - return nil -}) -``` - -You also get a consistent view of the database within this closure, however, -no mutating operations are allowed within a read-only transaction. You can only -retrieve buckets, retrieve values, and copy the database within a read-only -transaction. - - -#### Batch read-write transactions - -Each `DB.Update()` waits for disk to commit the writes. This overhead -can be minimized by combining multiple updates with the `DB.Batch()` -function: - -```go -err := db.Batch(func(tx *bolt.Tx) error { - ... - return nil -}) -``` - -Concurrent Batch calls are opportunistically combined into larger -transactions. Batch is only useful when there are multiple goroutines -calling it. - -The trade-off is that `Batch` can call the given -function multiple times, if parts of the transaction fail. The -function must be idempotent and side effects must take effect only -after a successful return from `DB.Batch()`. - -For example: don't display messages from inside the function, instead -set variables in the enclosing scope: - -```go -var id uint64 -err := db.Batch(func(tx *bolt.Tx) error { - // Find last key in bucket, decode as bigendian uint64, increment - // by one, encode back to []byte, and add new key. - ... - id = newValue - return nil -}) -if err != nil { - return ... 
-} -fmt.Println("Allocated ID %d", id) -``` - - -#### Managing transactions manually - -The `DB.View()` and `DB.Update()` functions are wrappers around the `DB.Begin()` -function. These helper functions will start the transaction, execute a function, -and then safely close your transaction if an error is returned. This is the -recommended way to use Bolt transactions. - -However, sometimes you may want to manually start and end your transactions. -You can use the `DB.Begin()` function directly but **please** be sure to close -the transaction. - -```go -// Start a writable transaction. -tx, err := db.Begin(true) -if err != nil { - return err -} -defer tx.Rollback() - -// Use the transaction... -_, err := tx.CreateBucket([]byte("MyBucket")) -if err != nil { - return err -} - -// Commit the transaction and check for error. -if err := tx.Commit(); err != nil { - return err -} -``` - -The first argument to `DB.Begin()` is a boolean stating if the transaction -should be writable. - - -### Using buckets - -Buckets are collections of key/value pairs within the database. All keys in a -bucket must be unique. You can create a bucket using the `Tx.CreateBucket()` -function: - -```go -db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("MyBucket")) - if err != nil { - return fmt.Errorf("create bucket: %s", err) - } - return nil -}) -``` - -You can also create a bucket only if it doesn't exist by using the -`Tx.CreateBucketIfNotExists()` function. It's a common pattern to call this -function for all your top-level buckets after you open your database so you can -guarantee that they exist for future transactions. - -To delete a bucket, simply call the `Tx.DeleteBucket()` function. 
- - -### Using key/value pairs - -To save a key/value pair to a bucket, use the `Bucket.Put()` function: - -```go -db.Update(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("MyBucket")) - err := b.Put([]byte("answer"), []byte("42")) - return err -}) -``` - -This will set the value of the `"answer"` key to `"42"` in the `MyBucket` -bucket. To retrieve this value, we can use the `Bucket.Get()` function: - -```go -db.View(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("MyBucket")) - v := b.Get([]byte("answer")) - fmt.Printf("The answer is: %s\n", v) - return nil -}) -``` - -The `Get()` function does not return an error because its operation is -guaranteed to work (unless there is some kind of system failure). If the key -exists then it will return its byte slice value. If it doesn't exist then it -will return `nil`. It's important to note that you can have a zero-length value -set to a key which is different than the key not existing. - -Use the `Bucket.Delete()` function to delete a key from the bucket. - -Please note that values returned from `Get()` are only valid while the -transaction is open. If you need to use a value outside of the transaction -then you must use `copy()` to copy it to another byte slice. - - -### Autoincrementing integer for the bucket -By using the `NextSequence()` function, you can let Bolt determine a sequence -which can be used as the unique identifier for your key/value pairs. See the -example below. - -```go -// CreateUser saves u to the store. The new user ID is set on u once the data is persisted. -func (s *Store) CreateUser(u *User) error { - return s.db.Update(func(tx *bolt.Tx) error { - // Retrieve the users bucket. - // This should be created when the DB is first opened. - b := tx.Bucket([]byte("users")) - - // Generate ID for the user. - // This returns an error only if the Tx is closed or not writeable. - // That can't happen in an Update() call so I ignore the error check. 
- id, _ := b.NextSequence() - u.ID = int(id) - - // Marshal user data into bytes. - buf, err := json.Marshal(u) - if err != nil { - return err - } - - // Persist bytes to users bucket. - return b.Put(itob(u.ID), buf) - }) -} - -// itob returns an 8-byte big endian representation of v. -func itob(v int) []byte { - b := make([]byte, 8) - binary.BigEndian.PutUint64(b, uint64(v)) - return b -} - -type User struct { - ID int - ... -} -``` - -### Iterating over keys - -Bolt stores its keys in byte-sorted order within a bucket. This makes sequential -iteration over these keys extremely fast. To iterate over keys we'll use a -`Cursor`: - -```go -db.View(func(tx *bolt.Tx) error { - // Assume bucket exists and has keys - b := tx.Bucket([]byte("MyBucket")) - - c := b.Cursor() - - for k, v := c.First(); k != nil; k, v = c.Next() { - fmt.Printf("key=%s, value=%s\n", k, v) - } - - return nil -}) -``` - -The cursor allows you to move to a specific point in the list of keys and move -forward or backward through the keys one at a time. - -The following functions are available on the cursor: - -``` -First() Move to the first key. -Last() Move to the last key. -Seek() Move to a specific key. -Next() Move to the next key. -Prev() Move to the previous key. -``` - -Each of those functions has a return signature of `(key []byte, value []byte)`. -When you have iterated to the end of the cursor then `Next()` will return a -`nil` key. You must seek to a position using `First()`, `Last()`, or `Seek()` -before calling `Next()` or `Prev()`. If you do not seek to a position then -these functions will return a `nil` key. - -During iteration, if the key is non-`nil` but the value is `nil`, that means -the key refers to a bucket rather than a value. Use `Bucket.Bucket()` to -access the sub-bucket. 
- - -#### Prefix scans - -To iterate over a key prefix, you can combine `Seek()` and `bytes.HasPrefix()`: - -```go -db.View(func(tx *bolt.Tx) error { - // Assume bucket exists and has keys - c := tx.Bucket([]byte("MyBucket")).Cursor() - - prefix := []byte("1234") - for k, v := c.Seek(prefix); k != nil && bytes.HasPrefix(k, prefix); k, v = c.Next() { - fmt.Printf("key=%s, value=%s\n", k, v) - } - - return nil -}) -``` - -#### Range scans - -Another common use case is scanning over a range such as a time range. If you -use a sortable time encoding such as RFC3339 then you can query a specific -date range like this: - -```go -db.View(func(tx *bolt.Tx) error { - // Assume our events bucket exists and has RFC3339 encoded time keys. - c := tx.Bucket([]byte("Events")).Cursor() - - // Our time range spans the 90's decade. - min := []byte("1990-01-01T00:00:00Z") - max := []byte("2000-01-01T00:00:00Z") - - // Iterate over the 90's. - for k, v := c.Seek(min); k != nil && bytes.Compare(k, max) <= 0; k, v = c.Next() { - fmt.Printf("%s: %s\n", k, v) - } - - return nil -}) -``` - -Note that, while RFC3339 is sortable, the Golang implementation of RFC3339Nano does not use a fixed number of digits after the decimal point and is therefore not sortable. - - -#### ForEach() - -You can also use the function `ForEach()` if you know you'll be iterating over -all the keys in a bucket: - -```go -db.View(func(tx *bolt.Tx) error { - // Assume bucket exists and has keys - b := tx.Bucket([]byte("MyBucket")) - - b.ForEach(func(k, v []byte) error { - fmt.Printf("key=%s, value=%s\n", k, v) - return nil - }) - return nil -}) -``` - -Please note that keys and values in `ForEach()` are only valid while -the transaction is open. If you need to use a key or value outside of -the transaction, you must use `copy()` to copy it to another byte -slice. - -### Nested buckets - -You can also store a bucket in a key to create nested buckets. 
The API is the -same as the bucket management API on the `DB` object: - -```go -func (*Bucket) CreateBucket(key []byte) (*Bucket, error) -func (*Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) -func (*Bucket) DeleteBucket(key []byte) error -``` - -Say you had a multi-tenant application where the root level bucket was the account bucket. Inside of this bucket was a sequence of accounts which themselves are buckets. And inside the sequence bucket you could have many buckets pertaining to the Account itself (Users, Notes, etc) isolating the information into logical groupings. - -```go - -// createUser creates a new user in the given account. -func createUser(accountID int, u *User) error { - // Start the transaction. - tx, err := db.Begin(true) - if err != nil { - return err - } - defer tx.Rollback() - - // Retrieve the root bucket for the account. - // Assume this has already been created when the account was set up. - root := tx.Bucket([]byte(strconv.FormatUint(accountID, 10))) - - // Setup the users bucket. - bkt, err := root.CreateBucketIfNotExists([]byte("USERS")) - if err != nil { - return err - } - - // Generate an ID for the new user. - userID, err := bkt.NextSequence() - if err != nil { - return err - } - u.ID = userID - - // Marshal and save the encoded user. - if buf, err := json.Marshal(u); err != nil { - return err - } else if err := bkt.Put([]byte(strconv.FormatUint(u.ID, 10)), buf); err != nil { - return err - } - - // Commit the transaction. - if err := tx.Commit(); err != nil { - return err - } - - return nil -} - -``` - - - - -### Database backups - -Bolt is a single file so it's easy to backup. You can use the `Tx.WriteTo()` -function to write a consistent view of the database to a writer. If you call -this from a read-only transaction, it will perform a hot backup and not block -your other database reads and writes. - -By default, it will use a regular file handle which will utilize the operating -system's page cache. 
See the [`Tx`](https://godoc.org/go.etcd.io/bbolt#Tx) -documentation for information about optimizing for larger-than-RAM datasets. - -One common use case is to backup over HTTP so you can use tools like `cURL` to -do database backups: - -```go -func BackupHandleFunc(w http.ResponseWriter, req *http.Request) { - err := db.View(func(tx *bolt.Tx) error { - w.Header().Set("Content-Type", "application/octet-stream") - w.Header().Set("Content-Disposition", `attachment; filename="my.db"`) - w.Header().Set("Content-Length", strconv.Itoa(int(tx.Size()))) - _, err := tx.WriteTo(w) - return err - }) - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - } -} -``` - -Then you can backup using this command: - -```sh -$ curl http://localhost/backup > my.db -``` - -Or you can open your browser to `http://localhost/backup` and it will download -automatically. - -If you want to backup to another file you can use the `Tx.CopyFile()` helper -function. - - -### Statistics - -The database keeps a running count of many of the internal operations it -performs so you can better understand what's going on. By grabbing a snapshot -of these stats at two points in time we can see what operations were performed -in that time range. - -For example, we could start a goroutine to log stats every 10 seconds: - -```go -go func() { - // Grab the initial stats. - prev := db.Stats() - - for { - // Wait for 10s. - time.Sleep(10 * time.Second) - - // Grab the current stats and diff them. - stats := db.Stats() - diff := stats.Sub(&prev) - - // Encode stats to JSON and print to STDERR. - json.NewEncoder(os.Stderr).Encode(diff) - - // Save stats for the next loop. - prev = stats - } -}() -``` - -It's also useful to pipe these stats to a service such as statsd for monitoring -or to provide an HTTP endpoint that will perform a fixed-length sample. - - -### Read-Only Mode - -Sometimes it is useful to create a shared, read-only Bolt database. 
To this, -set the `Options.ReadOnly` flag when opening your database. Read-only mode -uses a shared lock to allow multiple processes to read from the database but -it will block any processes from opening the database in read-write mode. - -```go -db, err := bolt.Open("my.db", 0666, &bolt.Options{ReadOnly: true}) -if err != nil { - log.Fatal(err) -} -``` - -### Mobile Use (iOS/Android) - -Bolt is able to run on mobile devices by leveraging the binding feature of the -[gomobile](https://github.com/golang/mobile) tool. Create a struct that will -contain your database logic and a reference to a `*bolt.DB` with a initializing -constructor that takes in a filepath where the database file will be stored. -Neither Android nor iOS require extra permissions or cleanup from using this method. - -```go -func NewBoltDB(filepath string) *BoltDB { - db, err := bolt.Open(filepath+"/demo.db", 0600, nil) - if err != nil { - log.Fatal(err) - } - - return &BoltDB{db} -} - -type BoltDB struct { - db *bolt.DB - ... -} - -func (b *BoltDB) Path() string { - return b.db.Path() -} - -func (b *BoltDB) Close() { - b.db.Close() -} -``` - -Database logic should be defined as methods on this wrapper struct. - -To initialize this struct from the native language (both platforms now sync -their local storage to the cloud. 
These snippets disable that functionality for the -database file): - -#### Android - -```java -String path; -if (android.os.Build.VERSION.SDK_INT >=android.os.Build.VERSION_CODES.LOLLIPOP){ - path = getNoBackupFilesDir().getAbsolutePath(); -} else{ - path = getFilesDir().getAbsolutePath(); -} -Boltmobiledemo.BoltDB boltDB = Boltmobiledemo.NewBoltDB(path) -``` - -#### iOS - -```objc -- (void)demo { - NSString* path = [NSSearchPathForDirectoriesInDomains(NSLibraryDirectory, - NSUserDomainMask, - YES) objectAtIndex:0]; - GoBoltmobiledemoBoltDB * demo = GoBoltmobiledemoNewBoltDB(path); - [self addSkipBackupAttributeToItemAtPath:demo.path]; - //Some DB Logic would go here - [demo close]; -} - -- (BOOL)addSkipBackupAttributeToItemAtPath:(NSString *) filePathString -{ - NSURL* URL= [NSURL fileURLWithPath: filePathString]; - assert([[NSFileManager defaultManager] fileExistsAtPath: [URL path]]); - - NSError *error = nil; - BOOL success = [URL setResourceValue: [NSNumber numberWithBool: YES] - forKey: NSURLIsExcludedFromBackupKey error: &error]; - if(!success){ - NSLog(@"Error excluding %@ from backup %@", [URL lastPathComponent], error); - } - return success; -} - -``` - -## Resources - -For more information on getting started with Bolt, check out the following articles: - -* [Intro to BoltDB: Painless Performant Persistence](http://npf.io/2014/07/intro-to-boltdb-painless-performant-persistence/) by [Nate Finch](https://github.com/natefinch). -* [Bolt -- an embedded key/value database for Go](https://www.progville.com/go/bolt-embedded-db-golang/) by Progville - - -## Comparison with other databases - -### Postgres, MySQL, & other relational databases - -Relational databases structure data into rows and are only accessible through -the use of SQL. This approach provides flexibility in how you store and query -your data but also incurs overhead in parsing and planning SQL statements. Bolt -accesses all data by a byte slice key. 
This makes Bolt fast to read and write -data by key but provides no built-in support for joining values together. - -Most relational databases (with the exception of SQLite) are standalone servers -that run separately from your application. This gives your systems -flexibility to connect multiple application servers to a single database -server but also adds overhead in serializing and transporting data over the -network. Bolt runs as a library included in your application so all data access -has to go through your application's process. This brings data closer to your -application but limits multi-process access to the data. - - -### LevelDB, RocksDB - -LevelDB and its derivatives (RocksDB, HyperLevelDB) are similar to Bolt in that -they are libraries bundled into the application, however, their underlying -structure is a log-structured merge-tree (LSM tree). An LSM tree optimizes -random writes by using a write ahead log and multi-tiered, sorted files called -SSTables. Bolt uses a B+tree internally and only a single file. Both approaches -have trade-offs. - -If you require a high random write throughput (>10,000 w/sec) or you need to use -spinning disks then LevelDB could be a good choice. If your application is -read-heavy or does a lot of range scans then Bolt could be a good choice. - -One other important consideration is that LevelDB does not have transactions. -It supports batch writing of key/values pairs and it supports read snapshots -but it will not give you the ability to do a compare-and-swap operation safely. -Bolt supports fully serializable ACID transactions. - - -### LMDB - -Bolt was originally a port of LMDB so it is architecturally similar. Both use -a B+tree, have ACID semantics with fully serializable transactions, and support -lock-free MVCC using a single writer and multiple readers. - -The two projects have somewhat diverged. LMDB heavily focuses on raw performance -while Bolt has focused on simplicity and ease of use. 
For example, LMDB allows -several unsafe actions such as direct writes for the sake of performance. Bolt -opts to disallow actions which can leave the database in a corrupted state. The -only exception to this in Bolt is `DB.NoSync`. - -There are also a few differences in API. LMDB requires a maximum mmap size when -opening an `mdb_env` whereas Bolt will handle incremental mmap resizing -automatically. LMDB overloads the getter and setter functions with multiple -flags whereas Bolt splits these specialized cases into their own functions. - - -## Caveats & Limitations - -It's important to pick the right tool for the job and Bolt is no exception. -Here are a few things to note when evaluating and using Bolt: - -* Bolt is good for read intensive workloads. Sequential write performance is - also fast but random writes can be slow. You can use `DB.Batch()` or add a - write-ahead log to help mitigate this issue. - -* Bolt uses a B+tree internally so there can be a lot of random page access. - SSDs provide a significant performance boost over spinning disks. - -* Try to avoid long running read transactions. Bolt uses copy-on-write so - old pages cannot be reclaimed while an old transaction is using them. - -* Byte slices returned from Bolt are only valid during a transaction. Once the - transaction has been committed or rolled back then the memory they point to - can be reused by a new page or can be unmapped from virtual memory and you'll - see an `unexpected fault address` panic when accessing it. - -* Bolt uses an exclusive write lock on the database file so it cannot be - shared by multiple processes. - -* Be careful when using `Bucket.FillPercent`. Setting a high fill percent for - buckets that have random inserts will cause your database to have very poor - page utilization. - -* Use larger buckets in general. Smaller buckets causes poor page utilization - once they become larger than the page size (typically 4KB). 
- -* Bulk loading a lot of random writes into a new bucket can be slow as the - page will not split until the transaction is committed. Randomly inserting - more than 100,000 key/value pairs into a single new bucket in a single - transaction is not advised. - -* Bolt uses a memory-mapped file so the underlying operating system handles the - caching of the data. Typically, the OS will cache as much of the file as it - can in memory and will release memory as needed to other processes. This means - that Bolt can show very high memory usage when working with large databases. - However, this is expected and the OS will release memory as needed. Bolt can - handle databases much larger than the available physical RAM, provided its - memory-map fits in the process virtual address space. It may be problematic - on 32-bits systems. - -* The data structures in the Bolt database are memory mapped so the data file - will be endian specific. This means that you cannot copy a Bolt file from a - little endian machine to a big endian machine and have it work. For most - users this is not a concern since most modern CPUs are little endian. - -* Because of the way pages are laid out on disk, Bolt cannot truncate data files - and return free pages back to the disk. Instead, Bolt maintains a free list - of unused pages within its data file. These free pages can be reused by later - transactions. This works well for many use cases as databases generally tend - to grow. However, it's important to note that deleting large chunks of data - will not allow you to reclaim that space on disk. - - For more information on page allocation, [see this comment][page-allocation]. - -[page-allocation]: https://github.com/boltdb/bolt/issues/308#issuecomment-74811638 - - -## Reading the Source - -Bolt is a relatively small code base (<5KLOC) for an embedded, serializable, -transactional key/value database so it can be a good starting point for people -interested in how databases work. 
- -The best places to start are the main entry points into Bolt: - -- `Open()` - Initializes the reference to the database. It's responsible for - creating the database if it doesn't exist, obtaining an exclusive lock on the - file, reading the meta pages, & memory-mapping the file. - -- `DB.Begin()` - Starts a read-only or read-write transaction depending on the - value of the `writable` argument. This requires briefly obtaining the "meta" - lock to keep track of open transactions. Only one read-write transaction can - exist at a time so the "rwlock" is acquired during the life of a read-write - transaction. - -- `Bucket.Put()` - Writes a key/value pair into a bucket. After validating the - arguments, a cursor is used to traverse the B+tree to the page and position - where they key & value will be written. Once the position is found, the bucket - materializes the underlying page and the page's parent pages into memory as - "nodes". These nodes are where mutations occur during read-write transactions. - These changes get flushed to disk during commit. - -- `Bucket.Get()` - Retrieves a key/value pair from a bucket. This uses a cursor - to move to the page & position of a key/value pair. During a read-only - transaction, the key and value data is returned as a direct reference to the - underlying mmap file so there's no allocation overhead. For read-write - transactions, this data may reference the mmap file or one of the in-memory - node values. - -- `Cursor` - This object is simply for traversing the B+tree of on-disk pages - or in-memory nodes. It can seek to a specific key, move to the first or last - value, or it can move forward or backward. The cursor handles the movement up - and down the B+tree transparently to the end user. - -- `Tx.Commit()` - Converts the in-memory dirty nodes and the list of free pages - into pages to be written to disk. Writing to disk then occurs in two phases. - First, the dirty pages are written to disk and an `fsync()` occurs. 
Second, a - new meta page with an incremented transaction ID is written and another - `fsync()` occurs. This two phase write ensures that partially written data - pages are ignored in the event of a crash since the meta page pointing to them - is never written. Partially written meta pages are invalidated because they - are written with a checksum. - -If you have additional notes that could be helpful for others, please submit -them via pull request. - - -## Other Projects Using Bolt - -Below is a list of public, open source projects that use Bolt: - -* [Algernon](https://github.com/xyproto/algernon) - A HTTP/2 web server with built-in support for Lua. Uses BoltDB as the default database backend. -* [Bazil](https://bazil.org/) - A file system that lets your data reside where it is most convenient for it to reside. -* [bolter](https://github.com/hasit/bolter) - Command-line app for viewing BoltDB file in your terminal. -* [boltcli](https://github.com/spacewander/boltcli) - the redis-cli for boltdb with Lua script support. -* [BoltHold](https://github.com/timshannon/bolthold) - An embeddable NoSQL store for Go types built on BoltDB -* [BoltStore](https://github.com/yosssi/boltstore) - Session store using Bolt. -* [Boltdb Boilerplate](https://github.com/bobintornado/boltdb-boilerplate) - Boilerplate wrapper around bolt aiming to make simple calls one-liners. -* [BoltDbWeb](https://github.com/evnix/boltdbweb) - A web based GUI for BoltDB files. -* [bleve](http://www.blevesearch.com/) - A pure Go search engine similar to ElasticSearch that uses Bolt as the default storage backend. -* [btcwallet](https://github.com/btcsuite/btcwallet) - A bitcoin wallet. -* [buckets](https://github.com/joyrexus/buckets) - a bolt wrapper streamlining - simple tx and key scans. -* [cayley](https://github.com/google/cayley) - Cayley is an open-source graph database using Bolt as optional backend. 
-* [ChainStore](https://github.com/pressly/chainstore) - Simple key-value interface to a variety of storage engines organized as a chain of operations. -* [Consul](https://github.com/hashicorp/consul) - Consul is service discovery and configuration made easy. Distributed, highly available, and datacenter-aware. -* [DVID](https://github.com/janelia-flyem/dvid) - Added Bolt as optional storage engine and testing it against Basho-tuned leveldb. -* [dcrwallet](https://github.com/decred/dcrwallet) - A wallet for the Decred cryptocurrency. -* [drive](https://github.com/odeke-em/drive) - drive is an unofficial Google Drive command line client for \*NIX operating systems. -* [event-shuttle](https://github.com/sclasen/event-shuttle) - A Unix system service to collect and reliably deliver messages to Kafka. -* [Freehold](http://tshannon.bitbucket.org/freehold/) - An open, secure, and lightweight platform for your files and data. -* [Go Report Card](https://goreportcard.com/) - Go code quality report cards as a (free and open source) service. -* [GoWebApp](https://github.com/josephspurrier/gowebapp) - A basic MVC web application in Go using BoltDB. -* [GoShort](https://github.com/pankajkhairnar/goShort) - GoShort is a URL shortener written in Golang and BoltDB for persistent key/value storage and for routing it's using high performent HTTPRouter. -* [gopherpit](https://github.com/gopherpit/gopherpit) - A web service to manage Go remote import paths with custom domains -* [gokv](https://github.com/philippgille/gokv) - Simple key-value store abstraction and implementations for Go (Redis, Consul, etcd, bbolt, BadgerDB, LevelDB, Memcached, DynamoDB, S3, PostgreSQL, MongoDB, CockroachDB and many more) -* [Gitchain](https://github.com/gitchain/gitchain) - Decentralized, peer-to-peer Git repositories aka "Git meets Bitcoin". -* [InfluxDB](https://influxdata.com) - Scalable datastore for metrics, events, and real-time analytics. 
-* [ipLocator](https://github.com/AndreasBriese/ipLocator) - A fast ip-geo-location-server using bolt with bloom filters. -* [ipxed](https://github.com/kelseyhightower/ipxed) - Web interface and api for ipxed. -* [Ironsmith](https://github.com/timshannon/ironsmith) - A simple, script-driven continuous integration (build - > test -> release) tool, with no external dependencies -* [Kala](https://github.com/ajvb/kala) - Kala is a modern job scheduler optimized to run on a single node. It is persistent, JSON over HTTP API, ISO 8601 duration notation, and dependent jobs. -* [Key Value Access Langusge (KVAL)](https://github.com/kval-access-language) - A proposed grammar for key-value datastores offering a bbolt binding. -* [LedisDB](https://github.com/siddontang/ledisdb) - A high performance NoSQL, using Bolt as optional storage. -* [lru](https://github.com/crowdriff/lru) - Easy to use Bolt-backed Least-Recently-Used (LRU) read-through cache with chainable remote stores. -* [mbuckets](https://github.com/abhigupta912/mbuckets) - A Bolt wrapper that allows easy operations on multi level (nested) buckets. -* [MetricBase](https://github.com/msiebuhr/MetricBase) - Single-binary version of Graphite. -* [MuLiFS](https://github.com/dankomiocevic/mulifs) - Music Library Filesystem creates a filesystem to organise your music files. -* [NATS](https://github.com/nats-io/nats-streaming-server) - NATS Streaming uses bbolt for message and metadata storage. -* [Operation Go: A Routine Mission](http://gocode.io) - An online programming game for Golang using Bolt for user accounts and a leaderboard. -* [photosite/session](https://godoc.org/bitbucket.org/kardianos/photosite/session) - Sessions for a photo viewing site. -* [Prometheus Annotation Server](https://github.com/oliver006/prom_annotation_server) - Annotation server for PromDash & Prometheus service monitoring system. 
-* [reef-pi](https://github.com/reef-pi/reef-pi) - reef-pi is an award winning, modular, DIY reef tank controller using easy to learn electronics based on a Raspberry Pi. -* [Request Baskets](https://github.com/darklynx/request-baskets) - A web service to collect arbitrary HTTP requests and inspect them via REST API or simple web UI, similar to [RequestBin](http://requestb.in/) service -* [Seaweed File System](https://github.com/chrislusf/seaweedfs) - Highly scalable distributed key~file system with O(1) disk read. -* [stow](https://github.com/djherbis/stow) - a persistence manager for objects - backed by boltdb. -* [Storm](https://github.com/asdine/storm) - Simple and powerful ORM for BoltDB. -* [SimpleBolt](https://github.com/xyproto/simplebolt) - A simple way to use BoltDB. Deals mainly with strings. -* [Skybox Analytics](https://github.com/skybox/skybox) - A standalone funnel analysis tool for web analytics. -* [Scuttlebutt](https://github.com/benbjohnson/scuttlebutt) - Uses Bolt to store and process all Twitter mentions of GitHub projects. -* [tentacool](https://github.com/optiflows/tentacool) - REST api server to manage system stuff (IP, DNS, Gateway...) on a linux server. -* [torrent](https://github.com/anacrolix/torrent) - Full-featured BitTorrent client package and utilities in Go. BoltDB is a storage backend in development. -* [Wiki](https://github.com/peterhellberg/wiki) - A tiny wiki using Goji, BoltDB and Blackfriday. - -If you are using Bolt in a project please send a pull request to add it to the list. diff --git a/vendor/go.etcd.io/bbolt/bolt_386.go b/vendor/go.etcd.io/bbolt/bolt_386.go deleted file mode 100644 index aee25960f..000000000 --- a/vendor/go.etcd.io/bbolt/bolt_386.go +++ /dev/null @@ -1,7 +0,0 @@ -package bbolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0x7FFFFFFF // 2GB - -// maxAllocSize is the size used when creating array pointers. 
-const maxAllocSize = 0xFFFFFFF diff --git a/vendor/go.etcd.io/bbolt/bolt_amd64.go b/vendor/go.etcd.io/bbolt/bolt_amd64.go deleted file mode 100644 index 5dd8f3f2a..000000000 --- a/vendor/go.etcd.io/bbolt/bolt_amd64.go +++ /dev/null @@ -1,7 +0,0 @@ -package bbolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0xFFFFFFFFFFFF // 256TB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0x7FFFFFFF diff --git a/vendor/go.etcd.io/bbolt/bolt_arm.go b/vendor/go.etcd.io/bbolt/bolt_arm.go deleted file mode 100644 index aee25960f..000000000 --- a/vendor/go.etcd.io/bbolt/bolt_arm.go +++ /dev/null @@ -1,7 +0,0 @@ -package bbolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0x7FFFFFFF // 2GB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0xFFFFFFF diff --git a/vendor/go.etcd.io/bbolt/bolt_arm64.go b/vendor/go.etcd.io/bbolt/bolt_arm64.go deleted file mode 100644 index 810dfd55c..000000000 --- a/vendor/go.etcd.io/bbolt/bolt_arm64.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build arm64 - -package bbolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0xFFFFFFFFFFFF // 256TB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0x7FFFFFFF diff --git a/vendor/go.etcd.io/bbolt/bolt_linux.go b/vendor/go.etcd.io/bbolt/bolt_linux.go deleted file mode 100644 index 7707bcacf..000000000 --- a/vendor/go.etcd.io/bbolt/bolt_linux.go +++ /dev/null @@ -1,10 +0,0 @@ -package bbolt - -import ( - "syscall" -) - -// fdatasync flushes written data to a file descriptor. 
-func fdatasync(db *DB) error { - return syscall.Fdatasync(int(db.file.Fd())) -} diff --git a/vendor/go.etcd.io/bbolt/bolt_mips64x.go b/vendor/go.etcd.io/bbolt/bolt_mips64x.go deleted file mode 100644 index dd8ffe123..000000000 --- a/vendor/go.etcd.io/bbolt/bolt_mips64x.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build mips64 mips64le - -package bbolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0x8000000000 // 512GB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0x7FFFFFFF diff --git a/vendor/go.etcd.io/bbolt/bolt_mipsx.go b/vendor/go.etcd.io/bbolt/bolt_mipsx.go deleted file mode 100644 index a669703a4..000000000 --- a/vendor/go.etcd.io/bbolt/bolt_mipsx.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build mips mipsle - -package bbolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0x40000000 // 1GB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0xFFFFFFF diff --git a/vendor/go.etcd.io/bbolt/bolt_openbsd.go b/vendor/go.etcd.io/bbolt/bolt_openbsd.go deleted file mode 100644 index d7f50358e..000000000 --- a/vendor/go.etcd.io/bbolt/bolt_openbsd.go +++ /dev/null @@ -1,27 +0,0 @@ -package bbolt - -import ( - "syscall" - "unsafe" -) - -const ( - msAsync = 1 << iota // perform asynchronous writes - msSync // perform synchronous writes - msInvalidate // invalidate cached data -) - -func msync(db *DB) error { - _, _, errno := syscall.Syscall(syscall.SYS_MSYNC, uintptr(unsafe.Pointer(db.data)), uintptr(db.datasz), msInvalidate) - if errno != 0 { - return errno - } - return nil -} - -func fdatasync(db *DB) error { - if db.data != nil { - return msync(db) - } - return db.file.Sync() -} diff --git a/vendor/go.etcd.io/bbolt/bolt_ppc.go b/vendor/go.etcd.io/bbolt/bolt_ppc.go deleted file mode 100644 index 84e545ef3..000000000 --- a/vendor/go.etcd.io/bbolt/bolt_ppc.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build ppc - -package bbolt - 
-// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0x7FFFFFFF // 2GB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0xFFFFFFF diff --git a/vendor/go.etcd.io/bbolt/bolt_ppc64.go b/vendor/go.etcd.io/bbolt/bolt_ppc64.go deleted file mode 100644 index a76120908..000000000 --- a/vendor/go.etcd.io/bbolt/bolt_ppc64.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build ppc64 - -package bbolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0xFFFFFFFFFFFF // 256TB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0x7FFFFFFF diff --git a/vendor/go.etcd.io/bbolt/bolt_ppc64le.go b/vendor/go.etcd.io/bbolt/bolt_ppc64le.go deleted file mode 100644 index c830f2fc7..000000000 --- a/vendor/go.etcd.io/bbolt/bolt_ppc64le.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build ppc64le - -package bbolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0xFFFFFFFFFFFF // 256TB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0x7FFFFFFF diff --git a/vendor/go.etcd.io/bbolt/bolt_riscv64.go b/vendor/go.etcd.io/bbolt/bolt_riscv64.go deleted file mode 100644 index c967613b0..000000000 --- a/vendor/go.etcd.io/bbolt/bolt_riscv64.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build riscv64 - -package bbolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0xFFFFFFFFFFFF // 256TB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0x7FFFFFFF diff --git a/vendor/go.etcd.io/bbolt/bolt_s390x.go b/vendor/go.etcd.io/bbolt/bolt_s390x.go deleted file mode 100644 index ff2a56097..000000000 --- a/vendor/go.etcd.io/bbolt/bolt_s390x.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build s390x - -package bbolt - -// maxMapSize represents the largest mmap size supported by Bolt. 
-const maxMapSize = 0xFFFFFFFFFFFF // 256TB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0x7FFFFFFF diff --git a/vendor/go.etcd.io/bbolt/bolt_unix.go b/vendor/go.etcd.io/bbolt/bolt_unix.go deleted file mode 100644 index 2938fed58..000000000 --- a/vendor/go.etcd.io/bbolt/bolt_unix.go +++ /dev/null @@ -1,93 +0,0 @@ -// +build !windows,!plan9,!solaris,!aix - -package bbolt - -import ( - "fmt" - "syscall" - "time" - "unsafe" -) - -// flock acquires an advisory lock on a file descriptor. -func flock(db *DB, exclusive bool, timeout time.Duration) error { - var t time.Time - if timeout != 0 { - t = time.Now() - } - fd := db.file.Fd() - flag := syscall.LOCK_NB - if exclusive { - flag |= syscall.LOCK_EX - } else { - flag |= syscall.LOCK_SH - } - for { - // Attempt to obtain an exclusive lock. - err := syscall.Flock(int(fd), flag) - if err == nil { - return nil - } else if err != syscall.EWOULDBLOCK { - return err - } - - // If we timed out then return an error. - if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout { - return ErrTimeout - } - - // Wait for a bit and try again. - time.Sleep(flockRetryTimeout) - } -} - -// funlock releases an advisory lock on a file descriptor. -func funlock(db *DB) error { - return syscall.Flock(int(db.file.Fd()), syscall.LOCK_UN) -} - -// mmap memory maps a DB's data file. -func mmap(db *DB, sz int) error { - // Map the data file to memory. - b, err := syscall.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags) - if err != nil { - return err - } - - // Advise the kernel that the mmap is accessed randomly. - err = madvise(b, syscall.MADV_RANDOM) - if err != nil && err != syscall.ENOSYS { - // Ignore not implemented error in kernel because it still works. - return fmt.Errorf("madvise: %s", err) - } - - // Save the original byte slice and convert to a byte array pointer. 
- db.dataref = b - db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0])) - db.datasz = sz - return nil -} - -// munmap unmaps a DB's data file from memory. -func munmap(db *DB) error { - // Ignore the unmap if we have no mapped data. - if db.dataref == nil { - return nil - } - - // Unmap using the original byte slice. - err := syscall.Munmap(db.dataref) - db.dataref = nil - db.data = nil - db.datasz = 0 - return err -} - -// NOTE: This function is copied from stdlib because it is not available on darwin. -func madvise(b []byte, advice int) (err error) { - _, _, e1 := syscall.Syscall(syscall.SYS_MADVISE, uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), uintptr(advice)) - if e1 != 0 { - err = e1 - } - return -} diff --git a/vendor/go.etcd.io/bbolt/bolt_unix_aix.go b/vendor/go.etcd.io/bbolt/bolt_unix_aix.go deleted file mode 100644 index a64c16f51..000000000 --- a/vendor/go.etcd.io/bbolt/bolt_unix_aix.go +++ /dev/null @@ -1,90 +0,0 @@ -// +build aix - -package bbolt - -import ( - "fmt" - "syscall" - "time" - "unsafe" - - "golang.org/x/sys/unix" -) - -// flock acquires an advisory lock on a file descriptor. -func flock(db *DB, exclusive bool, timeout time.Duration) error { - var t time.Time - if timeout != 0 { - t = time.Now() - } - fd := db.file.Fd() - var lockType int16 - if exclusive { - lockType = syscall.F_WRLCK - } else { - lockType = syscall.F_RDLCK - } - for { - // Attempt to obtain an exclusive lock. - lock := syscall.Flock_t{Type: lockType} - err := syscall.FcntlFlock(fd, syscall.F_SETLK, &lock) - if err == nil { - return nil - } else if err != syscall.EAGAIN { - return err - } - - // If we timed out then return an error. - if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout { - return ErrTimeout - } - - // Wait for a bit and try again. - time.Sleep(flockRetryTimeout) - } -} - -// funlock releases an advisory lock on a file descriptor. 
-func funlock(db *DB) error { - var lock syscall.Flock_t - lock.Start = 0 - lock.Len = 0 - lock.Type = syscall.F_UNLCK - lock.Whence = 0 - return syscall.FcntlFlock(uintptr(db.file.Fd()), syscall.F_SETLK, &lock) -} - -// mmap memory maps a DB's data file. -func mmap(db *DB, sz int) error { - // Map the data file to memory. - b, err := unix.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags) - if err != nil { - return err - } - - // Advise the kernel that the mmap is accessed randomly. - if err := unix.Madvise(b, syscall.MADV_RANDOM); err != nil { - return fmt.Errorf("madvise: %s", err) - } - - // Save the original byte slice and convert to a byte array pointer. - db.dataref = b - db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0])) - db.datasz = sz - return nil -} - -// munmap unmaps a DB's data file from memory. -func munmap(db *DB) error { - // Ignore the unmap if we have no mapped data. - if db.dataref == nil { - return nil - } - - // Unmap using the original byte slice. - err := unix.Munmap(db.dataref) - db.dataref = nil - db.data = nil - db.datasz = 0 - return err -} diff --git a/vendor/go.etcd.io/bbolt/bolt_unix_solaris.go b/vendor/go.etcd.io/bbolt/bolt_unix_solaris.go deleted file mode 100644 index babad6578..000000000 --- a/vendor/go.etcd.io/bbolt/bolt_unix_solaris.go +++ /dev/null @@ -1,88 +0,0 @@ -package bbolt - -import ( - "fmt" - "syscall" - "time" - "unsafe" - - "golang.org/x/sys/unix" -) - -// flock acquires an advisory lock on a file descriptor. -func flock(db *DB, exclusive bool, timeout time.Duration) error { - var t time.Time - if timeout != 0 { - t = time.Now() - } - fd := db.file.Fd() - var lockType int16 - if exclusive { - lockType = syscall.F_WRLCK - } else { - lockType = syscall.F_RDLCK - } - for { - // Attempt to obtain an exclusive lock. 
- lock := syscall.Flock_t{Type: lockType} - err := syscall.FcntlFlock(fd, syscall.F_SETLK, &lock) - if err == nil { - return nil - } else if err != syscall.EAGAIN { - return err - } - - // If we timed out then return an error. - if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout { - return ErrTimeout - } - - // Wait for a bit and try again. - time.Sleep(flockRetryTimeout) - } -} - -// funlock releases an advisory lock on a file descriptor. -func funlock(db *DB) error { - var lock syscall.Flock_t - lock.Start = 0 - lock.Len = 0 - lock.Type = syscall.F_UNLCK - lock.Whence = 0 - return syscall.FcntlFlock(uintptr(db.file.Fd()), syscall.F_SETLK, &lock) -} - -// mmap memory maps a DB's data file. -func mmap(db *DB, sz int) error { - // Map the data file to memory. - b, err := unix.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags) - if err != nil { - return err - } - - // Advise the kernel that the mmap is accessed randomly. - if err := unix.Madvise(b, syscall.MADV_RANDOM); err != nil { - return fmt.Errorf("madvise: %s", err) - } - - // Save the original byte slice and convert to a byte array pointer. - db.dataref = b - db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0])) - db.datasz = sz - return nil -} - -// munmap unmaps a DB's data file from memory. -func munmap(db *DB) error { - // Ignore the unmap if we have no mapped data. - if db.dataref == nil { - return nil - } - - // Unmap using the original byte slice. 
- err := unix.Munmap(db.dataref) - db.dataref = nil - db.data = nil - db.datasz = 0 - return err -} diff --git a/vendor/go.etcd.io/bbolt/bolt_windows.go b/vendor/go.etcd.io/bbolt/bolt_windows.go deleted file mode 100644 index fca178bd2..000000000 --- a/vendor/go.etcd.io/bbolt/bolt_windows.go +++ /dev/null @@ -1,141 +0,0 @@ -package bbolt - -import ( - "fmt" - "os" - "syscall" - "time" - "unsafe" -) - -// LockFileEx code derived from golang build filemutex_windows.go @ v1.5.1 -var ( - modkernel32 = syscall.NewLazyDLL("kernel32.dll") - procLockFileEx = modkernel32.NewProc("LockFileEx") - procUnlockFileEx = modkernel32.NewProc("UnlockFileEx") -) - -const ( - // see https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx - flagLockExclusive = 2 - flagLockFailImmediately = 1 - - // see https://msdn.microsoft.com/en-us/library/windows/desktop/ms681382(v=vs.85).aspx - errLockViolation syscall.Errno = 0x21 -) - -func lockFileEx(h syscall.Handle, flags, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) { - r, _, err := procLockFileEx.Call(uintptr(h), uintptr(flags), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol))) - if r == 0 { - return err - } - return nil -} - -func unlockFileEx(h syscall.Handle, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) { - r, _, err := procUnlockFileEx.Call(uintptr(h), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol)), 0) - if r == 0 { - return err - } - return nil -} - -// fdatasync flushes written data to a file descriptor. -func fdatasync(db *DB) error { - return db.file.Sync() -} - -// flock acquires an advisory lock on a file descriptor. 
-func flock(db *DB, exclusive bool, timeout time.Duration) error { - var t time.Time - if timeout != 0 { - t = time.Now() - } - var flag uint32 = flagLockFailImmediately - if exclusive { - flag |= flagLockExclusive - } - for { - // Fix for https://github.com/etcd-io/bbolt/issues/121. Use byte-range - // -1..0 as the lock on the database file. - var m1 uint32 = (1 << 32) - 1 // -1 in a uint32 - err := lockFileEx(syscall.Handle(db.file.Fd()), flag, 0, 1, 0, &syscall.Overlapped{ - Offset: m1, - OffsetHigh: m1, - }) - - if err == nil { - return nil - } else if err != errLockViolation { - return err - } - - // If we timed oumercit then return an error. - if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout { - return ErrTimeout - } - - // Wait for a bit and try again. - time.Sleep(flockRetryTimeout) - } -} - -// funlock releases an advisory lock on a file descriptor. -func funlock(db *DB) error { - var m1 uint32 = (1 << 32) - 1 // -1 in a uint32 - err := unlockFileEx(syscall.Handle(db.file.Fd()), 0, 1, 0, &syscall.Overlapped{ - Offset: m1, - OffsetHigh: m1, - }) - return err -} - -// mmap memory maps a DB's data file. -// Based on: https://github.com/edsrzf/mmap-go -func mmap(db *DB, sz int) error { - if !db.readOnly { - // Truncate the database to the size of the mmap. - if err := db.file.Truncate(int64(sz)); err != nil { - return fmt.Errorf("truncate: %s", err) - } - } - - // Open a file mapping handle. - sizelo := uint32(sz >> 32) - sizehi := uint32(sz) & 0xffffffff - h, errno := syscall.CreateFileMapping(syscall.Handle(db.file.Fd()), nil, syscall.PAGE_READONLY, sizelo, sizehi, nil) - if h == 0 { - return os.NewSyscallError("CreateFileMapping", errno) - } - - // Create the memory map. - addr, errno := syscall.MapViewOfFile(h, syscall.FILE_MAP_READ, 0, 0, uintptr(sz)) - if addr == 0 { - return os.NewSyscallError("MapViewOfFile", errno) - } - - // Close mapping handle. 
- if err := syscall.CloseHandle(syscall.Handle(h)); err != nil { - return os.NewSyscallError("CloseHandle", err) - } - - // Convert to a byte array. - db.data = ((*[maxMapSize]byte)(unsafe.Pointer(addr))) - db.datasz = sz - - return nil -} - -// munmap unmaps a pointer from a file. -// Based on: https://github.com/edsrzf/mmap-go -func munmap(db *DB) error { - if db.data == nil { - return nil - } - - addr := (uintptr)(unsafe.Pointer(&db.data[0])) - if err := syscall.UnmapViewOfFile(addr); err != nil { - return os.NewSyscallError("UnmapViewOfFile", err) - } - return nil -} diff --git a/vendor/go.etcd.io/bbolt/boltsync_unix.go b/vendor/go.etcd.io/bbolt/boltsync_unix.go deleted file mode 100644 index 9587afefe..000000000 --- a/vendor/go.etcd.io/bbolt/boltsync_unix.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build !windows,!plan9,!linux,!openbsd - -package bbolt - -// fdatasync flushes written data to a file descriptor. -func fdatasync(db *DB) error { - return db.file.Sync() -} diff --git a/vendor/go.etcd.io/bbolt/bucket.go b/vendor/go.etcd.io/bbolt/bucket.go deleted file mode 100644 index d8750b148..000000000 --- a/vendor/go.etcd.io/bbolt/bucket.go +++ /dev/null @@ -1,777 +0,0 @@ -package bbolt - -import ( - "bytes" - "fmt" - "unsafe" -) - -const ( - // MaxKeySize is the maximum length of a key, in bytes. - MaxKeySize = 32768 - - // MaxValueSize is the maximum length of a value, in bytes. - MaxValueSize = (1 << 31) - 2 -) - -const bucketHeaderSize = int(unsafe.Sizeof(bucket{})) - -const ( - minFillPercent = 0.1 - maxFillPercent = 1.0 -) - -// DefaultFillPercent is the percentage that split pages are filled. -// This value can be changed by setting Bucket.FillPercent. -const DefaultFillPercent = 0.5 - -// Bucket represents a collection of key/value pairs inside the database. 
-type Bucket struct { - *bucket - tx *Tx // the associated transaction - buckets map[string]*Bucket // subbucket cache - page *page // inline page reference - rootNode *node // materialized node for the root page. - nodes map[pgid]*node // node cache - - // Sets the threshold for filling nodes when they split. By default, - // the bucket will fill to 50% but it can be useful to increase this - // amount if you know that your write workloads are mostly append-only. - // - // This is non-persisted across transactions so it must be set in every Tx. - FillPercent float64 -} - -// bucket represents the on-file representation of a bucket. -// This is stored as the "value" of a bucket key. If the bucket is small enough, -// then its root page can be stored inline in the "value", after the bucket -// header. In the case of inline buckets, the "root" will be 0. -type bucket struct { - root pgid // page id of the bucket's root-level page - sequence uint64 // monotonically incrementing, used by NextSequence() -} - -// newBucket returns a new bucket associated with a transaction. -func newBucket(tx *Tx) Bucket { - var b = Bucket{tx: tx, FillPercent: DefaultFillPercent} - if tx.writable { - b.buckets = make(map[string]*Bucket) - b.nodes = make(map[pgid]*node) - } - return b -} - -// Tx returns the tx of the bucket. -func (b *Bucket) Tx() *Tx { - return b.tx -} - -// Root returns the root of the bucket. -func (b *Bucket) Root() pgid { - return b.root -} - -// Writable returns whether the bucket is writable. -func (b *Bucket) Writable() bool { - return b.tx.writable -} - -// Cursor creates a cursor associated with the bucket. -// The cursor is only valid as long as the transaction is open. -// Do not use a cursor after the transaction is closed. -func (b *Bucket) Cursor() *Cursor { - // Update transaction statistics. - b.tx.stats.CursorCount++ - - // Allocate and return a cursor. 
- return &Cursor{ - bucket: b, - stack: make([]elemRef, 0), - } -} - -// Bucket retrieves a nested bucket by name. -// Returns nil if the bucket does not exist. -// The bucket instance is only valid for the lifetime of the transaction. -func (b *Bucket) Bucket(name []byte) *Bucket { - if b.buckets != nil { - if child := b.buckets[string(name)]; child != nil { - return child - } - } - - // Move cursor to key. - c := b.Cursor() - k, v, flags := c.seek(name) - - // Return nil if the key doesn't exist or it is not a bucket. - if !bytes.Equal(name, k) || (flags&bucketLeafFlag) == 0 { - return nil - } - - // Otherwise create a bucket and cache it. - var child = b.openBucket(v) - if b.buckets != nil { - b.buckets[string(name)] = child - } - - return child -} - -// Helper method that re-interprets a sub-bucket value -// from a parent into a Bucket -func (b *Bucket) openBucket(value []byte) *Bucket { - var child = newBucket(b.tx) - - // Unaligned access requires a copy to be made. - const unalignedMask = unsafe.Alignof(struct { - bucket - page - }{}) - 1 - unaligned := uintptr(unsafe.Pointer(&value[0]))&unalignedMask != 0 - if unaligned { - value = cloneBytes(value) - } - - // If this is a writable transaction then we need to copy the bucket entry. - // Read-only transactions can point directly at the mmap entry. - if b.tx.writable && !unaligned { - child.bucket = &bucket{} - *child.bucket = *(*bucket)(unsafe.Pointer(&value[0])) - } else { - child.bucket = (*bucket)(unsafe.Pointer(&value[0])) - } - - // Save a reference to the inline page if the bucket is inline. - if child.root == 0 { - child.page = (*page)(unsafe.Pointer(&value[bucketHeaderSize])) - } - - return &child -} - -// CreateBucket creates a new bucket at the given key and returns the new bucket. -// Returns an error if the key already exists, if the bucket name is blank, or if the bucket name is too long. -// The bucket instance is only valid for the lifetime of the transaction. 
-func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) { - if b.tx.db == nil { - return nil, ErrTxClosed - } else if !b.tx.writable { - return nil, ErrTxNotWritable - } else if len(key) == 0 { - return nil, ErrBucketNameRequired - } - - // Move cursor to correct position. - c := b.Cursor() - k, _, flags := c.seek(key) - - // Return an error if there is an existing key. - if bytes.Equal(key, k) { - if (flags & bucketLeafFlag) != 0 { - return nil, ErrBucketExists - } - return nil, ErrIncompatibleValue - } - - // Create empty, inline bucket. - var bucket = Bucket{ - bucket: &bucket{}, - rootNode: &node{isLeaf: true}, - FillPercent: DefaultFillPercent, - } - var value = bucket.write() - - // Insert into node. - key = cloneBytes(key) - c.node().put(key, key, value, 0, bucketLeafFlag) - - // Since subbuckets are not allowed on inline buckets, we need to - // dereference the inline page, if it exists. This will cause the bucket - // to be treated as a regular, non-inline bucket for the rest of the tx. - b.page = nil - - return b.Bucket(key), nil -} - -// CreateBucketIfNotExists creates a new bucket if it doesn't already exist and returns a reference to it. -// Returns an error if the bucket name is blank, or if the bucket name is too long. -// The bucket instance is only valid for the lifetime of the transaction. -func (b *Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) { - child, err := b.CreateBucket(key) - if err == ErrBucketExists { - return b.Bucket(key), nil - } else if err != nil { - return nil, err - } - return child, nil -} - -// DeleteBucket deletes a bucket at the given key. -// Returns an error if the bucket does not exist, or if the key represents a non-bucket value. -func (b *Bucket) DeleteBucket(key []byte) error { - if b.tx.db == nil { - return ErrTxClosed - } else if !b.Writable() { - return ErrTxNotWritable - } - - // Move cursor to correct position. 
- c := b.Cursor() - k, _, flags := c.seek(key) - - // Return an error if bucket doesn't exist or is not a bucket. - if !bytes.Equal(key, k) { - return ErrBucketNotFound - } else if (flags & bucketLeafFlag) == 0 { - return ErrIncompatibleValue - } - - // Recursively delete all child buckets. - child := b.Bucket(key) - err := child.ForEach(func(k, v []byte) error { - if _, _, childFlags := child.Cursor().seek(k); (childFlags & bucketLeafFlag) != 0 { - if err := child.DeleteBucket(k); err != nil { - return fmt.Errorf("delete bucket: %s", err) - } - } - return nil - }) - if err != nil { - return err - } - - // Remove cached copy. - delete(b.buckets, string(key)) - - // Release all bucket pages to freelist. - child.nodes = nil - child.rootNode = nil - child.free() - - // Delete the node if we have a matching key. - c.node().del(key) - - return nil -} - -// Get retrieves the value for a key in the bucket. -// Returns a nil value if the key does not exist or if the key is a nested bucket. -// The returned value is only valid for the life of the transaction. -func (b *Bucket) Get(key []byte) []byte { - k, v, flags := b.Cursor().seek(key) - - // Return nil if this is a bucket. - if (flags & bucketLeafFlag) != 0 { - return nil - } - - // If our target node isn't the same key as what's passed in then return nil. - if !bytes.Equal(key, k) { - return nil - } - return v -} - -// Put sets the value for a key in the bucket. -// If the key exist then its previous value will be overwritten. -// Supplied value must remain valid for the life of the transaction. -// Returns an error if the bucket was created from a read-only transaction, if the key is blank, if the key is too large, or if the value is too large. 
-func (b *Bucket) Put(key []byte, value []byte) error { - if b.tx.db == nil { - return ErrTxClosed - } else if !b.Writable() { - return ErrTxNotWritable - } else if len(key) == 0 { - return ErrKeyRequired - } else if len(key) > MaxKeySize { - return ErrKeyTooLarge - } else if int64(len(value)) > MaxValueSize { - return ErrValueTooLarge - } - - // Move cursor to correct position. - c := b.Cursor() - k, _, flags := c.seek(key) - - // Return an error if there is an existing key with a bucket value. - if bytes.Equal(key, k) && (flags&bucketLeafFlag) != 0 { - return ErrIncompatibleValue - } - - // Insert into node. - key = cloneBytes(key) - c.node().put(key, key, value, 0, 0) - - return nil -} - -// Delete removes a key from the bucket. -// If the key does not exist then nothing is done and a nil error is returned. -// Returns an error if the bucket was created from a read-only transaction. -func (b *Bucket) Delete(key []byte) error { - if b.tx.db == nil { - return ErrTxClosed - } else if !b.Writable() { - return ErrTxNotWritable - } - - // Move cursor to correct position. - c := b.Cursor() - k, _, flags := c.seek(key) - - // Return nil if the key doesn't exist. - if !bytes.Equal(key, k) { - return nil - } - - // Return an error if there is already existing bucket value. - if (flags & bucketLeafFlag) != 0 { - return ErrIncompatibleValue - } - - // Delete the node if we have a matching key. - c.node().del(key) - - return nil -} - -// Sequence returns the current integer for the bucket without incrementing it. -func (b *Bucket) Sequence() uint64 { return b.bucket.sequence } - -// SetSequence updates the sequence number for the bucket. -func (b *Bucket) SetSequence(v uint64) error { - if b.tx.db == nil { - return ErrTxClosed - } else if !b.Writable() { - return ErrTxNotWritable - } - - // Materialize the root node if it hasn't been already so that the - // bucket will be saved during commit. 
- if b.rootNode == nil { - _ = b.node(b.root, nil) - } - - // Increment and return the sequence. - b.bucket.sequence = v - return nil -} - -// NextSequence returns an autoincrementing integer for the bucket. -func (b *Bucket) NextSequence() (uint64, error) { - if b.tx.db == nil { - return 0, ErrTxClosed - } else if !b.Writable() { - return 0, ErrTxNotWritable - } - - // Materialize the root node if it hasn't been already so that the - // bucket will be saved during commit. - if b.rootNode == nil { - _ = b.node(b.root, nil) - } - - // Increment and return the sequence. - b.bucket.sequence++ - return b.bucket.sequence, nil -} - -// ForEach executes a function for each key/value pair in a bucket. -// If the provided function returns an error then the iteration is stopped and -// the error is returned to the caller. The provided function must not modify -// the bucket; this will result in undefined behavior. -func (b *Bucket) ForEach(fn func(k, v []byte) error) error { - if b.tx.db == nil { - return ErrTxClosed - } - c := b.Cursor() - for k, v := c.First(); k != nil; k, v = c.Next() { - if err := fn(k, v); err != nil { - return err - } - } - return nil -} - -// Stat returns stats on a bucket. -func (b *Bucket) Stats() BucketStats { - var s, subStats BucketStats - pageSize := b.tx.db.pageSize - s.BucketN += 1 - if b.root == 0 { - s.InlineBucketN += 1 - } - b.forEachPage(func(p *page, depth int) { - if (p.flags & leafPageFlag) != 0 { - s.KeyN += int(p.count) - - // used totals the used bytes for the page - used := pageHeaderSize - - if p.count != 0 { - // If page has any elements, add all element headers. - used += leafPageElementSize * uintptr(p.count-1) - - // Add all element key, value sizes. - // The computation takes advantage of the fact that the position - // of the last element's key/value equals to the total of the sizes - // of all previous elements' keys and values. - // It also includes the last element's header. 
- lastElement := p.leafPageElement(p.count - 1) - used += uintptr(lastElement.pos + lastElement.ksize + lastElement.vsize) - } - - if b.root == 0 { - // For inlined bucket just update the inline stats - s.InlineBucketInuse += int(used) - } else { - // For non-inlined bucket update all the leaf stats - s.LeafPageN++ - s.LeafInuse += int(used) - s.LeafOverflowN += int(p.overflow) - - // Collect stats from sub-buckets. - // Do that by iterating over all element headers - // looking for the ones with the bucketLeafFlag. - for i := uint16(0); i < p.count; i++ { - e := p.leafPageElement(i) - if (e.flags & bucketLeafFlag) != 0 { - // For any bucket element, open the element value - // and recursively call Stats on the contained bucket. - subStats.Add(b.openBucket(e.value()).Stats()) - } - } - } - } else if (p.flags & branchPageFlag) != 0 { - s.BranchPageN++ - lastElement := p.branchPageElement(p.count - 1) - - // used totals the used bytes for the page - // Add header and all element headers. - used := pageHeaderSize + (branchPageElementSize * uintptr(p.count-1)) - - // Add size of all keys and values. - // Again, use the fact that last element's position equals to - // the total of key, value sizes of all previous elements. - used += uintptr(lastElement.pos + lastElement.ksize) - s.BranchInuse += int(used) - s.BranchOverflowN += int(p.overflow) - } - - // Keep track of maximum page depth. - if depth+1 > s.Depth { - s.Depth = (depth + 1) - } - }) - - // Alloc stats can be computed from page counts and pageSize. - s.BranchAlloc = (s.BranchPageN + s.BranchOverflowN) * pageSize - s.LeafAlloc = (s.LeafPageN + s.LeafOverflowN) * pageSize - - // Add the max depth of sub-buckets to get total nested depth. - s.Depth += subStats.Depth - // Add the stats for all sub-buckets - s.Add(subStats) - return s -} - -// forEachPage iterates over every page in a bucket, including inline pages. 
-func (b *Bucket) forEachPage(fn func(*page, int)) { - // If we have an inline page then just use that. - if b.page != nil { - fn(b.page, 0) - return - } - - // Otherwise traverse the page hierarchy. - b.tx.forEachPage(b.root, 0, fn) -} - -// forEachPageNode iterates over every page (or node) in a bucket. -// This also includes inline pages. -func (b *Bucket) forEachPageNode(fn func(*page, *node, int)) { - // If we have an inline page or root node then just use that. - if b.page != nil { - fn(b.page, nil, 0) - return - } - b._forEachPageNode(b.root, 0, fn) -} - -func (b *Bucket) _forEachPageNode(pgid pgid, depth int, fn func(*page, *node, int)) { - var p, n = b.pageNode(pgid) - - // Execute function. - fn(p, n, depth) - - // Recursively loop over children. - if p != nil { - if (p.flags & branchPageFlag) != 0 { - for i := 0; i < int(p.count); i++ { - elem := p.branchPageElement(uint16(i)) - b._forEachPageNode(elem.pgid, depth+1, fn) - } - } - } else { - if !n.isLeaf { - for _, inode := range n.inodes { - b._forEachPageNode(inode.pgid, depth+1, fn) - } - } - } -} - -// spill writes all the nodes for this bucket to dirty pages. -func (b *Bucket) spill() error { - // Spill all child buckets first. - for name, child := range b.buckets { - // If the child bucket is small enough and it has no child buckets then - // write it inline into the parent bucket's page. Otherwise spill it - // like a normal bucket and make the parent value a pointer to the page. - var value []byte - if child.inlineable() { - child.free() - value = child.write() - } else { - if err := child.spill(); err != nil { - return err - } - - // Update the child bucket header in this bucket. - value = make([]byte, unsafe.Sizeof(bucket{})) - var bucket = (*bucket)(unsafe.Pointer(&value[0])) - *bucket = *child.bucket - } - - // Skip writing the bucket if there are no materialized nodes. - if child.rootNode == nil { - continue - } - - // Update parent node. 
- var c = b.Cursor() - k, _, flags := c.seek([]byte(name)) - if !bytes.Equal([]byte(name), k) { - panic(fmt.Sprintf("misplaced bucket header: %x -> %x", []byte(name), k)) - } - if flags&bucketLeafFlag == 0 { - panic(fmt.Sprintf("unexpected bucket header flag: %x", flags)) - } - c.node().put([]byte(name), []byte(name), value, 0, bucketLeafFlag) - } - - // Ignore if there's not a materialized root node. - if b.rootNode == nil { - return nil - } - - // Spill nodes. - if err := b.rootNode.spill(); err != nil { - return err - } - b.rootNode = b.rootNode.root() - - // Update the root node for this bucket. - if b.rootNode.pgid >= b.tx.meta.pgid { - panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", b.rootNode.pgid, b.tx.meta.pgid)) - } - b.root = b.rootNode.pgid - - return nil -} - -// inlineable returns true if a bucket is small enough to be written inline -// and if it contains no subbuckets. Otherwise returns false. -func (b *Bucket) inlineable() bool { - var n = b.rootNode - - // Bucket must only contain a single leaf node. - if n == nil || !n.isLeaf { - return false - } - - // Bucket is not inlineable if it contains subbuckets or if it goes beyond - // our threshold for inline bucket size. - var size = pageHeaderSize - for _, inode := range n.inodes { - size += leafPageElementSize + uintptr(len(inode.key)) + uintptr(len(inode.value)) - - if inode.flags&bucketLeafFlag != 0 { - return false - } else if size > b.maxInlineBucketSize() { - return false - } - } - - return true -} - -// Returns the maximum total size of a bucket to make it a candidate for inlining. -func (b *Bucket) maxInlineBucketSize() uintptr { - return uintptr(b.tx.db.pageSize / 4) -} - -// write allocates and writes a bucket to a byte slice. -func (b *Bucket) write() []byte { - // Allocate the appropriate size. - var n = b.rootNode - var value = make([]byte, bucketHeaderSize+n.size()) - - // Write a bucket header. 
- var bucket = (*bucket)(unsafe.Pointer(&value[0])) - *bucket = *b.bucket - - // Convert byte slice to a fake page and write the root node. - var p = (*page)(unsafe.Pointer(&value[bucketHeaderSize])) - n.write(p) - - return value -} - -// rebalance attempts to balance all nodes. -func (b *Bucket) rebalance() { - for _, n := range b.nodes { - n.rebalance() - } - for _, child := range b.buckets { - child.rebalance() - } -} - -// node creates a node from a page and associates it with a given parent. -func (b *Bucket) node(pgid pgid, parent *node) *node { - _assert(b.nodes != nil, "nodes map expected") - - // Retrieve node if it's already been created. - if n := b.nodes[pgid]; n != nil { - return n - } - - // Otherwise create a node and cache it. - n := &node{bucket: b, parent: parent} - if parent == nil { - b.rootNode = n - } else { - parent.children = append(parent.children, n) - } - - // Use the inline page if this is an inline bucket. - var p = b.page - if p == nil { - p = b.tx.page(pgid) - } - - // Read the page into the node and cache it. - n.read(p) - b.nodes[pgid] = n - - // Update statistics. - b.tx.stats.NodeCount++ - - return n -} - -// free recursively frees all pages in the bucket. -func (b *Bucket) free() { - if b.root == 0 { - return - } - - var tx = b.tx - b.forEachPageNode(func(p *page, n *node, _ int) { - if p != nil { - tx.db.freelist.free(tx.meta.txid, p) - } else { - n.free() - } - }) - b.root = 0 -} - -// dereference removes all references to the old mmap. -func (b *Bucket) dereference() { - if b.rootNode != nil { - b.rootNode.root().dereference() - } - - for _, child := range b.buckets { - child.dereference() - } -} - -// pageNode returns the in-memory node, if it exists. -// Otherwise returns the underlying page. -func (b *Bucket) pageNode(id pgid) (*page, *node) { - // Inline buckets have a fake page embedded in their value so treat them - // differently. We'll return the rootNode (if available) or the fake page. 
- if b.root == 0 { - if id != 0 { - panic(fmt.Sprintf("inline bucket non-zero page access(2): %d != 0", id)) - } - if b.rootNode != nil { - return nil, b.rootNode - } - return b.page, nil - } - - // Check the node cache for non-inline buckets. - if b.nodes != nil { - if n := b.nodes[id]; n != nil { - return nil, n - } - } - - // Finally lookup the page from the transaction if no node is materialized. - return b.tx.page(id), nil -} - -// BucketStats records statistics about resources used by a bucket. -type BucketStats struct { - // Page count statistics. - BranchPageN int // number of logical branch pages - BranchOverflowN int // number of physical branch overflow pages - LeafPageN int // number of logical leaf pages - LeafOverflowN int // number of physical leaf overflow pages - - // Tree statistics. - KeyN int // number of keys/value pairs - Depth int // number of levels in B+tree - - // Page size utilization. - BranchAlloc int // bytes allocated for physical branch pages - BranchInuse int // bytes actually used for branch data - LeafAlloc int // bytes allocated for physical leaf pages - LeafInuse int // bytes actually used for leaf data - - // Bucket statistics - BucketN int // total number of buckets including the top bucket - InlineBucketN int // total number on inlined buckets - InlineBucketInuse int // bytes used for inlined buckets (also accounted for in LeafInuse) -} - -func (s *BucketStats) Add(other BucketStats) { - s.BranchPageN += other.BranchPageN - s.BranchOverflowN += other.BranchOverflowN - s.LeafPageN += other.LeafPageN - s.LeafOverflowN += other.LeafOverflowN - s.KeyN += other.KeyN - if s.Depth < other.Depth { - s.Depth = other.Depth - } - s.BranchAlloc += other.BranchAlloc - s.BranchInuse += other.BranchInuse - s.LeafAlloc += other.LeafAlloc - s.LeafInuse += other.LeafInuse - - s.BucketN += other.BucketN - s.InlineBucketN += other.InlineBucketN - s.InlineBucketInuse += other.InlineBucketInuse -} - -// cloneBytes returns a copy of a given slice. 
-func cloneBytes(v []byte) []byte { - var clone = make([]byte, len(v)) - copy(clone, v) - return clone -} diff --git a/vendor/go.etcd.io/bbolt/cursor.go b/vendor/go.etcd.io/bbolt/cursor.go deleted file mode 100644 index 98aeb449a..000000000 --- a/vendor/go.etcd.io/bbolt/cursor.go +++ /dev/null @@ -1,396 +0,0 @@ -package bbolt - -import ( - "bytes" - "fmt" - "sort" -) - -// Cursor represents an iterator that can traverse over all key/value pairs in a bucket in sorted order. -// Cursors see nested buckets with value == nil. -// Cursors can be obtained from a transaction and are valid as long as the transaction is open. -// -// Keys and values returned from the cursor are only valid for the life of the transaction. -// -// Changing data while traversing with a cursor may cause it to be invalidated -// and return unexpected keys and/or values. You must reposition your cursor -// after mutating data. -type Cursor struct { - bucket *Bucket - stack []elemRef -} - -// Bucket returns the bucket that this cursor was created from. -func (c *Cursor) Bucket() *Bucket { - return c.bucket -} - -// First moves the cursor to the first item in the bucket and returns its key and value. -// If the bucket is empty then a nil key and value are returned. -// The returned key and value are only valid for the life of the transaction. -func (c *Cursor) First() (key []byte, value []byte) { - _assert(c.bucket.tx.db != nil, "tx closed") - c.stack = c.stack[:0] - p, n := c.bucket.pageNode(c.bucket.root) - c.stack = append(c.stack, elemRef{page: p, node: n, index: 0}) - c.first() - - // If we land on an empty page then move to the next value. - // https://github.com/boltdb/bolt/issues/450 - if c.stack[len(c.stack)-1].count() == 0 { - c.next() - } - - k, v, flags := c.keyValue() - if (flags & uint32(bucketLeafFlag)) != 0 { - return k, nil - } - return k, v - -} - -// Last moves the cursor to the last item in the bucket and returns its key and value. 
-// If the bucket is empty then a nil key and value are returned. -// The returned key and value are only valid for the life of the transaction. -func (c *Cursor) Last() (key []byte, value []byte) { - _assert(c.bucket.tx.db != nil, "tx closed") - c.stack = c.stack[:0] - p, n := c.bucket.pageNode(c.bucket.root) - ref := elemRef{page: p, node: n} - ref.index = ref.count() - 1 - c.stack = append(c.stack, ref) - c.last() - k, v, flags := c.keyValue() - if (flags & uint32(bucketLeafFlag)) != 0 { - return k, nil - } - return k, v -} - -// Next moves the cursor to the next item in the bucket and returns its key and value. -// If the cursor is at the end of the bucket then a nil key and value are returned. -// The returned key and value are only valid for the life of the transaction. -func (c *Cursor) Next() (key []byte, value []byte) { - _assert(c.bucket.tx.db != nil, "tx closed") - k, v, flags := c.next() - if (flags & uint32(bucketLeafFlag)) != 0 { - return k, nil - } - return k, v -} - -// Prev moves the cursor to the previous item in the bucket and returns its key and value. -// If the cursor is at the beginning of the bucket then a nil key and value are returned. -// The returned key and value are only valid for the life of the transaction. -func (c *Cursor) Prev() (key []byte, value []byte) { - _assert(c.bucket.tx.db != nil, "tx closed") - - // Attempt to move back one element until we're successful. - // Move up the stack as we hit the beginning of each page in our stack. - for i := len(c.stack) - 1; i >= 0; i-- { - elem := &c.stack[i] - if elem.index > 0 { - elem.index-- - break - } - c.stack = c.stack[:i] - } - - // If we've hit the end then return nil. - if len(c.stack) == 0 { - return nil, nil - } - - // Move down the stack to find the last element of the last leaf under this branch. 
- c.last() - k, v, flags := c.keyValue() - if (flags & uint32(bucketLeafFlag)) != 0 { - return k, nil - } - return k, v -} - -// Seek moves the cursor to a given key and returns it. -// If the key does not exist then the next key is used. If no keys -// follow, a nil key is returned. -// The returned key and value are only valid for the life of the transaction. -func (c *Cursor) Seek(seek []byte) (key []byte, value []byte) { - k, v, flags := c.seek(seek) - - // If we ended up after the last element of a page then move to the next one. - if ref := &c.stack[len(c.stack)-1]; ref.index >= ref.count() { - k, v, flags = c.next() - } - - if k == nil { - return nil, nil - } else if (flags & uint32(bucketLeafFlag)) != 0 { - return k, nil - } - return k, v -} - -// Delete removes the current key/value under the cursor from the bucket. -// Delete fails if current key/value is a bucket or if the transaction is not writable. -func (c *Cursor) Delete() error { - if c.bucket.tx.db == nil { - return ErrTxClosed - } else if !c.bucket.Writable() { - return ErrTxNotWritable - } - - key, _, flags := c.keyValue() - // Return an error if current value is a bucket. - if (flags & bucketLeafFlag) != 0 { - return ErrIncompatibleValue - } - c.node().del(key) - - return nil -} - -// seek moves the cursor to a given key and returns it. -// If the key does not exist then the next key is used. -func (c *Cursor) seek(seek []byte) (key []byte, value []byte, flags uint32) { - _assert(c.bucket.tx.db != nil, "tx closed") - - // Start from root page/node and traverse to correct page. - c.stack = c.stack[:0] - c.search(seek, c.bucket.root) - - // If this is a bucket then return a nil value. - return c.keyValue() -} - -// first moves the cursor to the first leaf element under the last page in the stack. -func (c *Cursor) first() { - for { - // Exit when we hit a leaf page. 
- var ref = &c.stack[len(c.stack)-1] - if ref.isLeaf() { - break - } - - // Keep adding pages pointing to the first element to the stack. - var pgid pgid - if ref.node != nil { - pgid = ref.node.inodes[ref.index].pgid - } else { - pgid = ref.page.branchPageElement(uint16(ref.index)).pgid - } - p, n := c.bucket.pageNode(pgid) - c.stack = append(c.stack, elemRef{page: p, node: n, index: 0}) - } -} - -// last moves the cursor to the last leaf element under the last page in the stack. -func (c *Cursor) last() { - for { - // Exit when we hit a leaf page. - ref := &c.stack[len(c.stack)-1] - if ref.isLeaf() { - break - } - - // Keep adding pages pointing to the last element in the stack. - var pgid pgid - if ref.node != nil { - pgid = ref.node.inodes[ref.index].pgid - } else { - pgid = ref.page.branchPageElement(uint16(ref.index)).pgid - } - p, n := c.bucket.pageNode(pgid) - - var nextRef = elemRef{page: p, node: n} - nextRef.index = nextRef.count() - 1 - c.stack = append(c.stack, nextRef) - } -} - -// next moves to the next leaf element and returns the key and value. -// If the cursor is at the last leaf element then it stays there and returns nil. -func (c *Cursor) next() (key []byte, value []byte, flags uint32) { - for { - // Attempt to move over one element until we're successful. - // Move up the stack as we hit the end of each page in our stack. - var i int - for i = len(c.stack) - 1; i >= 0; i-- { - elem := &c.stack[i] - if elem.index < elem.count()-1 { - elem.index++ - break - } - } - - // If we've hit the root page then stop and return. This will leave the - // cursor on the last element of the last page. - if i == -1 { - return nil, nil, 0 - } - - // Otherwise start from where we left off in the stack and find the - // first element of the first leaf page. - c.stack = c.stack[:i+1] - c.first() - - // If this is an empty page then restart and move back up the stack. 
- // https://github.com/boltdb/bolt/issues/450 - if c.stack[len(c.stack)-1].count() == 0 { - continue - } - - return c.keyValue() - } -} - -// search recursively performs a binary search against a given page/node until it finds a given key. -func (c *Cursor) search(key []byte, pgid pgid) { - p, n := c.bucket.pageNode(pgid) - if p != nil && (p.flags&(branchPageFlag|leafPageFlag)) == 0 { - panic(fmt.Sprintf("invalid page type: %d: %x", p.id, p.flags)) - } - e := elemRef{page: p, node: n} - c.stack = append(c.stack, e) - - // If we're on a leaf page/node then find the specific node. - if e.isLeaf() { - c.nsearch(key) - return - } - - if n != nil { - c.searchNode(key, n) - return - } - c.searchPage(key, p) -} - -func (c *Cursor) searchNode(key []byte, n *node) { - var exact bool - index := sort.Search(len(n.inodes), func(i int) bool { - // TODO(benbjohnson): Optimize this range search. It's a bit hacky right now. - // sort.Search() finds the lowest index where f() != -1 but we need the highest index. - ret := bytes.Compare(n.inodes[i].key, key) - if ret == 0 { - exact = true - } - return ret != -1 - }) - if !exact && index > 0 { - index-- - } - c.stack[len(c.stack)-1].index = index - - // Recursively search to the next page. - c.search(key, n.inodes[index].pgid) -} - -func (c *Cursor) searchPage(key []byte, p *page) { - // Binary search for the correct range. - inodes := p.branchPageElements() - - var exact bool - index := sort.Search(int(p.count), func(i int) bool { - // TODO(benbjohnson): Optimize this range search. It's a bit hacky right now. - // sort.Search() finds the lowest index where f() != -1 but we need the highest index. - ret := bytes.Compare(inodes[i].key(), key) - if ret == 0 { - exact = true - } - return ret != -1 - }) - if !exact && index > 0 { - index-- - } - c.stack[len(c.stack)-1].index = index - - // Recursively search to the next page. - c.search(key, inodes[index].pgid) -} - -// nsearch searches the leaf node on the top of the stack for a key. 
-func (c *Cursor) nsearch(key []byte) { - e := &c.stack[len(c.stack)-1] - p, n := e.page, e.node - - // If we have a node then search its inodes. - if n != nil { - index := sort.Search(len(n.inodes), func(i int) bool { - return bytes.Compare(n.inodes[i].key, key) != -1 - }) - e.index = index - return - } - - // If we have a page then search its leaf elements. - inodes := p.leafPageElements() - index := sort.Search(int(p.count), func(i int) bool { - return bytes.Compare(inodes[i].key(), key) != -1 - }) - e.index = index -} - -// keyValue returns the key and value of the current leaf element. -func (c *Cursor) keyValue() ([]byte, []byte, uint32) { - ref := &c.stack[len(c.stack)-1] - - // If the cursor is pointing to the end of page/node then return nil. - if ref.count() == 0 || ref.index >= ref.count() { - return nil, nil, 0 - } - - // Retrieve value from node. - if ref.node != nil { - inode := &ref.node.inodes[ref.index] - return inode.key, inode.value, inode.flags - } - - // Or retrieve value from page. - elem := ref.page.leafPageElement(uint16(ref.index)) - return elem.key(), elem.value(), elem.flags -} - -// node returns the node that the cursor is currently positioned on. -func (c *Cursor) node() *node { - _assert(len(c.stack) > 0, "accessing a node with a zero-length cursor stack") - - // If the top of the stack is a leaf node then just return it. - if ref := &c.stack[len(c.stack)-1]; ref.node != nil && ref.isLeaf() { - return ref.node - } - - // Start from root and traverse down the hierarchy. - var n = c.stack[0].node - if n == nil { - n = c.bucket.node(c.stack[0].page.id, nil) - } - for _, ref := range c.stack[:len(c.stack)-1] { - _assert(!n.isLeaf, "expected branch node") - n = n.childAt(ref.index) - } - _assert(n.isLeaf, "expected leaf node") - return n -} - -// elemRef represents a reference to an element on a given page/node. 
-type elemRef struct { - page *page - node *node - index int -} - -// isLeaf returns whether the ref is pointing at a leaf page/node. -func (r *elemRef) isLeaf() bool { - if r.node != nil { - return r.node.isLeaf - } - return (r.page.flags & leafPageFlag) != 0 -} - -// count returns the number of inodes or page elements. -func (r *elemRef) count() int { - if r.node != nil { - return len(r.node.inodes) - } - return int(r.page.count) -} diff --git a/vendor/go.etcd.io/bbolt/db.go b/vendor/go.etcd.io/bbolt/db.go deleted file mode 100644 index 80b0095cc..000000000 --- a/vendor/go.etcd.io/bbolt/db.go +++ /dev/null @@ -1,1174 +0,0 @@ -package bbolt - -import ( - "errors" - "fmt" - "hash/fnv" - "log" - "os" - "runtime" - "sort" - "sync" - "time" - "unsafe" -) - -// The largest step that can be taken when remapping the mmap. -const maxMmapStep = 1 << 30 // 1GB - -// The data file format version. -const version = 2 - -// Represents a marker value to indicate that a file is a Bolt DB. -const magic uint32 = 0xED0CDAED - -const pgidNoFreelist pgid = 0xffffffffffffffff - -// IgnoreNoSync specifies whether the NoSync field of a DB is ignored when -// syncing changes to a file. This is required as some operating systems, -// such as OpenBSD, do not have a unified buffer cache (UBC) and writes -// must be synchronized using the msync(2) syscall. -const IgnoreNoSync = runtime.GOOS == "openbsd" - -// Default values if not set in a DB instance. -const ( - DefaultMaxBatchSize int = 1000 - DefaultMaxBatchDelay = 10 * time.Millisecond - DefaultAllocSize = 16 * 1024 * 1024 -) - -// default page size for db is set to the OS page size. -var defaultPageSize = os.Getpagesize() - -// The time elapsed between consecutive file locking attempts. 
-const flockRetryTimeout = 50 * time.Millisecond - -// FreelistType is the type of the freelist backend -type FreelistType string - -const ( - // FreelistArrayType indicates backend freelist type is array - FreelistArrayType = FreelistType("array") - // FreelistMapType indicates backend freelist type is hashmap - FreelistMapType = FreelistType("hashmap") -) - -// DB represents a collection of buckets persisted to a file on disk. -// All data access is performed through transactions which can be obtained through the DB. -// All the functions on DB will return a ErrDatabaseNotOpen if accessed before Open() is called. -type DB struct { - // When enabled, the database will perform a Check() after every commit. - // A panic is issued if the database is in an inconsistent state. This - // flag has a large performance impact so it should only be used for - // debugging purposes. - StrictMode bool - - // Setting the NoSync flag will cause the database to skip fsync() - // calls after each commit. This can be useful when bulk loading data - // into a database and you can restart the bulk load in the event of - // a system failure or database corruption. Do not set this flag for - // normal use. - // - // If the package global IgnoreNoSync constant is true, this value is - // ignored. See the comment on that constant for more details. - // - // THIS IS UNSAFE. PLEASE USE WITH CAUTION. - NoSync bool - - // When true, skips syncing freelist to disk. This improves the database - // write performance under normal operation, but requires a full database - // re-sync during recovery. - NoFreelistSync bool - - // FreelistType sets the backend freelist type. There are two options. Array which is simple but endures - // dramatic performance degradation if database is large and framentation in freelist is common. - // The alternative one is using hashmap, it is faster in almost all circumstances - // but it doesn't guarantee that it offers the smallest page id available. 
In normal case it is safe. - // The default type is array - FreelistType FreelistType - - // When true, skips the truncate call when growing the database. - // Setting this to true is only safe on non-ext3/ext4 systems. - // Skipping truncation avoids preallocation of hard drive space and - // bypasses a truncate() and fsync() syscall on remapping. - // - // https://github.com/boltdb/bolt/issues/284 - NoGrowSync bool - - // If you want to read the entire database fast, you can set MmapFlag to - // syscall.MAP_POPULATE on Linux 2.6.23+ for sequential read-ahead. - MmapFlags int - - // MaxBatchSize is the maximum size of a batch. Default value is - // copied from DefaultMaxBatchSize in Open. - // - // If <=0, disables batching. - // - // Do not change concurrently with calls to Batch. - MaxBatchSize int - - // MaxBatchDelay is the maximum delay before a batch starts. - // Default value is copied from DefaultMaxBatchDelay in Open. - // - // If <=0, effectively disables batching. - // - // Do not change concurrently with calls to Batch. - MaxBatchDelay time.Duration - - // AllocSize is the amount of space allocated when the database - // needs to create new pages. This is done to amortize the cost - // of truncate() and fsync() when growing the data file. - AllocSize int - - path string - openFile func(string, int, os.FileMode) (*os.File, error) - file *os.File - dataref []byte // mmap'ed readonly, write throws SEGV - data *[maxMapSize]byte - datasz int - filesz int // current on disk file size - meta0 *meta - meta1 *meta - pageSize int - opened bool - rwtx *Tx - txs []*Tx - stats Stats - - freelist *freelist - freelistLoad sync.Once - - pagePool sync.Pool - - batchMu sync.Mutex - batch *batch - - rwlock sync.Mutex // Allows only one writer at a time. - metalock sync.Mutex // Protects meta page access. - mmaplock sync.RWMutex // Protects mmap access during remapping. - statlock sync.RWMutex // Protects stats access. 
- - ops struct { - writeAt func(b []byte, off int64) (n int, err error) - } - - // Read only mode. - // When true, Update() and Begin(true) return ErrDatabaseReadOnly immediately. - readOnly bool -} - -// Path returns the path to currently open database file. -func (db *DB) Path() string { - return db.path -} - -// GoString returns the Go string representation of the database. -func (db *DB) GoString() string { - return fmt.Sprintf("bolt.DB{path:%q}", db.path) -} - -// String returns the string representation of the database. -func (db *DB) String() string { - return fmt.Sprintf("DB<%q>", db.path) -} - -// Open creates and opens a database at the given path. -// If the file does not exist then it will be created automatically. -// Passing in nil options will cause Bolt to open the database with the default options. -func Open(path string, mode os.FileMode, options *Options) (*DB, error) { - db := &DB{ - opened: true, - } - // Set default options if no options are provided. - if options == nil { - options = DefaultOptions - } - db.NoSync = options.NoSync - db.NoGrowSync = options.NoGrowSync - db.MmapFlags = options.MmapFlags - db.NoFreelistSync = options.NoFreelistSync - db.FreelistType = options.FreelistType - - // Set default values for later DB operations. - db.MaxBatchSize = DefaultMaxBatchSize - db.MaxBatchDelay = DefaultMaxBatchDelay - db.AllocSize = DefaultAllocSize - - flag := os.O_RDWR - if options.ReadOnly { - flag = os.O_RDONLY - db.readOnly = true - } - - db.openFile = options.OpenFile - if db.openFile == nil { - db.openFile = os.OpenFile - } - - // Open data file and separate sync handler for metadata writes. - var err error - if db.file, err = db.openFile(path, flag|os.O_CREATE, mode); err != nil { - _ = db.close() - return nil, err - } - db.path = db.file.Name() - - // Lock file so that other processes using Bolt in read-write mode cannot - // use the database at the same time. 
This would cause corruption since - // the two processes would write meta pages and free pages separately. - // The database file is locked exclusively (only one process can grab the lock) - // if !options.ReadOnly. - // The database file is locked using the shared lock (more than one process may - // hold a lock at the same time) otherwise (options.ReadOnly is set). - if err := flock(db, !db.readOnly, options.Timeout); err != nil { - _ = db.close() - return nil, err - } - - // Default values for test hooks - db.ops.writeAt = db.file.WriteAt - - if db.pageSize = options.PageSize; db.pageSize == 0 { - // Set the default page size to the OS page size. - db.pageSize = defaultPageSize - } - - // Initialize the database if it doesn't exist. - if info, err := db.file.Stat(); err != nil { - _ = db.close() - return nil, err - } else if info.Size() == 0 { - // Initialize new files with meta pages. - if err := db.init(); err != nil { - // clean up file descriptor on initialization fail - _ = db.close() - return nil, err - } - } else { - // Read the first meta page to determine the page size. - var buf [0x1000]byte - // If we can't read the page size, but can read a page, assume - // it's the same as the OS or one given -- since that's how the - // page size was chosen in the first place. - // - // If the first page is invalid and this OS uses a different - // page size than what the database was created with then we - // are out of luck and cannot access the database. - // - // TODO: scan for next page - if bw, err := db.file.ReadAt(buf[:], 0); err == nil && bw == len(buf) { - if m := db.pageInBuffer(buf[:], 0).meta(); m.validate() == nil { - db.pageSize = int(m.pageSize) - } - } else { - _ = db.close() - return nil, ErrInvalid - } - } - - // Initialize page pool. - db.pagePool = sync.Pool{ - New: func() interface{} { - return make([]byte, db.pageSize) - }, - } - - // Memory map the data file. 
- if err := db.mmap(options.InitialMmapSize); err != nil { - _ = db.close() - return nil, err - } - - if db.readOnly { - return db, nil - } - - db.loadFreelist() - - // Flush freelist when transitioning from no sync to sync so - // NoFreelistSync unaware boltdb can open the db later. - if !db.NoFreelistSync && !db.hasSyncedFreelist() { - tx, err := db.Begin(true) - if tx != nil { - err = tx.Commit() - } - if err != nil { - _ = db.close() - return nil, err - } - } - - // Mark the database as opened and return. - return db, nil -} - -// loadFreelist reads the freelist if it is synced, or reconstructs it -// by scanning the DB if it is not synced. It assumes there are no -// concurrent accesses being made to the freelist. -func (db *DB) loadFreelist() { - db.freelistLoad.Do(func() { - db.freelist = newFreelist(db.FreelistType) - if !db.hasSyncedFreelist() { - // Reconstruct free list by scanning the DB. - db.freelist.readIDs(db.freepages()) - } else { - // Read free list from freelist page. - db.freelist.read(db.page(db.meta().freelist)) - } - db.stats.FreePageN = db.freelist.free_count() - }) -} - -func (db *DB) hasSyncedFreelist() bool { - return db.meta().freelist != pgidNoFreelist -} - -// mmap opens the underlying memory-mapped file and initializes the meta references. -// minsz is the minimum size that the new mmap can be. -func (db *DB) mmap(minsz int) error { - db.mmaplock.Lock() - defer db.mmaplock.Unlock() - - info, err := db.file.Stat() - if err != nil { - return fmt.Errorf("mmap stat error: %s", err) - } else if int(info.Size()) < db.pageSize*2 { - return fmt.Errorf("file size too small") - } - - // Ensure the size is at least the minimum size. - var size = int(info.Size()) - if size < minsz { - size = minsz - } - size, err = db.mmapSize(size) - if err != nil { - return err - } - - // Dereference all mmap references before unmapping. - if db.rwtx != nil { - db.rwtx.root.dereference() - } - - // Unmap existing data before continuing. 
- if err := db.munmap(); err != nil { - return err - } - - // Memory-map the data file as a byte slice. - if err := mmap(db, size); err != nil { - return err - } - - // Save references to the meta pages. - db.meta0 = db.page(0).meta() - db.meta1 = db.page(1).meta() - - // Validate the meta pages. We only return an error if both meta pages fail - // validation, since meta0 failing validation means that it wasn't saved - // properly -- but we can recover using meta1. And vice-versa. - err0 := db.meta0.validate() - err1 := db.meta1.validate() - if err0 != nil && err1 != nil { - return err0 - } - - return nil -} - -// munmap unmaps the data file from memory. -func (db *DB) munmap() error { - if err := munmap(db); err != nil { - return fmt.Errorf("unmap error: " + err.Error()) - } - return nil -} - -// mmapSize determines the appropriate size for the mmap given the current size -// of the database. The minimum size is 32KB and doubles until it reaches 1GB. -// Returns an error if the new mmap size is greater than the max allowed. -func (db *DB) mmapSize(size int) (int, error) { - // Double the size from 32KB until 1GB. - for i := uint(15); i <= 30; i++ { - if size <= 1< maxMapSize { - return 0, fmt.Errorf("mmap too large") - } - - // If larger than 1GB then grow by 1GB at a time. - sz := int64(size) - if remainder := sz % int64(maxMmapStep); remainder > 0 { - sz += int64(maxMmapStep) - remainder - } - - // Ensure that the mmap size is a multiple of the page size. - // This should always be true since we're incrementing in MBs. - pageSize := int64(db.pageSize) - if (sz % pageSize) != 0 { - sz = ((sz / pageSize) + 1) * pageSize - } - - // If we've exceeded the max size then only grow up to the max size. - if sz > maxMapSize { - sz = maxMapSize - } - - return int(sz), nil -} - -// init creates a new database file and initializes its meta pages. -func (db *DB) init() error { - // Create two meta pages on a buffer. 
- buf := make([]byte, db.pageSize*4) - for i := 0; i < 2; i++ { - p := db.pageInBuffer(buf[:], pgid(i)) - p.id = pgid(i) - p.flags = metaPageFlag - - // Initialize the meta page. - m := p.meta() - m.magic = magic - m.version = version - m.pageSize = uint32(db.pageSize) - m.freelist = 2 - m.root = bucket{root: 3} - m.pgid = 4 - m.txid = txid(i) - m.checksum = m.sum64() - } - - // Write an empty freelist at page 3. - p := db.pageInBuffer(buf[:], pgid(2)) - p.id = pgid(2) - p.flags = freelistPageFlag - p.count = 0 - - // Write an empty leaf page at page 4. - p = db.pageInBuffer(buf[:], pgid(3)) - p.id = pgid(3) - p.flags = leafPageFlag - p.count = 0 - - // Write the buffer to our data file. - if _, err := db.ops.writeAt(buf, 0); err != nil { - return err - } - if err := fdatasync(db); err != nil { - return err - } - - return nil -} - -// Close releases all database resources. -// It will block waiting for any open transactions to finish -// before closing the database and returning. -func (db *DB) Close() error { - db.rwlock.Lock() - defer db.rwlock.Unlock() - - db.metalock.Lock() - defer db.metalock.Unlock() - - db.mmaplock.Lock() - defer db.mmaplock.Unlock() - - return db.close() -} - -func (db *DB) close() error { - if !db.opened { - return nil - } - - db.opened = false - - db.freelist = nil - - // Clear ops. - db.ops.writeAt = nil - - // Close the mmap. - if err := db.munmap(); err != nil { - return err - } - - // Close file handles. - if db.file != nil { - // No need to unlock read-only file. - if !db.readOnly { - // Unlock the file. - if err := funlock(db); err != nil { - log.Printf("bolt.Close(): funlock error: %s", err) - } - } - - // Close the file descriptor. - if err := db.file.Close(); err != nil { - return fmt.Errorf("db file close: %s", err) - } - db.file = nil - } - - db.path = "" - return nil -} - -// Begin starts a new transaction. -// Multiple read-only transactions can be used concurrently but only one -// write transaction can be used at a time. 
Starting multiple write transactions -// will cause the calls to block and be serialized until the current write -// transaction finishes. -// -// Transactions should not be dependent on one another. Opening a read -// transaction and a write transaction in the same goroutine can cause the -// writer to deadlock because the database periodically needs to re-mmap itself -// as it grows and it cannot do that while a read transaction is open. -// -// If a long running read transaction (for example, a snapshot transaction) is -// needed, you might want to set DB.InitialMmapSize to a large enough value -// to avoid potential blocking of write transaction. -// -// IMPORTANT: You must close read-only transactions after you are finished or -// else the database will not reclaim old pages. -func (db *DB) Begin(writable bool) (*Tx, error) { - if writable { - return db.beginRWTx() - } - return db.beginTx() -} - -func (db *DB) beginTx() (*Tx, error) { - // Lock the meta pages while we initialize the transaction. We obtain - // the meta lock before the mmap lock because that's the order that the - // write transaction will obtain them. - db.metalock.Lock() - - // Obtain a read-only lock on the mmap. When the mmap is remapped it will - // obtain a write lock so all transactions must finish before it can be - // remapped. - db.mmaplock.RLock() - - // Exit if the database is not open yet. - if !db.opened { - db.mmaplock.RUnlock() - db.metalock.Unlock() - return nil, ErrDatabaseNotOpen - } - - // Create a transaction associated with the database. - t := &Tx{} - t.init(db) - - // Keep track of transaction until it closes. - db.txs = append(db.txs, t) - n := len(db.txs) - - // Unlock the meta pages. - db.metalock.Unlock() - - // Update the transaction stats. - db.statlock.Lock() - db.stats.TxN++ - db.stats.OpenTxN = n - db.statlock.Unlock() - - return t, nil -} - -func (db *DB) beginRWTx() (*Tx, error) { - // If the database was opened with Options.ReadOnly, return an error. 
- if db.readOnly { - return nil, ErrDatabaseReadOnly - } - - // Obtain writer lock. This is released by the transaction when it closes. - // This enforces only one writer transaction at a time. - db.rwlock.Lock() - - // Once we have the writer lock then we can lock the meta pages so that - // we can set up the transaction. - db.metalock.Lock() - defer db.metalock.Unlock() - - // Exit if the database is not open yet. - if !db.opened { - db.rwlock.Unlock() - return nil, ErrDatabaseNotOpen - } - - // Create a transaction associated with the database. - t := &Tx{writable: true} - t.init(db) - db.rwtx = t - db.freePages() - return t, nil -} - -// freePages releases any pages associated with closed read-only transactions. -func (db *DB) freePages() { - // Free all pending pages prior to earliest open transaction. - sort.Sort(txsById(db.txs)) - minid := txid(0xFFFFFFFFFFFFFFFF) - if len(db.txs) > 0 { - minid = db.txs[0].meta.txid - } - if minid > 0 { - db.freelist.release(minid - 1) - } - // Release unused txid extents. - for _, t := range db.txs { - db.freelist.releaseRange(minid, t.meta.txid-1) - minid = t.meta.txid + 1 - } - db.freelist.releaseRange(minid, txid(0xFFFFFFFFFFFFFFFF)) - // Any page both allocated and freed in an extent is safe to release. -} - -type txsById []*Tx - -func (t txsById) Len() int { return len(t) } -func (t txsById) Swap(i, j int) { t[i], t[j] = t[j], t[i] } -func (t txsById) Less(i, j int) bool { return t[i].meta.txid < t[j].meta.txid } - -// removeTx removes a transaction from the database. -func (db *DB) removeTx(tx *Tx) { - // Release the read lock on the mmap. - db.mmaplock.RUnlock() - - // Use the meta lock to restrict access to the DB object. - db.metalock.Lock() - - // Remove the transaction. - for i, t := range db.txs { - if t == tx { - last := len(db.txs) - 1 - db.txs[i] = db.txs[last] - db.txs[last] = nil - db.txs = db.txs[:last] - break - } - } - n := len(db.txs) - - // Unlock the meta pages. 
- db.metalock.Unlock() - - // Merge statistics. - db.statlock.Lock() - db.stats.OpenTxN = n - db.stats.TxStats.add(&tx.stats) - db.statlock.Unlock() -} - -// Update executes a function within the context of a read-write managed transaction. -// If no error is returned from the function then the transaction is committed. -// If an error is returned then the entire transaction is rolled back. -// Any error that is returned from the function or returned from the commit is -// returned from the Update() method. -// -// Attempting to manually commit or rollback within the function will cause a panic. -func (db *DB) Update(fn func(*Tx) error) error { - t, err := db.Begin(true) - if err != nil { - return err - } - - // Make sure the transaction rolls back in the event of a panic. - defer func() { - if t.db != nil { - t.rollback() - } - }() - - // Mark as a managed tx so that the inner function cannot manually commit. - t.managed = true - - // If an error is returned from the function then rollback and return error. - err = fn(t) - t.managed = false - if err != nil { - _ = t.Rollback() - return err - } - - return t.Commit() -} - -// View executes a function within the context of a managed read-only transaction. -// Any error that is returned from the function is returned from the View() method. -// -// Attempting to manually rollback within the function will cause a panic. -func (db *DB) View(fn func(*Tx) error) error { - t, err := db.Begin(false) - if err != nil { - return err - } - - // Make sure the transaction rolls back in the event of a panic. - defer func() { - if t.db != nil { - t.rollback() - } - }() - - // Mark as a managed tx so that the inner function cannot manually rollback. - t.managed = true - - // If an error is returned from the function then pass it through. - err = fn(t) - t.managed = false - if err != nil { - _ = t.Rollback() - return err - } - - return t.Rollback() -} - -// Batch calls fn as part of a batch. 
It behaves similar to Update, -// except: -// -// 1. concurrent Batch calls can be combined into a single Bolt -// transaction. -// -// 2. the function passed to Batch may be called multiple times, -// regardless of whether it returns error or not. -// -// This means that Batch function side effects must be idempotent and -// take permanent effect only after a successful return is seen in -// caller. -// -// The maximum batch size and delay can be adjusted with DB.MaxBatchSize -// and DB.MaxBatchDelay, respectively. -// -// Batch is only useful when there are multiple goroutines calling it. -func (db *DB) Batch(fn func(*Tx) error) error { - errCh := make(chan error, 1) - - db.batchMu.Lock() - if (db.batch == nil) || (db.batch != nil && len(db.batch.calls) >= db.MaxBatchSize) { - // There is no existing batch, or the existing batch is full; start a new one. - db.batch = &batch{ - db: db, - } - db.batch.timer = time.AfterFunc(db.MaxBatchDelay, db.batch.trigger) - } - db.batch.calls = append(db.batch.calls, call{fn: fn, err: errCh}) - if len(db.batch.calls) >= db.MaxBatchSize { - // wake up batch, it's ready to run - go db.batch.trigger() - } - db.batchMu.Unlock() - - err := <-errCh - if err == trySolo { - err = db.Update(fn) - } - return err -} - -type call struct { - fn func(*Tx) error - err chan<- error -} - -type batch struct { - db *DB - timer *time.Timer - start sync.Once - calls []call -} - -// trigger runs the batch if it hasn't already been run. -func (b *batch) trigger() { - b.start.Do(b.run) -} - -// run performs the transactions in the batch and communicates results -// back to DB.Batch. -func (b *batch) run() { - b.db.batchMu.Lock() - b.timer.Stop() - // Make sure no new work is added to this batch, but don't break - // other batches. 
- if b.db.batch == b { - b.db.batch = nil - } - b.db.batchMu.Unlock() - -retry: - for len(b.calls) > 0 { - var failIdx = -1 - err := b.db.Update(func(tx *Tx) error { - for i, c := range b.calls { - if err := safelyCall(c.fn, tx); err != nil { - failIdx = i - return err - } - } - return nil - }) - - if failIdx >= 0 { - // take the failing transaction out of the batch. it's - // safe to shorten b.calls here because db.batch no longer - // points to us, and we hold the mutex anyway. - c := b.calls[failIdx] - b.calls[failIdx], b.calls = b.calls[len(b.calls)-1], b.calls[:len(b.calls)-1] - // tell the submitter re-run it solo, continue with the rest of the batch - c.err <- trySolo - continue retry - } - - // pass success, or bolt internal errors, to all callers - for _, c := range b.calls { - c.err <- err - } - break retry - } -} - -// trySolo is a special sentinel error value used for signaling that a -// transaction function should be re-run. It should never be seen by -// callers. -var trySolo = errors.New("batch function returned an error and should be re-run solo") - -type panicked struct { - reason interface{} -} - -func (p panicked) Error() string { - if err, ok := p.reason.(error); ok { - return err.Error() - } - return fmt.Sprintf("panic: %v", p.reason) -} - -func safelyCall(fn func(*Tx) error, tx *Tx) (err error) { - defer func() { - if p := recover(); p != nil { - err = panicked{p} - } - }() - return fn(tx) -} - -// Sync executes fdatasync() against the database file handle. -// -// This is not necessary under normal operation, however, if you use NoSync -// then it allows you to force the database file to sync against the disk. -func (db *DB) Sync() error { return fdatasync(db) } - -// Stats retrieves ongoing performance stats for the database. -// This is only updated when a transaction closes. 
-func (db *DB) Stats() Stats { - db.statlock.RLock() - defer db.statlock.RUnlock() - return db.stats -} - -// This is for internal access to the raw data bytes from the C cursor, use -// carefully, or not at all. -func (db *DB) Info() *Info { - return &Info{uintptr(unsafe.Pointer(&db.data[0])), db.pageSize} -} - -// page retrieves a page reference from the mmap based on the current page size. -func (db *DB) page(id pgid) *page { - pos := id * pgid(db.pageSize) - return (*page)(unsafe.Pointer(&db.data[pos])) -} - -// pageInBuffer retrieves a page reference from a given byte array based on the current page size. -func (db *DB) pageInBuffer(b []byte, id pgid) *page { - return (*page)(unsafe.Pointer(&b[id*pgid(db.pageSize)])) -} - -// meta retrieves the current meta page reference. -func (db *DB) meta() *meta { - // We have to return the meta with the highest txid which doesn't fail - // validation. Otherwise, we can cause errors when in fact the database is - // in a consistent state. metaA is the one with the higher txid. - metaA := db.meta0 - metaB := db.meta1 - if db.meta1.txid > db.meta0.txid { - metaA = db.meta1 - metaB = db.meta0 - } - - // Use higher meta page if valid. Otherwise fallback to previous, if valid. - if err := metaA.validate(); err == nil { - return metaA - } else if err := metaB.validate(); err == nil { - return metaB - } - - // This should never be reached, because both meta1 and meta0 were validated - // on mmap() and we do fsync() on every write. - panic("bolt.DB.meta(): invalid meta pages") -} - -// allocate returns a contiguous block of memory starting at a given page. -func (db *DB) allocate(txid txid, count int) (*page, error) { - // Allocate a temporary buffer for the page. - var buf []byte - if count == 1 { - buf = db.pagePool.Get().([]byte) - } else { - buf = make([]byte, count*db.pageSize) - } - p := (*page)(unsafe.Pointer(&buf[0])) - p.overflow = uint32(count - 1) - - // Use pages from the freelist if they are available. 
- if p.id = db.freelist.allocate(txid, count); p.id != 0 { - return p, nil - } - - // Resize mmap() if we're at the end. - p.id = db.rwtx.meta.pgid - var minsz = int((p.id+pgid(count))+1) * db.pageSize - if minsz >= db.datasz { - if err := db.mmap(minsz); err != nil { - return nil, fmt.Errorf("mmap allocate error: %s", err) - } - } - - // Move the page id high water mark. - db.rwtx.meta.pgid += pgid(count) - - return p, nil -} - -// grow grows the size of the database to the given sz. -func (db *DB) grow(sz int) error { - // Ignore if the new size is less than available file size. - if sz <= db.filesz { - return nil - } - - // If the data is smaller than the alloc size then only allocate what's needed. - // Once it goes over the allocation size then allocate in chunks. - if db.datasz < db.AllocSize { - sz = db.datasz - } else { - sz += db.AllocSize - } - - // Truncate and fsync to ensure file size metadata is flushed. - // https://github.com/boltdb/bolt/issues/284 - if !db.NoGrowSync && !db.readOnly { - if runtime.GOOS != "windows" { - if err := db.file.Truncate(int64(sz)); err != nil { - return fmt.Errorf("file resize error: %s", err) - } - } - if err := db.file.Sync(); err != nil { - return fmt.Errorf("file sync error: %s", err) - } - } - - db.filesz = sz - return nil -} - -func (db *DB) IsReadOnly() bool { - return db.readOnly -} - -func (db *DB) freepages() []pgid { - tx, err := db.beginTx() - defer func() { - err = tx.Rollback() - if err != nil { - panic("freepages: failed to rollback tx") - } - }() - if err != nil { - panic("freepages: failed to open read only tx") - } - - reachable := make(map[pgid]*page) - nofreed := make(map[pgid]bool) - ech := make(chan error) - go func() { - for e := range ech { - panic(fmt.Sprintf("freepages: failed to get all reachable pages (%v)", e)) - } - }() - tx.checkBucket(&tx.root, reachable, nofreed, ech) - close(ech) - - var fids []pgid - for i := pgid(2); i < db.meta().pgid; i++ { - if _, ok := reachable[i]; !ok { - fids = 
append(fids, i) - } - } - return fids -} - -// Options represents the options that can be set when opening a database. -type Options struct { - // Timeout is the amount of time to wait to obtain a file lock. - // When set to zero it will wait indefinitely. This option is only - // available on Darwin and Linux. - Timeout time.Duration - - // Sets the DB.NoGrowSync flag before memory mapping the file. - NoGrowSync bool - - // Do not sync freelist to disk. This improves the database write performance - // under normal operation, but requires a full database re-sync during recovery. - NoFreelistSync bool - - // FreelistType sets the backend freelist type. There are two options. Array which is simple but endures - // dramatic performance degradation if database is large and framentation in freelist is common. - // The alternative one is using hashmap, it is faster in almost all circumstances - // but it doesn't guarantee that it offers the smallest page id available. In normal case it is safe. - // The default type is array - FreelistType FreelistType - - // Open database in read-only mode. Uses flock(..., LOCK_SH |LOCK_NB) to - // grab a shared lock (UNIX). - ReadOnly bool - - // Sets the DB.MmapFlags flag before memory mapping the file. - MmapFlags int - - // InitialMmapSize is the initial mmap size of the database - // in bytes. Read transactions won't block write transaction - // if the InitialMmapSize is large enough to hold database mmap - // size. (See DB.Begin for more information) - // - // If <=0, the initial map size is 0. - // If initialMmapSize is smaller than the previous database size, - // it takes no effect. - InitialMmapSize int - - // PageSize overrides the default OS page size. - PageSize int - - // NoSync sets the initial value of DB.NoSync. Normally this can just be - // set directly on the DB itself when returned from Open(), but this option - // is useful in APIs which expose Options but not the underlying DB. 
- NoSync bool - - // OpenFile is used to open files. It defaults to os.OpenFile. This option - // is useful for writing hermetic tests. - OpenFile func(string, int, os.FileMode) (*os.File, error) -} - -// DefaultOptions represent the options used if nil options are passed into Open(). -// No timeout is used which will cause Bolt to wait indefinitely for a lock. -var DefaultOptions = &Options{ - Timeout: 0, - NoGrowSync: false, - FreelistType: FreelistArrayType, -} - -// Stats represents statistics about the database. -type Stats struct { - // Freelist stats - FreePageN int // total number of free pages on the freelist - PendingPageN int // total number of pending pages on the freelist - FreeAlloc int // total bytes allocated in free pages - FreelistInuse int // total bytes used by the freelist - - // Transaction stats - TxN int // total number of started read transactions - OpenTxN int // number of currently open read transactions - - TxStats TxStats // global, ongoing stats. -} - -// Sub calculates and returns the difference between two sets of database stats. -// This is useful when obtaining stats at two different points and time and -// you need the performance counters that occurred within that time span. -func (s *Stats) Sub(other *Stats) Stats { - if other == nil { - return *s - } - var diff Stats - diff.FreePageN = s.FreePageN - diff.PendingPageN = s.PendingPageN - diff.FreeAlloc = s.FreeAlloc - diff.FreelistInuse = s.FreelistInuse - diff.TxN = s.TxN - other.TxN - diff.TxStats = s.TxStats.Sub(&other.TxStats) - return diff -} - -type Info struct { - Data uintptr - PageSize int -} - -type meta struct { - magic uint32 - version uint32 - pageSize uint32 - flags uint32 - root bucket - freelist pgid - pgid pgid - txid txid - checksum uint64 -} - -// validate checks the marker bytes and version of the meta page to ensure it matches this binary. 
-func (m *meta) validate() error { - if m.magic != magic { - return ErrInvalid - } else if m.version != version { - return ErrVersionMismatch - } else if m.checksum != 0 && m.checksum != m.sum64() { - return ErrChecksum - } - return nil -} - -// copy copies one meta object to another. -func (m *meta) copy(dest *meta) { - *dest = *m -} - -// write writes the meta onto a page. -func (m *meta) write(p *page) { - if m.root.root >= m.pgid { - panic(fmt.Sprintf("root bucket pgid (%d) above high water mark (%d)", m.root.root, m.pgid)) - } else if m.freelist >= m.pgid && m.freelist != pgidNoFreelist { - // TODO: reject pgidNoFreeList if !NoFreelistSync - panic(fmt.Sprintf("freelist pgid (%d) above high water mark (%d)", m.freelist, m.pgid)) - } - - // Page id is either going to be 0 or 1 which we can determine by the transaction ID. - p.id = pgid(m.txid % 2) - p.flags |= metaPageFlag - - // Calculate the checksum. - m.checksum = m.sum64() - - m.copy(p.meta()) -} - -// generates the checksum for the meta. -func (m *meta) sum64() uint64 { - var h = fnv.New64a() - _, _ = h.Write((*[unsafe.Offsetof(meta{}.checksum)]byte)(unsafe.Pointer(m))[:]) - return h.Sum64() -} - -// _assert will panic with a given formatted message if the given condition is false. -func _assert(condition bool, msg string, v ...interface{}) { - if !condition { - panic(fmt.Sprintf("assertion failed: "+msg, v...)) - } -} diff --git a/vendor/go.etcd.io/bbolt/doc.go b/vendor/go.etcd.io/bbolt/doc.go deleted file mode 100644 index 95f25f01c..000000000 --- a/vendor/go.etcd.io/bbolt/doc.go +++ /dev/null @@ -1,44 +0,0 @@ -/* -package bbolt implements a low-level key/value store in pure Go. It supports -fully serializable transactions, ACID semantics, and lock-free MVCC with -multiple readers and a single writer. Bolt can be used for projects that -want a simple data store without the need to add large dependencies such as -Postgres or MySQL. - -Bolt is a single-level, zero-copy, B+tree data store. 
This means that Bolt is -optimized for fast read access and does not require recovery in the event of a -system crash. Transactions which have not finished committing will simply be -rolled back in the event of a crash. - -The design of Bolt is based on Howard Chu's LMDB database project. - -Bolt currently works on Windows, Mac OS X, and Linux. - - -Basics - -There are only a few types in Bolt: DB, Bucket, Tx, and Cursor. The DB is -a collection of buckets and is represented by a single file on disk. A bucket is -a collection of unique keys that are associated with values. - -Transactions provide either read-only or read-write access to the database. -Read-only transactions can retrieve key/value pairs and can use Cursors to -iterate over the dataset sequentially. Read-write transactions can create and -delete buckets and can insert and remove keys. Only one read-write transaction -is allowed at a time. - - -Caveats - -The database uses a read-only, memory-mapped data file to ensure that -applications cannot corrupt the database, however, this means that keys and -values returned from Bolt cannot be changed. Writing to a read-only byte slice -will cause Go to panic. - -Keys and values retrieved from the database are only valid for the life of -the transaction. When used outside the transaction, these byte slices can -point to different data or can point to invalid memory which will cause a panic. - - -*/ -package bbolt diff --git a/vendor/go.etcd.io/bbolt/errors.go b/vendor/go.etcd.io/bbolt/errors.go deleted file mode 100644 index 48758ca57..000000000 --- a/vendor/go.etcd.io/bbolt/errors.go +++ /dev/null @@ -1,71 +0,0 @@ -package bbolt - -import "errors" - -// These errors can be returned when opening or calling methods on a DB. -var ( - // ErrDatabaseNotOpen is returned when a DB instance is accessed before it - // is opened or after it is closed. 
- ErrDatabaseNotOpen = errors.New("database not open") - - // ErrDatabaseOpen is returned when opening a database that is - // already open. - ErrDatabaseOpen = errors.New("database already open") - - // ErrInvalid is returned when both meta pages on a database are invalid. - // This typically occurs when a file is not a bolt database. - ErrInvalid = errors.New("invalid database") - - // ErrVersionMismatch is returned when the data file was created with a - // different version of Bolt. - ErrVersionMismatch = errors.New("version mismatch") - - // ErrChecksum is returned when either meta page checksum does not match. - ErrChecksum = errors.New("checksum error") - - // ErrTimeout is returned when a database cannot obtain an exclusive lock - // on the data file after the timeout passed to Open(). - ErrTimeout = errors.New("timeout") -) - -// These errors can occur when beginning or committing a Tx. -var ( - // ErrTxNotWritable is returned when performing a write operation on a - // read-only transaction. - ErrTxNotWritable = errors.New("tx not writable") - - // ErrTxClosed is returned when committing or rolling back a transaction - // that has already been committed or rolled back. - ErrTxClosed = errors.New("tx closed") - - // ErrDatabaseReadOnly is returned when a mutating transaction is started on a - // read-only database. - ErrDatabaseReadOnly = errors.New("database is in read-only mode") -) - -// These errors can occur when putting or deleting a value or a bucket. -var ( - // ErrBucketNotFound is returned when trying to access a bucket that has - // not been created yet. - ErrBucketNotFound = errors.New("bucket not found") - - // ErrBucketExists is returned when creating a bucket that already exists. - ErrBucketExists = errors.New("bucket already exists") - - // ErrBucketNameRequired is returned when creating a bucket with a blank name. 
- ErrBucketNameRequired = errors.New("bucket name required") - - // ErrKeyRequired is returned when inserting a zero-length key. - ErrKeyRequired = errors.New("key required") - - // ErrKeyTooLarge is returned when inserting a key that is larger than MaxKeySize. - ErrKeyTooLarge = errors.New("key too large") - - // ErrValueTooLarge is returned when inserting a value that is larger than MaxValueSize. - ErrValueTooLarge = errors.New("value too large") - - // ErrIncompatibleValue is returned when trying create or delete a bucket - // on an existing non-bucket key or when trying to create or delete a - // non-bucket key on an existing bucket key. - ErrIncompatibleValue = errors.New("incompatible value") -) diff --git a/vendor/go.etcd.io/bbolt/freelist.go b/vendor/go.etcd.io/bbolt/freelist.go deleted file mode 100644 index 697a46968..000000000 --- a/vendor/go.etcd.io/bbolt/freelist.go +++ /dev/null @@ -1,404 +0,0 @@ -package bbolt - -import ( - "fmt" - "sort" - "unsafe" -) - -// txPending holds a list of pgids and corresponding allocation txns -// that are pending to be freed. -type txPending struct { - ids []pgid - alloctx []txid // txids allocating the ids - lastReleaseBegin txid // beginning txid of last matching releaseRange -} - -// pidSet holds the set of starting pgids which have the same span size -type pidSet map[pgid]struct{} - -// freelist represents a list of all pages that are available for allocation. -// It also tracks pages that have been freed but are still in use by open transactions. -type freelist struct { - freelistType FreelistType // freelist type - ids []pgid // all free and available free page ids. - allocs map[pgid]txid // mapping of txid that allocated a pgid. - pending map[txid]*txPending // mapping of soon-to-be free page ids by tx. - cache map[pgid]bool // fast lookup of all free and pending page ids. 
- freemaps map[uint64]pidSet // key is the size of continuous pages(span), value is a set which contains the starting pgids of same size - forwardMap map[pgid]uint64 // key is start pgid, value is its span size - backwardMap map[pgid]uint64 // key is end pgid, value is its span size - allocate func(txid txid, n int) pgid // the freelist allocate func - free_count func() int // the function which gives you free page number - mergeSpans func(ids pgids) // the mergeSpan func - getFreePageIDs func() []pgid // get free pgids func - readIDs func(pgids []pgid) // readIDs func reads list of pages and init the freelist -} - -// newFreelist returns an empty, initialized freelist. -func newFreelist(freelistType FreelistType) *freelist { - f := &freelist{ - freelistType: freelistType, - allocs: make(map[pgid]txid), - pending: make(map[txid]*txPending), - cache: make(map[pgid]bool), - freemaps: make(map[uint64]pidSet), - forwardMap: make(map[pgid]uint64), - backwardMap: make(map[pgid]uint64), - } - - if freelistType == FreelistMapType { - f.allocate = f.hashmapAllocate - f.free_count = f.hashmapFreeCount - f.mergeSpans = f.hashmapMergeSpans - f.getFreePageIDs = f.hashmapGetFreePageIDs - f.readIDs = f.hashmapReadIDs - } else { - f.allocate = f.arrayAllocate - f.free_count = f.arrayFreeCount - f.mergeSpans = f.arrayMergeSpans - f.getFreePageIDs = f.arrayGetFreePageIDs - f.readIDs = f.arrayReadIDs - } - - return f -} - -// size returns the size of the page after serialization. -func (f *freelist) size() int { - n := f.count() - if n >= 0xFFFF { - // The first element will be used to store the count. See freelist.write. 
- n++ - } - return int(pageHeaderSize) + (int(unsafe.Sizeof(pgid(0))) * n) -} - -// count returns count of pages on the freelist -func (f *freelist) count() int { - return f.free_count() + f.pending_count() -} - -// arrayFreeCount returns count of free pages(array version) -func (f *freelist) arrayFreeCount() int { - return len(f.ids) -} - -// pending_count returns count of pending pages -func (f *freelist) pending_count() int { - var count int - for _, txp := range f.pending { - count += len(txp.ids) - } - return count -} - -// copyall copies a list of all free ids and all pending ids in one sorted list. -// f.count returns the minimum length required for dst. -func (f *freelist) copyall(dst []pgid) { - m := make(pgids, 0, f.pending_count()) - for _, txp := range f.pending { - m = append(m, txp.ids...) - } - sort.Sort(m) - mergepgids(dst, f.getFreePageIDs(), m) -} - -// arrayAllocate returns the starting page id of a contiguous list of pages of a given size. -// If a contiguous block cannot be found then 0 is returned. -func (f *freelist) arrayAllocate(txid txid, n int) pgid { - if len(f.ids) == 0 { - return 0 - } - - var initial, previd pgid - for i, id := range f.ids { - if id <= 1 { - panic(fmt.Sprintf("invalid page allocation: %d", id)) - } - - // Reset initial page if this is not contiguous. - if previd == 0 || id-previd != 1 { - initial = id - } - - // If we found a contiguous block then remove it and return it. - if (id-initial)+1 == pgid(n) { - // If we're allocating off the beginning then take the fast path - // and just adjust the existing slice. This will use extra memory - // temporarily but the append() in free() will realloc the slice - // as is necessary. - if (i + 1) == n { - f.ids = f.ids[i+1:] - } else { - copy(f.ids[i-n+1:], f.ids[i+1:]) - f.ids = f.ids[:len(f.ids)-n] - } - - // Remove from the free cache. 
- for i := pgid(0); i < pgid(n); i++ { - delete(f.cache, initial+i) - } - f.allocs[initial] = txid - return initial - } - - previd = id - } - return 0 -} - -// free releases a page and its overflow for a given transaction id. -// If the page is already free then a panic will occur. -func (f *freelist) free(txid txid, p *page) { - if p.id <= 1 { - panic(fmt.Sprintf("cannot free page 0 or 1: %d", p.id)) - } - - // Free page and all its overflow pages. - txp := f.pending[txid] - if txp == nil { - txp = &txPending{} - f.pending[txid] = txp - } - allocTxid, ok := f.allocs[p.id] - if ok { - delete(f.allocs, p.id) - } else if (p.flags & freelistPageFlag) != 0 { - // Freelist is always allocated by prior tx. - allocTxid = txid - 1 - } - - for id := p.id; id <= p.id+pgid(p.overflow); id++ { - // Verify that page is not already free. - if f.cache[id] { - panic(fmt.Sprintf("page %d already freed", id)) - } - // Add to the freelist and cache. - txp.ids = append(txp.ids, id) - txp.alloctx = append(txp.alloctx, allocTxid) - f.cache[id] = true - } -} - -// release moves all page ids for a transaction id (or older) to the freelist. -func (f *freelist) release(txid txid) { - m := make(pgids, 0) - for tid, txp := range f.pending { - if tid <= txid { - // Move transaction's pending pages to the available freelist. - // Don't remove from the cache since the page is still free. - m = append(m, txp.ids...) - delete(f.pending, tid) - } - } - f.mergeSpans(m) -} - -// releaseRange moves pending pages allocated within an extent [begin,end] to the free list. -func (f *freelist) releaseRange(begin, end txid) { - if begin > end { - return - } - var m pgids - for tid, txp := range f.pending { - if tid < begin || tid > end { - continue - } - // Don't recompute freed pages if ranges haven't updated. 
- if txp.lastReleaseBegin == begin { - continue - } - for i := 0; i < len(txp.ids); i++ { - if atx := txp.alloctx[i]; atx < begin || atx > end { - continue - } - m = append(m, txp.ids[i]) - txp.ids[i] = txp.ids[len(txp.ids)-1] - txp.ids = txp.ids[:len(txp.ids)-1] - txp.alloctx[i] = txp.alloctx[len(txp.alloctx)-1] - txp.alloctx = txp.alloctx[:len(txp.alloctx)-1] - i-- - } - txp.lastReleaseBegin = begin - if len(txp.ids) == 0 { - delete(f.pending, tid) - } - } - f.mergeSpans(m) -} - -// rollback removes the pages from a given pending tx. -func (f *freelist) rollback(txid txid) { - // Remove page ids from cache. - txp := f.pending[txid] - if txp == nil { - return - } - var m pgids - for i, pgid := range txp.ids { - delete(f.cache, pgid) - tx := txp.alloctx[i] - if tx == 0 { - continue - } - if tx != txid { - // Pending free aborted; restore page back to alloc list. - f.allocs[pgid] = tx - } else { - // Freed page was allocated by this txn; OK to throw away. - m = append(m, pgid) - } - } - // Remove pages from pending list and mark as free if allocated by txid. - delete(f.pending, txid) - f.mergeSpans(m) -} - -// freed returns whether a given page is in the free list. -func (f *freelist) freed(pgid pgid) bool { - return f.cache[pgid] -} - -// read initializes the freelist from a freelist page. -func (f *freelist) read(p *page) { - if (p.flags & freelistPageFlag) == 0 { - panic(fmt.Sprintf("invalid freelist page: %d, page type is %s", p.id, p.typ())) - } - // If the page.count is at the max uint16 value (64k) then it's considered - // an overflow and the size of the freelist is stored as the first element. - var idx, count = 0, int(p.count) - if count == 0xFFFF { - idx = 1 - c := *(*pgid)(unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p))) - count = int(c) - if count < 0 { - panic(fmt.Sprintf("leading element count %d overflows int", c)) - } - } - - // Copy the list of page ids from the freelist. 
- if count == 0 { - f.ids = nil - } else { - var ids []pgid - data := unsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p), unsafe.Sizeof(ids[0]), idx) - unsafeSlice(unsafe.Pointer(&ids), data, count) - - // copy the ids, so we don't modify on the freelist page directly - idsCopy := make([]pgid, count) - copy(idsCopy, ids) - // Make sure they're sorted. - sort.Sort(pgids(idsCopy)) - - f.readIDs(idsCopy) - } -} - -// arrayReadIDs initializes the freelist from a given list of ids. -func (f *freelist) arrayReadIDs(ids []pgid) { - f.ids = ids - f.reindex() -} - -func (f *freelist) arrayGetFreePageIDs() []pgid { - return f.ids -} - -// write writes the page ids onto a freelist page. All free and pending ids are -// saved to disk since in the event of a program crash, all pending ids will -// become free. -func (f *freelist) write(p *page) error { - // Combine the old free pgids and pgids waiting on an open transaction. - - // Update the header flag. - p.flags |= freelistPageFlag - - // The page.count can only hold up to 64k elements so if we overflow that - // number then we handle it by putting the size in the first element. - l := f.count() - if l == 0 { - p.count = uint16(l) - } else if l < 0xFFFF { - p.count = uint16(l) - var ids []pgid - data := unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)) - unsafeSlice(unsafe.Pointer(&ids), data, l) - f.copyall(ids) - } else { - p.count = 0xFFFF - var ids []pgid - data := unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)) - unsafeSlice(unsafe.Pointer(&ids), data, l+1) - ids[0] = pgid(l) - f.copyall(ids[1:]) - } - - return nil -} - -// reload reads the freelist from a page and filters out pending items. -func (f *freelist) reload(p *page) { - f.read(p) - - // Build a cache of only pending pages. 
- pcache := make(map[pgid]bool) - for _, txp := range f.pending { - for _, pendingID := range txp.ids { - pcache[pendingID] = true - } - } - - // Check each page in the freelist and build a new available freelist - // with any pages not in the pending lists. - var a []pgid - for _, id := range f.getFreePageIDs() { - if !pcache[id] { - a = append(a, id) - } - } - - f.readIDs(a) -} - -// noSyncReload reads the freelist from pgids and filters out pending items. -func (f *freelist) noSyncReload(pgids []pgid) { - // Build a cache of only pending pages. - pcache := make(map[pgid]bool) - for _, txp := range f.pending { - for _, pendingID := range txp.ids { - pcache[pendingID] = true - } - } - - // Check each page in the freelist and build a new available freelist - // with any pages not in the pending lists. - var a []pgid - for _, id := range pgids { - if !pcache[id] { - a = append(a, id) - } - } - - f.readIDs(a) -} - -// reindex rebuilds the free cache based on available and pending free lists. 
-func (f *freelist) reindex() { - ids := f.getFreePageIDs() - f.cache = make(map[pgid]bool, len(ids)) - for _, id := range ids { - f.cache[id] = true - } - for _, txp := range f.pending { - for _, pendingID := range txp.ids { - f.cache[pendingID] = true - } - } -} - -// arrayMergeSpans try to merge list of pages(represented by pgids) with existing spans but using array -func (f *freelist) arrayMergeSpans(ids pgids) { - sort.Sort(ids) - f.ids = pgids(f.ids).merge(ids) -} diff --git a/vendor/go.etcd.io/bbolt/freelist_hmap.go b/vendor/go.etcd.io/bbolt/freelist_hmap.go deleted file mode 100644 index 02ef2be04..000000000 --- a/vendor/go.etcd.io/bbolt/freelist_hmap.go +++ /dev/null @@ -1,178 +0,0 @@ -package bbolt - -import "sort" - -// hashmapFreeCount returns count of free pages(hashmap version) -func (f *freelist) hashmapFreeCount() int { - // use the forwardmap to get the total count - count := 0 - for _, size := range f.forwardMap { - count += int(size) - } - return count -} - -// hashmapAllocate serves the same purpose as arrayAllocate, but use hashmap as backend -func (f *freelist) hashmapAllocate(txid txid, n int) pgid { - if n == 0 { - return 0 - } - - // if we have a exact size match just return short path - if bm, ok := f.freemaps[uint64(n)]; ok { - for pid := range bm { - // remove the span - f.delSpan(pid, uint64(n)) - - f.allocs[pid] = txid - - for i := pgid(0); i < pgid(n); i++ { - delete(f.cache, pid+i) - } - return pid - } - } - - // lookup the map to find larger span - for size, bm := range f.freemaps { - if size < uint64(n) { - continue - } - - for pid := range bm { - // remove the initial - f.delSpan(pid, uint64(size)) - - f.allocs[pid] = txid - - remain := size - uint64(n) - - // add remain span - f.addSpan(pid+pgid(n), remain) - - for i := pgid(0); i < pgid(n); i++ { - delete(f.cache, pid+pgid(i)) - } - return pid - } - } - - return 0 -} - -// hashmapReadIDs reads pgids as input an initial the freelist(hashmap version) -func (f *freelist) 
hashmapReadIDs(pgids []pgid) { - f.init(pgids) - - // Rebuild the page cache. - f.reindex() -} - -// hashmapGetFreePageIDs returns the sorted free page ids -func (f *freelist) hashmapGetFreePageIDs() []pgid { - count := f.free_count() - if count == 0 { - return nil - } - - m := make([]pgid, 0, count) - for start, size := range f.forwardMap { - for i := 0; i < int(size); i++ { - m = append(m, start+pgid(i)) - } - } - sort.Sort(pgids(m)) - - return m -} - -// hashmapMergeSpans try to merge list of pages(represented by pgids) with existing spans -func (f *freelist) hashmapMergeSpans(ids pgids) { - for _, id := range ids { - // try to see if we can merge and update - f.mergeWithExistingSpan(id) - } -} - -// mergeWithExistingSpan merges pid to the existing free spans, try to merge it backward and forward -func (f *freelist) mergeWithExistingSpan(pid pgid) { - prev := pid - 1 - next := pid + 1 - - preSize, mergeWithPrev := f.backwardMap[prev] - nextSize, mergeWithNext := f.forwardMap[next] - newStart := pid - newSize := uint64(1) - - if mergeWithPrev { - //merge with previous span - start := prev + 1 - pgid(preSize) - f.delSpan(start, preSize) - - newStart -= pgid(preSize) - newSize += preSize - } - - if mergeWithNext { - // merge with next span - f.delSpan(next, nextSize) - newSize += nextSize - } - - f.addSpan(newStart, newSize) -} - -func (f *freelist) addSpan(start pgid, size uint64) { - f.backwardMap[start-1+pgid(size)] = size - f.forwardMap[start] = size - if _, ok := f.freemaps[size]; !ok { - f.freemaps[size] = make(map[pgid]struct{}) - } - - f.freemaps[size][start] = struct{}{} -} - -func (f *freelist) delSpan(start pgid, size uint64) { - delete(f.forwardMap, start) - delete(f.backwardMap, start+pgid(size-1)) - delete(f.freemaps[size], start) - if len(f.freemaps[size]) == 0 { - delete(f.freemaps, size) - } -} - -// initial from pgids using when use hashmap version -// pgids must be sorted -func (f *freelist) init(pgids []pgid) { - if len(pgids) == 0 { - return - 
} - - size := uint64(1) - start := pgids[0] - - if !sort.SliceIsSorted([]pgid(pgids), func(i, j int) bool { return pgids[i] < pgids[j] }) { - panic("pgids not sorted") - } - - f.freemaps = make(map[uint64]pidSet) - f.forwardMap = make(map[pgid]uint64) - f.backwardMap = make(map[pgid]uint64) - - for i := 1; i < len(pgids); i++ { - // continuous page - if pgids[i] == pgids[i-1]+1 { - size++ - } else { - f.addSpan(start, size) - - size = 1 - start = pgids[i] - } - } - - // init the tail - if size != 0 && start != 0 { - f.addSpan(start, size) - } -} diff --git a/vendor/go.etcd.io/bbolt/go.mod b/vendor/go.etcd.io/bbolt/go.mod deleted file mode 100644 index c2366daef..000000000 --- a/vendor/go.etcd.io/bbolt/go.mod +++ /dev/null @@ -1,5 +0,0 @@ -module go.etcd.io/bbolt - -go 1.12 - -require golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5 diff --git a/vendor/go.etcd.io/bbolt/go.sum b/vendor/go.etcd.io/bbolt/go.sum deleted file mode 100644 index 4ad15a488..000000000 --- a/vendor/go.etcd.io/bbolt/go.sum +++ /dev/null @@ -1,2 +0,0 @@ -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5 h1:LfCXLvNmTYH9kEmVgqbnsWfruoXZIrh4YBgqVHtDvw0= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/vendor/go.etcd.io/bbolt/node.go b/vendor/go.etcd.io/bbolt/node.go deleted file mode 100644 index 73988b5c4..000000000 --- a/vendor/go.etcd.io/bbolt/node.go +++ /dev/null @@ -1,602 +0,0 @@ -package bbolt - -import ( - "bytes" - "fmt" - "sort" - "unsafe" -) - -// node represents an in-memory, deserialized page. -type node struct { - bucket *Bucket - isLeaf bool - unbalanced bool - spilled bool - key []byte - pgid pgid - parent *node - children nodes - inodes inodes -} - -// root returns the top-level node this node is attached to. -func (n *node) root() *node { - if n.parent == nil { - return n - } - return n.parent.root() -} - -// minKeys returns the minimum number of inodes this node should have. 
-func (n *node) minKeys() int { - if n.isLeaf { - return 1 - } - return 2 -} - -// size returns the size of the node after serialization. -func (n *node) size() int { - sz, elsz := pageHeaderSize, n.pageElementSize() - for i := 0; i < len(n.inodes); i++ { - item := &n.inodes[i] - sz += elsz + uintptr(len(item.key)) + uintptr(len(item.value)) - } - return int(sz) -} - -// sizeLessThan returns true if the node is less than a given size. -// This is an optimization to avoid calculating a large node when we only need -// to know if it fits inside a certain page size. -func (n *node) sizeLessThan(v uintptr) bool { - sz, elsz := pageHeaderSize, n.pageElementSize() - for i := 0; i < len(n.inodes); i++ { - item := &n.inodes[i] - sz += elsz + uintptr(len(item.key)) + uintptr(len(item.value)) - if sz >= v { - return false - } - } - return true -} - -// pageElementSize returns the size of each page element based on the type of node. -func (n *node) pageElementSize() uintptr { - if n.isLeaf { - return leafPageElementSize - } - return branchPageElementSize -} - -// childAt returns the child node at a given index. -func (n *node) childAt(index int) *node { - if n.isLeaf { - panic(fmt.Sprintf("invalid childAt(%d) on a leaf node", index)) - } - return n.bucket.node(n.inodes[index].pgid, n) -} - -// childIndex returns the index of a given child node. -func (n *node) childIndex(child *node) int { - index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, child.key) != -1 }) - return index -} - -// numChildren returns the number of children. -func (n *node) numChildren() int { - return len(n.inodes) -} - -// nextSibling returns the next node with the same parent. -func (n *node) nextSibling() *node { - if n.parent == nil { - return nil - } - index := n.parent.childIndex(n) - if index >= n.parent.numChildren()-1 { - return nil - } - return n.parent.childAt(index + 1) -} - -// prevSibling returns the previous node with the same parent. 
-func (n *node) prevSibling() *node { - if n.parent == nil { - return nil - } - index := n.parent.childIndex(n) - if index == 0 { - return nil - } - return n.parent.childAt(index - 1) -} - -// put inserts a key/value. -func (n *node) put(oldKey, newKey, value []byte, pgid pgid, flags uint32) { - if pgid >= n.bucket.tx.meta.pgid { - panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", pgid, n.bucket.tx.meta.pgid)) - } else if len(oldKey) <= 0 { - panic("put: zero-length old key") - } else if len(newKey) <= 0 { - panic("put: zero-length new key") - } - - // Find insertion index. - index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, oldKey) != -1 }) - - // Add capacity and shift nodes if we don't have an exact match and need to insert. - exact := (len(n.inodes) > 0 && index < len(n.inodes) && bytes.Equal(n.inodes[index].key, oldKey)) - if !exact { - n.inodes = append(n.inodes, inode{}) - copy(n.inodes[index+1:], n.inodes[index:]) - } - - inode := &n.inodes[index] - inode.flags = flags - inode.key = newKey - inode.value = value - inode.pgid = pgid - _assert(len(inode.key) > 0, "put: zero-length inode key") -} - -// del removes a key from the node. -func (n *node) del(key []byte) { - // Find index of key. - index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, key) != -1 }) - - // Exit if the key isn't found. - if index >= len(n.inodes) || !bytes.Equal(n.inodes[index].key, key) { - return - } - - // Delete inode from the node. - n.inodes = append(n.inodes[:index], n.inodes[index+1:]...) - - // Mark the node as needing rebalancing. - n.unbalanced = true -} - -// read initializes the node from a page. 
-func (n *node) read(p *page) { - n.pgid = p.id - n.isLeaf = ((p.flags & leafPageFlag) != 0) - n.inodes = make(inodes, int(p.count)) - - for i := 0; i < int(p.count); i++ { - inode := &n.inodes[i] - if n.isLeaf { - elem := p.leafPageElement(uint16(i)) - inode.flags = elem.flags - inode.key = elem.key() - inode.value = elem.value() - } else { - elem := p.branchPageElement(uint16(i)) - inode.pgid = elem.pgid - inode.key = elem.key() - } - _assert(len(inode.key) > 0, "read: zero-length inode key") - } - - // Save first key so we can find the node in the parent when we spill. - if len(n.inodes) > 0 { - n.key = n.inodes[0].key - _assert(len(n.key) > 0, "read: zero-length node key") - } else { - n.key = nil - } -} - -// write writes the items onto one or more pages. -func (n *node) write(p *page) { - // Initialize page. - if n.isLeaf { - p.flags |= leafPageFlag - } else { - p.flags |= branchPageFlag - } - - if len(n.inodes) >= 0xFFFF { - panic(fmt.Sprintf("inode overflow: %d (pgid=%d)", len(n.inodes), p.id)) - } - p.count = uint16(len(n.inodes)) - - // Stop here if there are no items to write. - if p.count == 0 { - return - } - - // Loop over each item and write it to the page. - // off tracks the offset into p of the start of the next data. - off := unsafe.Sizeof(*p) + n.pageElementSize()*uintptr(len(n.inodes)) - for i, item := range n.inodes { - _assert(len(item.key) > 0, "write: zero-length inode key") - - // Create a slice to write into of needed size and advance - // byte pointer for next iteration. - sz := len(item.key) + len(item.value) - b := unsafeByteSlice(unsafe.Pointer(p), off, 0, sz) - off += uintptr(sz) - - // Write the page element. 
- if n.isLeaf { - elem := p.leafPageElement(uint16(i)) - elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem))) - elem.flags = item.flags - elem.ksize = uint32(len(item.key)) - elem.vsize = uint32(len(item.value)) - } else { - elem := p.branchPageElement(uint16(i)) - elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem))) - elem.ksize = uint32(len(item.key)) - elem.pgid = item.pgid - _assert(elem.pgid != p.id, "write: circular dependency occurred") - } - - // Write data for the element to the end of the page. - l := copy(b, item.key) - copy(b[l:], item.value) - } - - // DEBUG ONLY: n.dump() -} - -// split breaks up a node into multiple smaller nodes, if appropriate. -// This should only be called from the spill() function. -func (n *node) split(pageSize uintptr) []*node { - var nodes []*node - - node := n - for { - // Split node into two. - a, b := node.splitTwo(pageSize) - nodes = append(nodes, a) - - // If we can't split then exit the loop. - if b == nil { - break - } - - // Set node to b so it gets split on the next iteration. - node = b - } - - return nodes -} - -// splitTwo breaks up a node into two smaller nodes, if appropriate. -// This should only be called from the split() function. -func (n *node) splitTwo(pageSize uintptr) (*node, *node) { - // Ignore the split if the page doesn't have at least enough nodes for - // two pages or if the nodes can fit in a single page. - if len(n.inodes) <= (minKeysPerPage*2) || n.sizeLessThan(pageSize) { - return n, nil - } - - // Determine the threshold before starting a new node. - var fillPercent = n.bucket.FillPercent - if fillPercent < minFillPercent { - fillPercent = minFillPercent - } else if fillPercent > maxFillPercent { - fillPercent = maxFillPercent - } - threshold := int(float64(pageSize) * fillPercent) - - // Determine split position and sizes of the two pages. - splitIndex, _ := n.splitIndex(threshold) - - // Split node into two separate nodes. 
- // If there's no parent then we'll need to create one. - if n.parent == nil { - n.parent = &node{bucket: n.bucket, children: []*node{n}} - } - - // Create a new node and add it to the parent. - next := &node{bucket: n.bucket, isLeaf: n.isLeaf, parent: n.parent} - n.parent.children = append(n.parent.children, next) - - // Split inodes across two nodes. - next.inodes = n.inodes[splitIndex:] - n.inodes = n.inodes[:splitIndex] - - // Update the statistics. - n.bucket.tx.stats.Split++ - - return n, next -} - -// splitIndex finds the position where a page will fill a given threshold. -// It returns the index as well as the size of the first page. -// This is only be called from split(). -func (n *node) splitIndex(threshold int) (index, sz uintptr) { - sz = pageHeaderSize - - // Loop until we only have the minimum number of keys required for the second page. - for i := 0; i < len(n.inodes)-minKeysPerPage; i++ { - index = uintptr(i) - inode := n.inodes[i] - elsize := n.pageElementSize() + uintptr(len(inode.key)) + uintptr(len(inode.value)) - - // If we have at least the minimum number of keys and adding another - // node would put us over the threshold then exit and return. - if index >= minKeysPerPage && sz+elsize > uintptr(threshold) { - break - } - - // Add the element size to the total size. - sz += elsize - } - - return -} - -// spill writes the nodes to dirty pages and splits nodes as it goes. -// Returns an error if dirty pages cannot be allocated. -func (n *node) spill() error { - var tx = n.bucket.tx - if n.spilled { - return nil - } - - // Spill child nodes first. Child nodes can materialize sibling nodes in - // the case of split-merge so we cannot use a range loop. We have to check - // the children size on every loop iteration. - sort.Sort(n.children) - for i := 0; i < len(n.children); i++ { - if err := n.children[i].spill(); err != nil { - return err - } - } - - // We no longer need the child list because it's only used for spill tracking. 
- n.children = nil - - // Split nodes into appropriate sizes. The first node will always be n. - var nodes = n.split(uintptr(tx.db.pageSize)) - for _, node := range nodes { - // Add node's page to the freelist if it's not new. - if node.pgid > 0 { - tx.db.freelist.free(tx.meta.txid, tx.page(node.pgid)) - node.pgid = 0 - } - - // Allocate contiguous space for the node. - p, err := tx.allocate((node.size() + tx.db.pageSize - 1) / tx.db.pageSize) - if err != nil { - return err - } - - // Write the node. - if p.id >= tx.meta.pgid { - panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", p.id, tx.meta.pgid)) - } - node.pgid = p.id - node.write(p) - node.spilled = true - - // Insert into parent inodes. - if node.parent != nil { - var key = node.key - if key == nil { - key = node.inodes[0].key - } - - node.parent.put(key, node.inodes[0].key, nil, node.pgid, 0) - node.key = node.inodes[0].key - _assert(len(node.key) > 0, "spill: zero-length node key") - } - - // Update the statistics. - tx.stats.Spill++ - } - - // If the root node split and created a new root then we need to spill that - // as well. We'll clear out the children to make sure it doesn't try to respill. - if n.parent != nil && n.parent.pgid == 0 { - n.children = nil - return n.parent.spill() - } - - return nil -} - -// rebalance attempts to combine the node with sibling nodes if the node fill -// size is below a threshold or if there are not enough keys. -func (n *node) rebalance() { - if !n.unbalanced { - return - } - n.unbalanced = false - - // Update statistics. - n.bucket.tx.stats.Rebalance++ - - // Ignore if node is above threshold (25%) and has enough keys. - var threshold = n.bucket.tx.db.pageSize / 4 - if n.size() > threshold && len(n.inodes) > n.minKeys() { - return - } - - // Root node has special handling. - if n.parent == nil { - // If root node is a branch and only has one node then collapse it. - if !n.isLeaf && len(n.inodes) == 1 { - // Move root's child up. 
- child := n.bucket.node(n.inodes[0].pgid, n) - n.isLeaf = child.isLeaf - n.inodes = child.inodes[:] - n.children = child.children - - // Reparent all child nodes being moved. - for _, inode := range n.inodes { - if child, ok := n.bucket.nodes[inode.pgid]; ok { - child.parent = n - } - } - - // Remove old child. - child.parent = nil - delete(n.bucket.nodes, child.pgid) - child.free() - } - - return - } - - // If node has no keys then just remove it. - if n.numChildren() == 0 { - n.parent.del(n.key) - n.parent.removeChild(n) - delete(n.bucket.nodes, n.pgid) - n.free() - n.parent.rebalance() - return - } - - _assert(n.parent.numChildren() > 1, "parent must have at least 2 children") - - // Destination node is right sibling if idx == 0, otherwise left sibling. - var target *node - var useNextSibling = (n.parent.childIndex(n) == 0) - if useNextSibling { - target = n.nextSibling() - } else { - target = n.prevSibling() - } - - // If both this node and the target node are too small then merge them. - if useNextSibling { - // Reparent all child nodes being moved. - for _, inode := range target.inodes { - if child, ok := n.bucket.nodes[inode.pgid]; ok { - child.parent.removeChild(child) - child.parent = n - child.parent.children = append(child.parent.children, child) - } - } - - // Copy over inodes from target and remove target. - n.inodes = append(n.inodes, target.inodes...) - n.parent.del(target.key) - n.parent.removeChild(target) - delete(n.bucket.nodes, target.pgid) - target.free() - } else { - // Reparent all child nodes being moved. - for _, inode := range n.inodes { - if child, ok := n.bucket.nodes[inode.pgid]; ok { - child.parent.removeChild(child) - child.parent = target - child.parent.children = append(child.parent.children, child) - } - } - - // Copy over inodes to target and remove node. - target.inodes = append(target.inodes, n.inodes...) 
- n.parent.del(n.key) - n.parent.removeChild(n) - delete(n.bucket.nodes, n.pgid) - n.free() - } - - // Either this node or the target node was deleted from the parent so rebalance it. - n.parent.rebalance() -} - -// removes a node from the list of in-memory children. -// This does not affect the inodes. -func (n *node) removeChild(target *node) { - for i, child := range n.children { - if child == target { - n.children = append(n.children[:i], n.children[i+1:]...) - return - } - } -} - -// dereference causes the node to copy all its inode key/value references to heap memory. -// This is required when the mmap is reallocated so inodes are not pointing to stale data. -func (n *node) dereference() { - if n.key != nil { - key := make([]byte, len(n.key)) - copy(key, n.key) - n.key = key - _assert(n.pgid == 0 || len(n.key) > 0, "dereference: zero-length node key on existing node") - } - - for i := range n.inodes { - inode := &n.inodes[i] - - key := make([]byte, len(inode.key)) - copy(key, inode.key) - inode.key = key - _assert(len(inode.key) > 0, "dereference: zero-length inode key") - - value := make([]byte, len(inode.value)) - copy(value, inode.value) - inode.value = value - } - - // Recursively dereference children. - for _, child := range n.children { - child.dereference() - } - - // Update statistics. - n.bucket.tx.stats.NodeDeref++ -} - -// free adds the node's underlying page to the freelist. -func (n *node) free() { - if n.pgid != 0 { - n.bucket.tx.db.freelist.free(n.bucket.tx.meta.txid, n.bucket.tx.page(n.pgid)) - n.pgid = 0 - } -} - -// dump writes the contents of the node to STDERR for debugging purposes. -/* -func (n *node) dump() { - // Write node header. - var typ = "branch" - if n.isLeaf { - typ = "leaf" - } - warnf("[NODE %d {type=%s count=%d}]", n.pgid, typ, len(n.inodes)) - - // Write out abbreviated version of each item. 
- for _, item := range n.inodes { - if n.isLeaf { - if item.flags&bucketLeafFlag != 0 { - bucket := (*bucket)(unsafe.Pointer(&item.value[0])) - warnf("+L %08x -> (bucket root=%d)", trunc(item.key, 4), bucket.root) - } else { - warnf("+L %08x -> %08x", trunc(item.key, 4), trunc(item.value, 4)) - } - } else { - warnf("+B %08x -> pgid=%d", trunc(item.key, 4), item.pgid) - } - } - warn("") -} -*/ - -type nodes []*node - -func (s nodes) Len() int { return len(s) } -func (s nodes) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s nodes) Less(i, j int) bool { - return bytes.Compare(s[i].inodes[0].key, s[j].inodes[0].key) == -1 -} - -// inode represents an internal node inside of a node. -// It can be used to point to elements in a page or point -// to an element which hasn't been added to a page yet. -type inode struct { - flags uint32 - pgid pgid - key []byte - value []byte -} - -type inodes []inode diff --git a/vendor/go.etcd.io/bbolt/page.go b/vendor/go.etcd.io/bbolt/page.go deleted file mode 100644 index c9a158fb0..000000000 --- a/vendor/go.etcd.io/bbolt/page.go +++ /dev/null @@ -1,204 +0,0 @@ -package bbolt - -import ( - "fmt" - "os" - "sort" - "unsafe" -) - -const pageHeaderSize = unsafe.Sizeof(page{}) - -const minKeysPerPage = 2 - -const branchPageElementSize = unsafe.Sizeof(branchPageElement{}) -const leafPageElementSize = unsafe.Sizeof(leafPageElement{}) - -const ( - branchPageFlag = 0x01 - leafPageFlag = 0x02 - metaPageFlag = 0x04 - freelistPageFlag = 0x10 -) - -const ( - bucketLeafFlag = 0x01 -) - -type pgid uint64 - -type page struct { - id pgid - flags uint16 - count uint16 - overflow uint32 -} - -// typ returns a human readable page type string used for debugging. 
-func (p *page) typ() string { - if (p.flags & branchPageFlag) != 0 { - return "branch" - } else if (p.flags & leafPageFlag) != 0 { - return "leaf" - } else if (p.flags & metaPageFlag) != 0 { - return "meta" - } else if (p.flags & freelistPageFlag) != 0 { - return "freelist" - } - return fmt.Sprintf("unknown<%02x>", p.flags) -} - -// meta returns a pointer to the metadata section of the page. -func (p *page) meta() *meta { - return (*meta)(unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p))) -} - -// leafPageElement retrieves the leaf node by index -func (p *page) leafPageElement(index uint16) *leafPageElement { - return (*leafPageElement)(unsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p), - leafPageElementSize, int(index))) -} - -// leafPageElements retrieves a list of leaf nodes. -func (p *page) leafPageElements() []leafPageElement { - if p.count == 0 { - return nil - } - var elems []leafPageElement - data := unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)) - unsafeSlice(unsafe.Pointer(&elems), data, int(p.count)) - return elems -} - -// branchPageElement retrieves the branch node by index -func (p *page) branchPageElement(index uint16) *branchPageElement { - return (*branchPageElement)(unsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p), - unsafe.Sizeof(branchPageElement{}), int(index))) -} - -// branchPageElements retrieves a list of branch nodes. -func (p *page) branchPageElements() []branchPageElement { - if p.count == 0 { - return nil - } - var elems []branchPageElement - data := unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)) - unsafeSlice(unsafe.Pointer(&elems), data, int(p.count)) - return elems -} - -// dump writes n bytes of the page to STDERR as hex output. 
-func (p *page) hexdump(n int) { - buf := unsafeByteSlice(unsafe.Pointer(p), 0, 0, n) - fmt.Fprintf(os.Stderr, "%x\n", buf) -} - -type pages []*page - -func (s pages) Len() int { return len(s) } -func (s pages) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s pages) Less(i, j int) bool { return s[i].id < s[j].id } - -// branchPageElement represents a node on a branch page. -type branchPageElement struct { - pos uint32 - ksize uint32 - pgid pgid -} - -// key returns a byte slice of the node key. -func (n *branchPageElement) key() []byte { - return unsafeByteSlice(unsafe.Pointer(n), 0, int(n.pos), int(n.pos)+int(n.ksize)) -} - -// leafPageElement represents a node on a leaf page. -type leafPageElement struct { - flags uint32 - pos uint32 - ksize uint32 - vsize uint32 -} - -// key returns a byte slice of the node key. -func (n *leafPageElement) key() []byte { - i := int(n.pos) - j := i + int(n.ksize) - return unsafeByteSlice(unsafe.Pointer(n), 0, i, j) -} - -// value returns a byte slice of the node value. -func (n *leafPageElement) value() []byte { - i := int(n.pos) + int(n.ksize) - j := i + int(n.vsize) - return unsafeByteSlice(unsafe.Pointer(n), 0, i, j) -} - -// PageInfo represents human readable information about a page. -type PageInfo struct { - ID int - Type string - Count int - OverflowCount int -} - -type pgids []pgid - -func (s pgids) Len() int { return len(s) } -func (s pgids) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s pgids) Less(i, j int) bool { return s[i] < s[j] } - -// merge returns the sorted union of a and b. -func (a pgids) merge(b pgids) pgids { - // Return the opposite slice if one is nil. - if len(a) == 0 { - return b - } - if len(b) == 0 { - return a - } - merged := make(pgids, len(a)+len(b)) - mergepgids(merged, a, b) - return merged -} - -// mergepgids copies the sorted union of a and b into dst. -// If dst is too small, it panics. 
-func mergepgids(dst, a, b pgids) { - if len(dst) < len(a)+len(b) { - panic(fmt.Errorf("mergepgids bad len %d < %d + %d", len(dst), len(a), len(b))) - } - // Copy in the opposite slice if one is nil. - if len(a) == 0 { - copy(dst, b) - return - } - if len(b) == 0 { - copy(dst, a) - return - } - - // Merged will hold all elements from both lists. - merged := dst[:0] - - // Assign lead to the slice with a lower starting value, follow to the higher value. - lead, follow := a, b - if b[0] < a[0] { - lead, follow = b, a - } - - // Continue while there are elements in the lead. - for len(lead) > 0 { - // Merge largest prefix of lead that is ahead of follow[0]. - n := sort.Search(len(lead), func(i int) bool { return lead[i] > follow[0] }) - merged = append(merged, lead[:n]...) - if n >= len(lead) { - break - } - - // Swap lead and follow. - lead, follow = follow, lead[n:] - } - - // Append what's left in follow. - _ = append(merged, follow...) -} diff --git a/vendor/go.etcd.io/bbolt/tx.go b/vendor/go.etcd.io/bbolt/tx.go deleted file mode 100644 index 4b1a64a8b..000000000 --- a/vendor/go.etcd.io/bbolt/tx.go +++ /dev/null @@ -1,724 +0,0 @@ -package bbolt - -import ( - "fmt" - "io" - "os" - "sort" - "strings" - "time" - "unsafe" -) - -// txid represents the internal transaction identifier. -type txid uint64 - -// Tx represents a read-only or read/write transaction on the database. -// Read-only transactions can be used for retrieving values for keys and creating cursors. -// Read/write transactions can create and remove buckets and create and remove keys. -// -// IMPORTANT: You must commit or rollback transactions when you are done with -// them. Pages can not be reclaimed by the writer until no more transactions -// are using them. A long running read transaction can cause the database to -// quickly grow. 
-type Tx struct { - writable bool - managed bool - db *DB - meta *meta - root Bucket - pages map[pgid]*page - stats TxStats - commitHandlers []func() - - // WriteFlag specifies the flag for write-related methods like WriteTo(). - // Tx opens the database file with the specified flag to copy the data. - // - // By default, the flag is unset, which works well for mostly in-memory - // workloads. For databases that are much larger than available RAM, - // set the flag to syscall.O_DIRECT to avoid trashing the page cache. - WriteFlag int -} - -// init initializes the transaction. -func (tx *Tx) init(db *DB) { - tx.db = db - tx.pages = nil - - // Copy the meta page since it can be changed by the writer. - tx.meta = &meta{} - db.meta().copy(tx.meta) - - // Copy over the root bucket. - tx.root = newBucket(tx) - tx.root.bucket = &bucket{} - *tx.root.bucket = tx.meta.root - - // Increment the transaction id and add a page cache for writable transactions. - if tx.writable { - tx.pages = make(map[pgid]*page) - tx.meta.txid += txid(1) - } -} - -// ID returns the transaction id. -func (tx *Tx) ID() int { - return int(tx.meta.txid) -} - -// DB returns a reference to the database that created the transaction. -func (tx *Tx) DB() *DB { - return tx.db -} - -// Size returns current database size in bytes as seen by this transaction. -func (tx *Tx) Size() int64 { - return int64(tx.meta.pgid) * int64(tx.db.pageSize) -} - -// Writable returns whether the transaction can perform write operations. -func (tx *Tx) Writable() bool { - return tx.writable -} - -// Cursor creates a cursor associated with the root bucket. -// All items in the cursor will return a nil value because all root bucket keys point to buckets. -// The cursor is only valid as long as the transaction is open. -// Do not use a cursor after the transaction is closed. -func (tx *Tx) Cursor() *Cursor { - return tx.root.Cursor() -} - -// Stats retrieves a copy of the current transaction statistics. 
-func (tx *Tx) Stats() TxStats { - return tx.stats -} - -// Bucket retrieves a bucket by name. -// Returns nil if the bucket does not exist. -// The bucket instance is only valid for the lifetime of the transaction. -func (tx *Tx) Bucket(name []byte) *Bucket { - return tx.root.Bucket(name) -} - -// CreateBucket creates a new bucket. -// Returns an error if the bucket already exists, if the bucket name is blank, or if the bucket name is too long. -// The bucket instance is only valid for the lifetime of the transaction. -func (tx *Tx) CreateBucket(name []byte) (*Bucket, error) { - return tx.root.CreateBucket(name) -} - -// CreateBucketIfNotExists creates a new bucket if it doesn't already exist. -// Returns an error if the bucket name is blank, or if the bucket name is too long. -// The bucket instance is only valid for the lifetime of the transaction. -func (tx *Tx) CreateBucketIfNotExists(name []byte) (*Bucket, error) { - return tx.root.CreateBucketIfNotExists(name) -} - -// DeleteBucket deletes a bucket. -// Returns an error if the bucket cannot be found or if the key represents a non-bucket value. -func (tx *Tx) DeleteBucket(name []byte) error { - return tx.root.DeleteBucket(name) -} - -// ForEach executes a function for each bucket in the root. -// If the provided function returns an error then the iteration is stopped and -// the error is returned to the caller. -func (tx *Tx) ForEach(fn func(name []byte, b *Bucket) error) error { - return tx.root.ForEach(func(k, v []byte) error { - return fn(k, tx.root.Bucket(k)) - }) -} - -// OnCommit adds a handler function to be executed after the transaction successfully commits. -func (tx *Tx) OnCommit(fn func()) { - tx.commitHandlers = append(tx.commitHandlers, fn) -} - -// Commit writes all changes to disk and updates the meta page. -// Returns an error if a disk write error occurs, or if Commit is -// called on a read-only transaction. 
-func (tx *Tx) Commit() error { - _assert(!tx.managed, "managed tx commit not allowed") - if tx.db == nil { - return ErrTxClosed - } else if !tx.writable { - return ErrTxNotWritable - } - - // TODO(benbjohnson): Use vectorized I/O to write out dirty pages. - - // Rebalance nodes which have had deletions. - var startTime = time.Now() - tx.root.rebalance() - if tx.stats.Rebalance > 0 { - tx.stats.RebalanceTime += time.Since(startTime) - } - - // spill data onto dirty pages. - startTime = time.Now() - if err := tx.root.spill(); err != nil { - tx.rollback() - return err - } - tx.stats.SpillTime += time.Since(startTime) - - // Free the old root bucket. - tx.meta.root.root = tx.root.root - - // Free the old freelist because commit writes out a fresh freelist. - if tx.meta.freelist != pgidNoFreelist { - tx.db.freelist.free(tx.meta.txid, tx.db.page(tx.meta.freelist)) - } - - if !tx.db.NoFreelistSync { - err := tx.commitFreelist() - if err != nil { - return err - } - } else { - tx.meta.freelist = pgidNoFreelist - } - - // Write dirty pages to disk. - startTime = time.Now() - if err := tx.write(); err != nil { - tx.rollback() - return err - } - - // If strict mode is enabled then perform a consistency check. - // Only the first consistency error is reported in the panic. - if tx.db.StrictMode { - ch := tx.Check() - var errs []string - for { - err, ok := <-ch - if !ok { - break - } - errs = append(errs, err.Error()) - } - if len(errs) > 0 { - panic("check fail: " + strings.Join(errs, "\n")) - } - } - - // Write meta to disk. - if err := tx.writeMeta(); err != nil { - tx.rollback() - return err - } - tx.stats.WriteTime += time.Since(startTime) - - // Finalize the transaction. - tx.close() - - // Execute commit handlers now that the locks have been removed. - for _, fn := range tx.commitHandlers { - fn() - } - - return nil -} - -func (tx *Tx) commitFreelist() error { - // Allocate new pages for the new free list. 
This will overestimate - // the size of the freelist but not underestimate the size (which would be bad). - opgid := tx.meta.pgid - p, err := tx.allocate((tx.db.freelist.size() / tx.db.pageSize) + 1) - if err != nil { - tx.rollback() - return err - } - if err := tx.db.freelist.write(p); err != nil { - tx.rollback() - return err - } - tx.meta.freelist = p.id - // If the high water mark has moved up then attempt to grow the database. - if tx.meta.pgid > opgid { - if err := tx.db.grow(int(tx.meta.pgid+1) * tx.db.pageSize); err != nil { - tx.rollback() - return err - } - } - - return nil -} - -// Rollback closes the transaction and ignores all previous updates. Read-only -// transactions must be rolled back and not committed. -func (tx *Tx) Rollback() error { - _assert(!tx.managed, "managed tx rollback not allowed") - if tx.db == nil { - return ErrTxClosed - } - tx.nonPhysicalRollback() - return nil -} - -// nonPhysicalRollback is called when user calls Rollback directly, in this case we do not need to reload the free pages from disk. -func (tx *Tx) nonPhysicalRollback() { - if tx.db == nil { - return - } - if tx.writable { - tx.db.freelist.rollback(tx.meta.txid) - } - tx.close() -} - -// rollback needs to reload the free pages from disk in case some system error happens like fsync error. -func (tx *Tx) rollback() { - if tx.db == nil { - return - } - if tx.writable { - tx.db.freelist.rollback(tx.meta.txid) - if !tx.db.hasSyncedFreelist() { - // Reconstruct free page list by scanning the DB to get the whole free page list. - // Note: scaning the whole db is heavy if your db size is large in NoSyncFreeList mode. - tx.db.freelist.noSyncReload(tx.db.freepages()) - } else { - // Read free page list from freelist page. - tx.db.freelist.reload(tx.db.page(tx.db.meta().freelist)) - } - } - tx.close() -} - -func (tx *Tx) close() { - if tx.db == nil { - return - } - if tx.writable { - // Grab freelist stats. 
- var freelistFreeN = tx.db.freelist.free_count() - var freelistPendingN = tx.db.freelist.pending_count() - var freelistAlloc = tx.db.freelist.size() - - // Remove transaction ref & writer lock. - tx.db.rwtx = nil - tx.db.rwlock.Unlock() - - // Merge statistics. - tx.db.statlock.Lock() - tx.db.stats.FreePageN = freelistFreeN - tx.db.stats.PendingPageN = freelistPendingN - tx.db.stats.FreeAlloc = (freelistFreeN + freelistPendingN) * tx.db.pageSize - tx.db.stats.FreelistInuse = freelistAlloc - tx.db.stats.TxStats.add(&tx.stats) - tx.db.statlock.Unlock() - } else { - tx.db.removeTx(tx) - } - - // Clear all references. - tx.db = nil - tx.meta = nil - tx.root = Bucket{tx: tx} - tx.pages = nil -} - -// Copy writes the entire database to a writer. -// This function exists for backwards compatibility. -// -// Deprecated; Use WriteTo() instead. -func (tx *Tx) Copy(w io.Writer) error { - _, err := tx.WriteTo(w) - return err -} - -// WriteTo writes the entire database to a writer. -// If err == nil then exactly tx.Size() bytes will be written into the writer. -func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) { - // Attempt to open reader with WriteFlag - f, err := tx.db.openFile(tx.db.path, os.O_RDONLY|tx.WriteFlag, 0) - if err != nil { - return 0, err - } - defer func() { - if cerr := f.Close(); err == nil { - err = cerr - } - }() - - // Generate a meta page. We use the same page data for both meta pages. - buf := make([]byte, tx.db.pageSize) - page := (*page)(unsafe.Pointer(&buf[0])) - page.flags = metaPageFlag - *page.meta() = *tx.meta - - // Write meta 0. - page.id = 0 - page.meta().checksum = page.meta().sum64() - nn, err := w.Write(buf) - n += int64(nn) - if err != nil { - return n, fmt.Errorf("meta 0 copy: %s", err) - } - - // Write meta 1 with a lower transaction id. 
- page.id = 1 - page.meta().txid -= 1 - page.meta().checksum = page.meta().sum64() - nn, err = w.Write(buf) - n += int64(nn) - if err != nil { - return n, fmt.Errorf("meta 1 copy: %s", err) - } - - // Move past the meta pages in the file. - if _, err := f.Seek(int64(tx.db.pageSize*2), io.SeekStart); err != nil { - return n, fmt.Errorf("seek: %s", err) - } - - // Copy data pages. - wn, err := io.CopyN(w, f, tx.Size()-int64(tx.db.pageSize*2)) - n += wn - if err != nil { - return n, err - } - - return n, nil -} - -// CopyFile copies the entire database to file at the given path. -// A reader transaction is maintained during the copy so it is safe to continue -// using the database while a copy is in progress. -func (tx *Tx) CopyFile(path string, mode os.FileMode) error { - f, err := tx.db.openFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, mode) - if err != nil { - return err - } - - err = tx.Copy(f) - if err != nil { - _ = f.Close() - return err - } - return f.Close() -} - -// Check performs several consistency checks on the database for this transaction. -// An error is returned if any inconsistency is found. -// -// It can be safely run concurrently on a writable transaction. However, this -// incurs a high cost for large databases and databases with a lot of subbuckets -// because of caching. This overhead can be removed if running on a read-only -// transaction, however, it is not safe to execute other writer transactions at -// the same time. -func (tx *Tx) Check() <-chan error { - ch := make(chan error) - go tx.check(ch) - return ch -} - -func (tx *Tx) check(ch chan error) { - // Force loading free list if opened in ReadOnly mode. - tx.db.loadFreelist() - - // Check if any pages are double freed. - freed := make(map[pgid]bool) - all := make([]pgid, tx.db.freelist.count()) - tx.db.freelist.copyall(all) - for _, id := range all { - if freed[id] { - ch <- fmt.Errorf("page %d: already freed", id) - } - freed[id] = true - } - - // Track every reachable page. 
- reachable := make(map[pgid]*page) - reachable[0] = tx.page(0) // meta0 - reachable[1] = tx.page(1) // meta1 - if tx.meta.freelist != pgidNoFreelist { - for i := uint32(0); i <= tx.page(tx.meta.freelist).overflow; i++ { - reachable[tx.meta.freelist+pgid(i)] = tx.page(tx.meta.freelist) - } - } - - // Recursively check buckets. - tx.checkBucket(&tx.root, reachable, freed, ch) - - // Ensure all pages below high water mark are either reachable or freed. - for i := pgid(0); i < tx.meta.pgid; i++ { - _, isReachable := reachable[i] - if !isReachable && !freed[i] { - ch <- fmt.Errorf("page %d: unreachable unfreed", int(i)) - } - } - - // Close the channel to signal completion. - close(ch) -} - -func (tx *Tx) checkBucket(b *Bucket, reachable map[pgid]*page, freed map[pgid]bool, ch chan error) { - // Ignore inline buckets. - if b.root == 0 { - return - } - - // Check every page used by this bucket. - b.tx.forEachPage(b.root, 0, func(p *page, _ int) { - if p.id > tx.meta.pgid { - ch <- fmt.Errorf("page %d: out of bounds: %d", int(p.id), int(b.tx.meta.pgid)) - } - - // Ensure each page is only referenced once. - for i := pgid(0); i <= pgid(p.overflow); i++ { - var id = p.id + i - if _, ok := reachable[id]; ok { - ch <- fmt.Errorf("page %d: multiple references", int(id)) - } - reachable[id] = p - } - - // We should only encounter un-freed leaf and branch pages. - if freed[p.id] { - ch <- fmt.Errorf("page %d: reachable freed", int(p.id)) - } else if (p.flags&branchPageFlag) == 0 && (p.flags&leafPageFlag) == 0 { - ch <- fmt.Errorf("page %d: invalid type: %s", int(p.id), p.typ()) - } - }) - - // Check each bucket within this bucket. - _ = b.ForEach(func(k, v []byte) error { - if child := b.Bucket(k); child != nil { - tx.checkBucket(child, reachable, freed, ch) - } - return nil - }) -} - -// allocate returns a contiguous block of memory starting at a given page. 
-func (tx *Tx) allocate(count int) (*page, error) { - p, err := tx.db.allocate(tx.meta.txid, count) - if err != nil { - return nil, err - } - - // Save to our page cache. - tx.pages[p.id] = p - - // Update statistics. - tx.stats.PageCount += count - tx.stats.PageAlloc += count * tx.db.pageSize - - return p, nil -} - -// write writes any dirty pages to disk. -func (tx *Tx) write() error { - // Sort pages by id. - pages := make(pages, 0, len(tx.pages)) - for _, p := range tx.pages { - pages = append(pages, p) - } - // Clear out page cache early. - tx.pages = make(map[pgid]*page) - sort.Sort(pages) - - // Write pages to disk in order. - for _, p := range pages { - rem := (uint64(p.overflow) + 1) * uint64(tx.db.pageSize) - offset := int64(p.id) * int64(tx.db.pageSize) - var written uintptr - - // Write out page in "max allocation" sized chunks. - for { - sz := rem - if sz > maxAllocSize-1 { - sz = maxAllocSize - 1 - } - buf := unsafeByteSlice(unsafe.Pointer(p), written, 0, int(sz)) - - if _, err := tx.db.ops.writeAt(buf, offset); err != nil { - return err - } - - // Update statistics. - tx.stats.Write++ - - // Exit inner for loop if we've written all the chunks. - rem -= sz - if rem == 0 { - break - } - - // Otherwise move offset forward and move pointer to next chunk. - offset += int64(sz) - written += uintptr(sz) - } - } - - // Ignore file sync if flag is set on DB. - if !tx.db.NoSync || IgnoreNoSync { - if err := fdatasync(tx.db); err != nil { - return err - } - } - - // Put small pages back to page pool. - for _, p := range pages { - // Ignore page sizes over 1 page. - // These are allocated using make() instead of the page pool. - if int(p.overflow) != 0 { - continue - } - - buf := unsafeByteSlice(unsafe.Pointer(p), 0, 0, tx.db.pageSize) - - // See https://go.googlesource.com/go/+/f03c9202c43e0abb130669852082117ca50aa9b1 - for i := range buf { - buf[i] = 0 - } - tx.db.pagePool.Put(buf) - } - - return nil -} - -// writeMeta writes the meta to the disk. 
-func (tx *Tx) writeMeta() error { - // Create a temporary buffer for the meta page. - buf := make([]byte, tx.db.pageSize) - p := tx.db.pageInBuffer(buf, 0) - tx.meta.write(p) - - // Write the meta page to file. - if _, err := tx.db.ops.writeAt(buf, int64(p.id)*int64(tx.db.pageSize)); err != nil { - return err - } - if !tx.db.NoSync || IgnoreNoSync { - if err := fdatasync(tx.db); err != nil { - return err - } - } - - // Update statistics. - tx.stats.Write++ - - return nil -} - -// page returns a reference to the page with a given id. -// If page has been written to then a temporary buffered page is returned. -func (tx *Tx) page(id pgid) *page { - // Check the dirty pages first. - if tx.pages != nil { - if p, ok := tx.pages[id]; ok { - return p - } - } - - // Otherwise return directly from the mmap. - return tx.db.page(id) -} - -// forEachPage iterates over every page within a given page and executes a function. -func (tx *Tx) forEachPage(pgid pgid, depth int, fn func(*page, int)) { - p := tx.page(pgid) - - // Execute function. - fn(p, depth) - - // Recursively loop over children. - if (p.flags & branchPageFlag) != 0 { - for i := 0; i < int(p.count); i++ { - elem := p.branchPageElement(uint16(i)) - tx.forEachPage(elem.pgid, depth+1, fn) - } - } -} - -// Page returns page information for a given page number. -// This is only safe for concurrent use when used by a writable transaction. -func (tx *Tx) Page(id int) (*PageInfo, error) { - if tx.db == nil { - return nil, ErrTxClosed - } else if pgid(id) >= tx.meta.pgid { - return nil, nil - } - - // Build the page info. - p := tx.db.page(pgid(id)) - info := &PageInfo{ - ID: id, - Count: int(p.count), - OverflowCount: int(p.overflow), - } - - // Determine the type (or if it's free). - if tx.db.freelist.freed(pgid(id)) { - info.Type = "free" - } else { - info.Type = p.typ() - } - - return info, nil -} - -// TxStats represents statistics about the actions performed by the transaction. 
-type TxStats struct { - // Page statistics. - PageCount int // number of page allocations - PageAlloc int // total bytes allocated - - // Cursor statistics. - CursorCount int // number of cursors created - - // Node statistics - NodeCount int // number of node allocations - NodeDeref int // number of node dereferences - - // Rebalance statistics. - Rebalance int // number of node rebalances - RebalanceTime time.Duration // total time spent rebalancing - - // Split/Spill statistics. - Split int // number of nodes split - Spill int // number of nodes spilled - SpillTime time.Duration // total time spent spilling - - // Write statistics. - Write int // number of writes performed - WriteTime time.Duration // total time spent writing to disk -} - -func (s *TxStats) add(other *TxStats) { - s.PageCount += other.PageCount - s.PageAlloc += other.PageAlloc - s.CursorCount += other.CursorCount - s.NodeCount += other.NodeCount - s.NodeDeref += other.NodeDeref - s.Rebalance += other.Rebalance - s.RebalanceTime += other.RebalanceTime - s.Split += other.Split - s.Spill += other.Spill - s.SpillTime += other.SpillTime - s.Write += other.Write - s.WriteTime += other.WriteTime -} - -// Sub calculates and returns the difference between two sets of transaction stats. -// This is useful when obtaining stats at two different points and time and -// you need the performance counters that occurred within that time span. 
-func (s *TxStats) Sub(other *TxStats) TxStats { - var diff TxStats - diff.PageCount = s.PageCount - other.PageCount - diff.PageAlloc = s.PageAlloc - other.PageAlloc - diff.CursorCount = s.CursorCount - other.CursorCount - diff.NodeCount = s.NodeCount - other.NodeCount - diff.NodeDeref = s.NodeDeref - other.NodeDeref - diff.Rebalance = s.Rebalance - other.Rebalance - diff.RebalanceTime = s.RebalanceTime - other.RebalanceTime - diff.Split = s.Split - other.Split - diff.Spill = s.Spill - other.Spill - diff.SpillTime = s.SpillTime - other.SpillTime - diff.Write = s.Write - other.Write - diff.WriteTime = s.WriteTime - other.WriteTime - return diff -} diff --git a/vendor/go.etcd.io/bbolt/unsafe.go b/vendor/go.etcd.io/bbolt/unsafe.go deleted file mode 100644 index c0e503750..000000000 --- a/vendor/go.etcd.io/bbolt/unsafe.go +++ /dev/null @@ -1,39 +0,0 @@ -package bbolt - -import ( - "reflect" - "unsafe" -) - -func unsafeAdd(base unsafe.Pointer, offset uintptr) unsafe.Pointer { - return unsafe.Pointer(uintptr(base) + offset) -} - -func unsafeIndex(base unsafe.Pointer, offset uintptr, elemsz uintptr, n int) unsafe.Pointer { - return unsafe.Pointer(uintptr(base) + offset + uintptr(n)*elemsz) -} - -func unsafeByteSlice(base unsafe.Pointer, offset uintptr, i, j int) []byte { - // See: https://github.com/golang/go/wiki/cgo#turning-c-arrays-into-go-slices - // - // This memory is not allocated from C, but it is unmanaged by Go's - // garbage collector and should behave similarly, and the compiler - // should produce similar code. Note that this conversion allows a - // subslice to begin after the base address, with an optional offset, - // while the URL above does not cover this case and only slices from - // index 0. However, the wiki never says that the address must be to - // the beginning of a C allocation (or even that malloc was used at - // all), so this is believed to be correct. 
- return (*[maxAllocSize]byte)(unsafeAdd(base, offset))[i:j:j] -} - -// unsafeSlice modifies the data, len, and cap of a slice variable pointed to by -// the slice parameter. This helper should be used over other direct -// manipulation of reflect.SliceHeader to prevent misuse, namely, converting -// from reflect.SliceHeader to a Go slice type. -func unsafeSlice(slice, data unsafe.Pointer, len int) { - s := (*reflect.SliceHeader)(slice) - s.Data = uintptr(data) - s.Cap = len - s.Len = len -} diff --git a/vendor/go.uber.org/atomic/.gitignore b/vendor/go.uber.org/atomic/.gitignore index c3fa25389..2e337a0ed 100644 --- a/vendor/go.uber.org/atomic/.gitignore +++ b/vendor/go.uber.org/atomic/.gitignore @@ -10,3 +10,6 @@ lint.log # Profiling output *.prof + +# Output of fossa analyzer +/fossa diff --git a/vendor/go.uber.org/atomic/.travis.yml b/vendor/go.uber.org/atomic/.travis.yml deleted file mode 100644 index 13d0a4f25..000000000 --- a/vendor/go.uber.org/atomic/.travis.yml +++ /dev/null @@ -1,27 +0,0 @@ -sudo: false -language: go -go_import_path: go.uber.org/atomic - -env: - global: - - GO111MODULE=on - -matrix: - include: - - go: oldstable - - go: stable - env: LINT=1 - -cache: - directories: - - vendor - -before_install: - - go version - -script: - - test -z "$LINT" || make lint - - make cover - -after_success: - - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/go.uber.org/atomic/CHANGELOG.md b/vendor/go.uber.org/atomic/CHANGELOG.md index 24c0274dc..38f564e2b 100644 --- a/vendor/go.uber.org/atomic/CHANGELOG.md +++ b/vendor/go.uber.org/atomic/CHANGELOG.md @@ -4,6 +4,20 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). +## [1.9.0] - 2021-07-15 +### Added +- Add `Float64.Swap` to match int atomic operations. 
+- Add `atomic.Time` type for atomic operations on `time.Time` values. + +[1.9.0]: https://github.com/uber-go/atomic/compare/v1.8.0...v1.9.0 + +## [1.8.0] - 2021-06-09 +### Added +- Add `atomic.Uintptr` type for atomic operations on `uintptr` values. +- Add `atomic.UnsafePointer` type for atomic operations on `unsafe.Pointer` values. + +[1.8.0]: https://github.com/uber-go/atomic/compare/v1.7.0...v1.8.0 + ## [1.7.0] - 2020-09-14 ### Added - Support JSON serialization and deserialization of primitive atomic types. @@ -15,32 +29,46 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Removed - Remove dependency on `golang.org/x/{lint, tools}`. +[1.7.0]: https://github.com/uber-go/atomic/compare/v1.6.0...v1.7.0 + ## [1.6.0] - 2020-02-24 ### Changed - Drop library dependency on `golang.org/x/{lint, tools}`. +[1.6.0]: https://github.com/uber-go/atomic/compare/v1.5.1...v1.6.0 + ## [1.5.1] - 2019-11-19 - Fix bug where `Bool.CAS` and `Bool.Toggle` do work correctly together causing `CAS` to fail even though the old value matches. +[1.5.1]: https://github.com/uber-go/atomic/compare/v1.5.0...v1.5.1 + ## [1.5.0] - 2019-10-29 ### Changed - With Go modules, only the `go.uber.org/atomic` import path is supported now. If you need to use the old import path, please add a `replace` directive to your `go.mod`. +[1.5.0]: https://github.com/uber-go/atomic/compare/v1.4.0...v1.5.0 + ## [1.4.0] - 2019-05-01 ### Added - Add `atomic.Error` type for atomic operations on `error` values. +[1.4.0]: https://github.com/uber-go/atomic/compare/v1.3.2...v1.4.0 + ## [1.3.2] - 2018-05-02 ### Added - Add `atomic.Duration` type for atomic operations on `time.Duration` values. +[1.3.2]: https://github.com/uber-go/atomic/compare/v1.3.1...v1.3.2 + ## [1.3.1] - 2017-11-14 ### Fixed - Revert optimization for `atomic.String.Store("")` which caused data races. 
+[1.3.1]: https://github.com/uber-go/atomic/compare/v1.3.0...v1.3.1 + ## [1.3.0] - 2017-11-13 ### Added - Add `atomic.Bool.CAS` for compare-and-swap semantics on bools. @@ -48,10 +76,14 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Changed - Optimize `atomic.String.Store("")` by avoiding an allocation. +[1.3.0]: https://github.com/uber-go/atomic/compare/v1.2.0...v1.3.0 + ## [1.2.0] - 2017-04-12 ### Added - Shadow `atomic.Value` from `sync/atomic`. +[1.2.0]: https://github.com/uber-go/atomic/compare/v1.1.0...v1.2.0 + ## [1.1.0] - 2017-03-10 ### Added - Add atomic `Float64` type. @@ -59,18 +91,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Changed - Support new `go.uber.org/atomic` import path. +[1.1.0]: https://github.com/uber-go/atomic/compare/v1.0.0...v1.1.0 + ## [1.0.0] - 2016-07-18 - Initial release. -[1.7.0]: https://github.com/uber-go/atomic/compare/v1.6.0...v1.7.0 -[1.6.0]: https://github.com/uber-go/atomic/compare/v1.5.1...v1.6.0 -[1.5.1]: https://github.com/uber-go/atomic/compare/v1.5.0...v1.5.1 -[1.5.0]: https://github.com/uber-go/atomic/compare/v1.4.0...v1.5.0 -[1.4.0]: https://github.com/uber-go/atomic/compare/v1.3.2...v1.4.0 -[1.3.2]: https://github.com/uber-go/atomic/compare/v1.3.1...v1.3.2 -[1.3.1]: https://github.com/uber-go/atomic/compare/v1.3.0...v1.3.1 -[1.3.0]: https://github.com/uber-go/atomic/compare/v1.2.0...v1.3.0 -[1.2.0]: https://github.com/uber-go/atomic/compare/v1.1.0...v1.2.0 -[1.1.0]: https://github.com/uber-go/atomic/compare/v1.0.0...v1.1.0 [1.0.0]: https://github.com/uber-go/atomic/releases/tag/v1.0.0 diff --git a/vendor/go.uber.org/atomic/Makefile b/vendor/go.uber.org/atomic/Makefile index 1b1376d42..46c945b32 100644 --- a/vendor/go.uber.org/atomic/Makefile +++ b/vendor/go.uber.org/atomic/Makefile @@ -69,6 +69,7 @@ generate: $(GEN_ATOMICINT) $(GEN_ATOMICWRAPPER) generatenodirty: @[ -z "$$(git status --porcelain)" ] || ( \ echo "Working tree is 
dirty. Commit your changes first."; \ + git status; \ exit 1 ) @make generate @status=$$(git status --porcelain); \ diff --git a/vendor/go.uber.org/atomic/README.md b/vendor/go.uber.org/atomic/README.md index ade0c20f1..96b47a1f1 100644 --- a/vendor/go.uber.org/atomic/README.md +++ b/vendor/go.uber.org/atomic/README.md @@ -55,8 +55,8 @@ Released under the [MIT License](LICENSE.txt). [doc-img]: https://godoc.org/github.com/uber-go/atomic?status.svg [doc]: https://godoc.org/go.uber.org/atomic -[ci-img]: https://travis-ci.com/uber-go/atomic.svg?branch=master -[ci]: https://travis-ci.com/uber-go/atomic +[ci-img]: https://github.com/uber-go/atomic/actions/workflows/go.yml/badge.svg +[ci]: https://github.com/uber-go/atomic/actions/workflows/go.yml [cov-img]: https://codecov.io/gh/uber-go/atomic/branch/master/graph/badge.svg [cov]: https://codecov.io/gh/uber-go/atomic [reportcard-img]: https://goreportcard.com/badge/go.uber.org/atomic diff --git a/vendor/go.uber.org/atomic/bool.go b/vendor/go.uber.org/atomic/bool.go index 9cf1914b1..209df7bbc 100644 --- a/vendor/go.uber.org/atomic/bool.go +++ b/vendor/go.uber.org/atomic/bool.go @@ -1,6 +1,6 @@ // @generated Code generated by gen-atomicwrapper. -// Copyright (c) 2020 Uber Technologies, Inc. +// Copyright (c) 2020-2021 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -36,10 +36,10 @@ type Bool struct { var _zeroBool bool // NewBool creates a new Bool. -func NewBool(v bool) *Bool { +func NewBool(val bool) *Bool { x := &Bool{} - if v != _zeroBool { - x.Store(v) + if val != _zeroBool { + x.Store(val) } return x } @@ -50,19 +50,19 @@ func (x *Bool) Load() bool { } // Store atomically stores the passed bool. -func (x *Bool) Store(v bool) { - x.v.Store(boolToInt(v)) +func (x *Bool) Store(val bool) { + x.v.Store(boolToInt(val)) } // CAS is an atomic compare-and-swap for bool values. 
-func (x *Bool) CAS(o, n bool) bool { - return x.v.CAS(boolToInt(o), boolToInt(n)) +func (x *Bool) CAS(old, new bool) (swapped bool) { + return x.v.CAS(boolToInt(old), boolToInt(new)) } // Swap atomically stores the given bool and returns the old // value. -func (x *Bool) Swap(o bool) bool { - return truthy(x.v.Swap(boolToInt(o))) +func (x *Bool) Swap(val bool) (old bool) { + return truthy(x.v.Swap(boolToInt(val))) } // MarshalJSON encodes the wrapped bool into JSON. diff --git a/vendor/go.uber.org/atomic/bool_ext.go b/vendor/go.uber.org/atomic/bool_ext.go index c7bf7a827..a2e60e987 100644 --- a/vendor/go.uber.org/atomic/bool_ext.go +++ b/vendor/go.uber.org/atomic/bool_ext.go @@ -38,7 +38,7 @@ func boolToInt(b bool) uint32 { } // Toggle atomically negates the Boolean and returns the previous value. -func (b *Bool) Toggle() bool { +func (b *Bool) Toggle() (old bool) { for { old := b.Load() if b.CAS(old, !old) { diff --git a/vendor/go.uber.org/atomic/duration.go b/vendor/go.uber.org/atomic/duration.go index 027cfcb20..207594f5e 100644 --- a/vendor/go.uber.org/atomic/duration.go +++ b/vendor/go.uber.org/atomic/duration.go @@ -1,6 +1,6 @@ // @generated Code generated by gen-atomicwrapper. -// Copyright (c) 2020 Uber Technologies, Inc. +// Copyright (c) 2020-2021 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -37,10 +37,10 @@ type Duration struct { var _zeroDuration time.Duration // NewDuration creates a new Duration. -func NewDuration(v time.Duration) *Duration { +func NewDuration(val time.Duration) *Duration { x := &Duration{} - if v != _zeroDuration { - x.Store(v) + if val != _zeroDuration { + x.Store(val) } return x } @@ -51,19 +51,19 @@ func (x *Duration) Load() time.Duration { } // Store atomically stores the passed time.Duration. 
-func (x *Duration) Store(v time.Duration) { - x.v.Store(int64(v)) +func (x *Duration) Store(val time.Duration) { + x.v.Store(int64(val)) } // CAS is an atomic compare-and-swap for time.Duration values. -func (x *Duration) CAS(o, n time.Duration) bool { - return x.v.CAS(int64(o), int64(n)) +func (x *Duration) CAS(old, new time.Duration) (swapped bool) { + return x.v.CAS(int64(old), int64(new)) } // Swap atomically stores the given time.Duration and returns the old // value. -func (x *Duration) Swap(o time.Duration) time.Duration { - return time.Duration(x.v.Swap(int64(o))) +func (x *Duration) Swap(val time.Duration) (old time.Duration) { + return time.Duration(x.v.Swap(int64(val))) } // MarshalJSON encodes the wrapped time.Duration into JSON. diff --git a/vendor/go.uber.org/atomic/duration_ext.go b/vendor/go.uber.org/atomic/duration_ext.go index 6273b66bd..4c18b0a9e 100644 --- a/vendor/go.uber.org/atomic/duration_ext.go +++ b/vendor/go.uber.org/atomic/duration_ext.go @@ -25,13 +25,13 @@ import "time" //go:generate bin/gen-atomicwrapper -name=Duration -type=time.Duration -wrapped=Int64 -pack=int64 -unpack=time.Duration -cas -swap -json -imports time -file=duration.go // Add atomically adds to the wrapped time.Duration and returns the new value. -func (d *Duration) Add(n time.Duration) time.Duration { - return time.Duration(d.v.Add(int64(n))) +func (d *Duration) Add(delta time.Duration) time.Duration { + return time.Duration(d.v.Add(int64(delta))) } // Sub atomically subtracts from the wrapped time.Duration and returns the new value. -func (d *Duration) Sub(n time.Duration) time.Duration { - return time.Duration(d.v.Sub(int64(n))) +func (d *Duration) Sub(delta time.Duration) time.Duration { + return time.Duration(d.v.Sub(int64(delta))) } // String encodes the wrapped value as a string. 
diff --git a/vendor/go.uber.org/atomic/error.go b/vendor/go.uber.org/atomic/error.go index a6166fbea..3be19c35e 100644 --- a/vendor/go.uber.org/atomic/error.go +++ b/vendor/go.uber.org/atomic/error.go @@ -1,6 +1,6 @@ // @generated Code generated by gen-atomicwrapper. -// Copyright (c) 2020 Uber Technologies, Inc. +// Copyright (c) 2020-2021 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -32,10 +32,10 @@ type Error struct { var _zeroError error // NewError creates a new Error. -func NewError(v error) *Error { +func NewError(val error) *Error { x := &Error{} - if v != _zeroError { - x.Store(v) + if val != _zeroError { + x.Store(val) } return x } @@ -46,6 +46,6 @@ func (x *Error) Load() error { } // Store atomically stores the passed error. -func (x *Error) Store(v error) { - x.v.Store(packError(v)) +func (x *Error) Store(val error) { + x.v.Store(packError(val)) } diff --git a/vendor/go.uber.org/atomic/float64.go b/vendor/go.uber.org/atomic/float64.go index 071906020..8a1367184 100644 --- a/vendor/go.uber.org/atomic/float64.go +++ b/vendor/go.uber.org/atomic/float64.go @@ -1,6 +1,6 @@ // @generated Code generated by gen-atomicwrapper. -// Copyright (c) 2020 Uber Technologies, Inc. +// Copyright (c) 2020-2021 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -37,10 +37,10 @@ type Float64 struct { var _zeroFloat64 float64 // NewFloat64 creates a new Float64. -func NewFloat64(v float64) *Float64 { +func NewFloat64(val float64) *Float64 { x := &Float64{} - if v != _zeroFloat64 { - x.Store(v) + if val != _zeroFloat64 { + x.Store(val) } return x } @@ -51,13 +51,14 @@ func (x *Float64) Load() float64 { } // Store atomically stores the passed float64. 
-func (x *Float64) Store(v float64) { - x.v.Store(math.Float64bits(v)) +func (x *Float64) Store(val float64) { + x.v.Store(math.Float64bits(val)) } -// CAS is an atomic compare-and-swap for float64 values. -func (x *Float64) CAS(o, n float64) bool { - return x.v.CAS(math.Float64bits(o), math.Float64bits(n)) +// Swap atomically stores the given float64 and returns the old +// value. +func (x *Float64) Swap(val float64) (old float64) { + return math.Float64frombits(x.v.Swap(math.Float64bits(val))) } // MarshalJSON encodes the wrapped float64 into JSON. diff --git a/vendor/go.uber.org/atomic/float64_ext.go b/vendor/go.uber.org/atomic/float64_ext.go index 927b1add7..df36b0107 100644 --- a/vendor/go.uber.org/atomic/float64_ext.go +++ b/vendor/go.uber.org/atomic/float64_ext.go @@ -20,15 +20,18 @@ package atomic -import "strconv" +import ( + "math" + "strconv" +) -//go:generate bin/gen-atomicwrapper -name=Float64 -type=float64 -wrapped=Uint64 -pack=math.Float64bits -unpack=math.Float64frombits -cas -json -imports math -file=float64.go +//go:generate bin/gen-atomicwrapper -name=Float64 -type=float64 -wrapped=Uint64 -pack=math.Float64bits -unpack=math.Float64frombits -swap -json -imports math -file=float64.go // Add atomically adds to the wrapped float64 and returns the new value. -func (f *Float64) Add(s float64) float64 { +func (f *Float64) Add(delta float64) float64 { for { old := f.Load() - new := old + s + new := old + delta if f.CAS(old, new) { return new } @@ -36,8 +39,27 @@ func (f *Float64) Add(s float64) float64 { } // Sub atomically subtracts from the wrapped float64 and returns the new value. -func (f *Float64) Sub(s float64) float64 { - return f.Add(-s) +func (f *Float64) Sub(delta float64) float64 { + return f.Add(-delta) +} + +// CAS is an atomic compare-and-swap for float64 values. +// +// Note: CAS handles NaN incorrectly. NaN != NaN using Go's inbuilt operators +// but CAS allows a stored NaN to compare equal to a passed in NaN. 
+// This avoids typical CAS loops from blocking forever, e.g., +// +// for { +// old := atom.Load() +// new = f(old) +// if atom.CAS(old, new) { +// break +// } +// } +// +// If CAS did not match NaN to match, then the above would loop forever. +func (f *Float64) CAS(old, new float64) (swapped bool) { + return f.v.CAS(math.Float64bits(old), math.Float64bits(new)) } // String encodes the wrapped value as a string. diff --git a/vendor/go.uber.org/atomic/gen.go b/vendor/go.uber.org/atomic/gen.go index 50d6b2485..1e9ef4f87 100644 --- a/vendor/go.uber.org/atomic/gen.go +++ b/vendor/go.uber.org/atomic/gen.go @@ -24,3 +24,4 @@ package atomic //go:generate bin/gen-atomicint -name=Int64 -wrapped=int64 -file=int64.go //go:generate bin/gen-atomicint -name=Uint32 -wrapped=uint32 -unsigned -file=uint32.go //go:generate bin/gen-atomicint -name=Uint64 -wrapped=uint64 -unsigned -file=uint64.go +//go:generate bin/gen-atomicint -name=Uintptr -wrapped=uintptr -unsigned -file=uintptr.go diff --git a/vendor/go.uber.org/atomic/go.mod b/vendor/go.uber.org/atomic/go.mod deleted file mode 100644 index daa7599fe..000000000 --- a/vendor/go.uber.org/atomic/go.mod +++ /dev/null @@ -1,8 +0,0 @@ -module go.uber.org/atomic - -require ( - github.com/davecgh/go-spew v1.1.1 // indirect - github.com/stretchr/testify v1.3.0 -) - -go 1.13 diff --git a/vendor/go.uber.org/atomic/go.sum b/vendor/go.uber.org/atomic/go.sum deleted file mode 100644 index 4f76e62c1..000000000 --- a/vendor/go.uber.org/atomic/go.sum +++ /dev/null @@ -1,9 +0,0 @@ -github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib 
v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= diff --git a/vendor/go.uber.org/atomic/int32.go b/vendor/go.uber.org/atomic/int32.go index 18ae56493..640ea36a1 100644 --- a/vendor/go.uber.org/atomic/int32.go +++ b/vendor/go.uber.org/atomic/int32.go @@ -1,6 +1,6 @@ // @generated Code generated by gen-atomicint. -// Copyright (c) 2020 Uber Technologies, Inc. +// Copyright (c) 2020-2021 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -36,8 +36,8 @@ type Int32 struct { } // NewInt32 creates a new Int32. -func NewInt32(i int32) *Int32 { - return &Int32{v: i} +func NewInt32(val int32) *Int32 { + return &Int32{v: val} } // Load atomically loads the wrapped value. @@ -46,13 +46,13 @@ func (i *Int32) Load() int32 { } // Add atomically adds to the wrapped int32 and returns the new value. -func (i *Int32) Add(n int32) int32 { - return atomic.AddInt32(&i.v, n) +func (i *Int32) Add(delta int32) int32 { + return atomic.AddInt32(&i.v, delta) } // Sub atomically subtracts from the wrapped int32 and returns the new value. -func (i *Int32) Sub(n int32) int32 { - return atomic.AddInt32(&i.v, -n) +func (i *Int32) Sub(delta int32) int32 { + return atomic.AddInt32(&i.v, -delta) } // Inc atomically increments the wrapped int32 and returns the new value. @@ -66,18 +66,18 @@ func (i *Int32) Dec() int32 { } // CAS is an atomic compare-and-swap. -func (i *Int32) CAS(old, new int32) bool { +func (i *Int32) CAS(old, new int32) (swapped bool) { return atomic.CompareAndSwapInt32(&i.v, old, new) } // Store atomically stores the passed value. 
-func (i *Int32) Store(n int32) { - atomic.StoreInt32(&i.v, n) +func (i *Int32) Store(val int32) { + atomic.StoreInt32(&i.v, val) } // Swap atomically swaps the wrapped int32 and returns the old value. -func (i *Int32) Swap(n int32) int32 { - return atomic.SwapInt32(&i.v, n) +func (i *Int32) Swap(val int32) (old int32) { + return atomic.SwapInt32(&i.v, val) } // MarshalJSON encodes the wrapped int32 into JSON. diff --git a/vendor/go.uber.org/atomic/int64.go b/vendor/go.uber.org/atomic/int64.go index 2bcbbfaa9..9ab66b980 100644 --- a/vendor/go.uber.org/atomic/int64.go +++ b/vendor/go.uber.org/atomic/int64.go @@ -1,6 +1,6 @@ // @generated Code generated by gen-atomicint. -// Copyright (c) 2020 Uber Technologies, Inc. +// Copyright (c) 2020-2021 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -36,8 +36,8 @@ type Int64 struct { } // NewInt64 creates a new Int64. -func NewInt64(i int64) *Int64 { - return &Int64{v: i} +func NewInt64(val int64) *Int64 { + return &Int64{v: val} } // Load atomically loads the wrapped value. @@ -46,13 +46,13 @@ func (i *Int64) Load() int64 { } // Add atomically adds to the wrapped int64 and returns the new value. -func (i *Int64) Add(n int64) int64 { - return atomic.AddInt64(&i.v, n) +func (i *Int64) Add(delta int64) int64 { + return atomic.AddInt64(&i.v, delta) } // Sub atomically subtracts from the wrapped int64 and returns the new value. -func (i *Int64) Sub(n int64) int64 { - return atomic.AddInt64(&i.v, -n) +func (i *Int64) Sub(delta int64) int64 { + return atomic.AddInt64(&i.v, -delta) } // Inc atomically increments the wrapped int64 and returns the new value. @@ -66,18 +66,18 @@ func (i *Int64) Dec() int64 { } // CAS is an atomic compare-and-swap. 
-func (i *Int64) CAS(old, new int64) bool { +func (i *Int64) CAS(old, new int64) (swapped bool) { return atomic.CompareAndSwapInt64(&i.v, old, new) } // Store atomically stores the passed value. -func (i *Int64) Store(n int64) { - atomic.StoreInt64(&i.v, n) +func (i *Int64) Store(val int64) { + atomic.StoreInt64(&i.v, val) } // Swap atomically swaps the wrapped int64 and returns the old value. -func (i *Int64) Swap(n int64) int64 { - return atomic.SwapInt64(&i.v, n) +func (i *Int64) Swap(val int64) (old int64) { + return atomic.SwapInt64(&i.v, val) } // MarshalJSON encodes the wrapped int64 into JSON. diff --git a/vendor/go.uber.org/atomic/string.go b/vendor/go.uber.org/atomic/string.go index 225b7a2be..80df93d09 100644 --- a/vendor/go.uber.org/atomic/string.go +++ b/vendor/go.uber.org/atomic/string.go @@ -1,6 +1,6 @@ // @generated Code generated by gen-atomicwrapper. -// Copyright (c) 2020 Uber Technologies, Inc. +// Copyright (c) 2020-2021 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -32,10 +32,10 @@ type String struct { var _zeroString string // NewString creates a new String. -func NewString(v string) *String { +func NewString(val string) *String { x := &String{} - if v != _zeroString { - x.Store(v) + if val != _zeroString { + x.Store(val) } return x } @@ -49,6 +49,6 @@ func (x *String) Load() string { } // Store atomically stores the passed string. 
-func (x *String) Store(v string) { - x.v.Store(v) +func (x *String) Store(val string) { + x.v.Store(val) } diff --git a/vendor/go.uber.org/atomic/string_ext.go b/vendor/go.uber.org/atomic/string_ext.go index 3a9558213..83d92edaf 100644 --- a/vendor/go.uber.org/atomic/string_ext.go +++ b/vendor/go.uber.org/atomic/string_ext.go @@ -21,6 +21,8 @@ package atomic //go:generate bin/gen-atomicwrapper -name=String -type=string -wrapped=Value -file=string.go +// Note: No Swap as String wraps Value, which wraps the stdlib sync/atomic.Value which +// only supports Swap as of go1.17: https://github.com/golang/go/issues/39351 // String returns the wrapped value. func (s *String) String() string { diff --git a/vendor/go.uber.org/atomic/time.go b/vendor/go.uber.org/atomic/time.go new file mode 100644 index 000000000..33460fc37 --- /dev/null +++ b/vendor/go.uber.org/atomic/time.go @@ -0,0 +1,55 @@ +// @generated Code generated by gen-atomicwrapper. + +// Copyright (c) 2020-2021 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "time" +) + +// Time is an atomic type-safe wrapper for time.Time values. +type Time struct { + _ nocmp // disallow non-atomic comparison + + v Value +} + +var _zeroTime time.Time + +// NewTime creates a new Time. +func NewTime(val time.Time) *Time { + x := &Time{} + if val != _zeroTime { + x.Store(val) + } + return x +} + +// Load atomically loads the wrapped time.Time. +func (x *Time) Load() time.Time { + return unpackTime(x.v.Load()) +} + +// Store atomically stores the passed time.Time. +func (x *Time) Store(val time.Time) { + x.v.Store(packTime(val)) +} diff --git a/vendor/go.uber.org/atomic/time_ext.go b/vendor/go.uber.org/atomic/time_ext.go new file mode 100644 index 000000000..1e3dc978a --- /dev/null +++ b/vendor/go.uber.org/atomic/time_ext.go @@ -0,0 +1,36 @@ +// Copyright (c) 2021 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import "time" + +//go:generate bin/gen-atomicwrapper -name=Time -type=time.Time -wrapped=Value -pack=packTime -unpack=unpackTime -imports time -file=time.go + +func packTime(t time.Time) interface{} { + return t +} + +func unpackTime(v interface{}) time.Time { + if t, ok := v.(time.Time); ok { + return t + } + return time.Time{} +} diff --git a/vendor/go.uber.org/atomic/uint32.go b/vendor/go.uber.org/atomic/uint32.go index a973aba1a..7859a9cc3 100644 --- a/vendor/go.uber.org/atomic/uint32.go +++ b/vendor/go.uber.org/atomic/uint32.go @@ -1,6 +1,6 @@ // @generated Code generated by gen-atomicint. -// Copyright (c) 2020 Uber Technologies, Inc. +// Copyright (c) 2020-2021 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -36,8 +36,8 @@ type Uint32 struct { } // NewUint32 creates a new Uint32. -func NewUint32(i uint32) *Uint32 { - return &Uint32{v: i} +func NewUint32(val uint32) *Uint32 { + return &Uint32{v: val} } // Load atomically loads the wrapped value. @@ -46,13 +46,13 @@ func (i *Uint32) Load() uint32 { } // Add atomically adds to the wrapped uint32 and returns the new value. -func (i *Uint32) Add(n uint32) uint32 { - return atomic.AddUint32(&i.v, n) +func (i *Uint32) Add(delta uint32) uint32 { + return atomic.AddUint32(&i.v, delta) } // Sub atomically subtracts from the wrapped uint32 and returns the new value. 
-func (i *Uint32) Sub(n uint32) uint32 { - return atomic.AddUint32(&i.v, ^(n - 1)) +func (i *Uint32) Sub(delta uint32) uint32 { + return atomic.AddUint32(&i.v, ^(delta - 1)) } // Inc atomically increments the wrapped uint32 and returns the new value. @@ -66,18 +66,18 @@ func (i *Uint32) Dec() uint32 { } // CAS is an atomic compare-and-swap. -func (i *Uint32) CAS(old, new uint32) bool { +func (i *Uint32) CAS(old, new uint32) (swapped bool) { return atomic.CompareAndSwapUint32(&i.v, old, new) } // Store atomically stores the passed value. -func (i *Uint32) Store(n uint32) { - atomic.StoreUint32(&i.v, n) +func (i *Uint32) Store(val uint32) { + atomic.StoreUint32(&i.v, val) } // Swap atomically swaps the wrapped uint32 and returns the old value. -func (i *Uint32) Swap(n uint32) uint32 { - return atomic.SwapUint32(&i.v, n) +func (i *Uint32) Swap(val uint32) (old uint32) { + return atomic.SwapUint32(&i.v, val) } // MarshalJSON encodes the wrapped uint32 into JSON. diff --git a/vendor/go.uber.org/atomic/uint64.go b/vendor/go.uber.org/atomic/uint64.go index 3b6c71fd5..2f2a7db63 100644 --- a/vendor/go.uber.org/atomic/uint64.go +++ b/vendor/go.uber.org/atomic/uint64.go @@ -1,6 +1,6 @@ // @generated Code generated by gen-atomicint. -// Copyright (c) 2020 Uber Technologies, Inc. +// Copyright (c) 2020-2021 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -36,8 +36,8 @@ type Uint64 struct { } // NewUint64 creates a new Uint64. -func NewUint64(i uint64) *Uint64 { - return &Uint64{v: i} +func NewUint64(val uint64) *Uint64 { + return &Uint64{v: val} } // Load atomically loads the wrapped value. @@ -46,13 +46,13 @@ func (i *Uint64) Load() uint64 { } // Add atomically adds to the wrapped uint64 and returns the new value. 
-func (i *Uint64) Add(n uint64) uint64 { - return atomic.AddUint64(&i.v, n) +func (i *Uint64) Add(delta uint64) uint64 { + return atomic.AddUint64(&i.v, delta) } // Sub atomically subtracts from the wrapped uint64 and returns the new value. -func (i *Uint64) Sub(n uint64) uint64 { - return atomic.AddUint64(&i.v, ^(n - 1)) +func (i *Uint64) Sub(delta uint64) uint64 { + return atomic.AddUint64(&i.v, ^(delta - 1)) } // Inc atomically increments the wrapped uint64 and returns the new value. @@ -66,18 +66,18 @@ func (i *Uint64) Dec() uint64 { } // CAS is an atomic compare-and-swap. -func (i *Uint64) CAS(old, new uint64) bool { +func (i *Uint64) CAS(old, new uint64) (swapped bool) { return atomic.CompareAndSwapUint64(&i.v, old, new) } // Store atomically stores the passed value. -func (i *Uint64) Store(n uint64) { - atomic.StoreUint64(&i.v, n) +func (i *Uint64) Store(val uint64) { + atomic.StoreUint64(&i.v, val) } // Swap atomically swaps the wrapped uint64 and returns the old value. -func (i *Uint64) Swap(n uint64) uint64 { - return atomic.SwapUint64(&i.v, n) +func (i *Uint64) Swap(val uint64) (old uint64) { + return atomic.SwapUint64(&i.v, val) } // MarshalJSON encodes the wrapped uint64 into JSON. diff --git a/vendor/go.uber.org/atomic/uintptr.go b/vendor/go.uber.org/atomic/uintptr.go new file mode 100644 index 000000000..ecf7a7727 --- /dev/null +++ b/vendor/go.uber.org/atomic/uintptr.go @@ -0,0 +1,102 @@ +// @generated Code generated by gen-atomicint. + +// Copyright (c) 2020-2021 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "encoding/json" + "strconv" + "sync/atomic" +) + +// Uintptr is an atomic wrapper around uintptr. +type Uintptr struct { + _ nocmp // disallow non-atomic comparison + + v uintptr +} + +// NewUintptr creates a new Uintptr. +func NewUintptr(val uintptr) *Uintptr { + return &Uintptr{v: val} +} + +// Load atomically loads the wrapped value. +func (i *Uintptr) Load() uintptr { + return atomic.LoadUintptr(&i.v) +} + +// Add atomically adds to the wrapped uintptr and returns the new value. +func (i *Uintptr) Add(delta uintptr) uintptr { + return atomic.AddUintptr(&i.v, delta) +} + +// Sub atomically subtracts from the wrapped uintptr and returns the new value. +func (i *Uintptr) Sub(delta uintptr) uintptr { + return atomic.AddUintptr(&i.v, ^(delta - 1)) +} + +// Inc atomically increments the wrapped uintptr and returns the new value. 
+func (i *Uintptr) Inc() uintptr { + return i.Add(1) +} + +// Dec atomically decrements the wrapped uintptr and returns the new value. +func (i *Uintptr) Dec() uintptr { + return i.Sub(1) +} + +// CAS is an atomic compare-and-swap. +func (i *Uintptr) CAS(old, new uintptr) (swapped bool) { + return atomic.CompareAndSwapUintptr(&i.v, old, new) +} + +// Store atomically stores the passed value. +func (i *Uintptr) Store(val uintptr) { + atomic.StoreUintptr(&i.v, val) +} + +// Swap atomically swaps the wrapped uintptr and returns the old value. +func (i *Uintptr) Swap(val uintptr) (old uintptr) { + return atomic.SwapUintptr(&i.v, val) +} + +// MarshalJSON encodes the wrapped uintptr into JSON. +func (i *Uintptr) MarshalJSON() ([]byte, error) { + return json.Marshal(i.Load()) +} + +// UnmarshalJSON decodes JSON into the wrapped uintptr. +func (i *Uintptr) UnmarshalJSON(b []byte) error { + var v uintptr + if err := json.Unmarshal(b, &v); err != nil { + return err + } + i.Store(v) + return nil +} + +// String encodes the wrapped value as a string. +func (i *Uintptr) String() string { + v := i.Load() + return strconv.FormatUint(uint64(v), 10) +} diff --git a/vendor/go.uber.org/atomic/unsafe_pointer.go b/vendor/go.uber.org/atomic/unsafe_pointer.go new file mode 100644 index 000000000..169f793dc --- /dev/null +++ b/vendor/go.uber.org/atomic/unsafe_pointer.go @@ -0,0 +1,58 @@ +// Copyright (c) 2021 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "sync/atomic" + "unsafe" +) + +// UnsafePointer is an atomic wrapper around unsafe.Pointer. +type UnsafePointer struct { + _ nocmp // disallow non-atomic comparison + + v unsafe.Pointer +} + +// NewUnsafePointer creates a new UnsafePointer. +func NewUnsafePointer(val unsafe.Pointer) *UnsafePointer { + return &UnsafePointer{v: val} +} + +// Load atomically loads the wrapped value. +func (p *UnsafePointer) Load() unsafe.Pointer { + return atomic.LoadPointer(&p.v) +} + +// Store atomically stores the passed value. +func (p *UnsafePointer) Store(val unsafe.Pointer) { + atomic.StorePointer(&p.v, val) +} + +// Swap atomically swaps the wrapped unsafe.Pointer and returns the old value. +func (p *UnsafePointer) Swap(val unsafe.Pointer) (old unsafe.Pointer) { + return atomic.SwapPointer(&p.v, val) +} + +// CAS is an atomic compare-and-swap. 
+func (p *UnsafePointer) CAS(old, new unsafe.Pointer) (swapped bool) { + return atomic.CompareAndSwapPointer(&p.v, old, new) +} diff --git a/vendor/go.uber.org/multierr/go.mod b/vendor/go.uber.org/multierr/go.mod deleted file mode 100644 index ff8bdf95f..000000000 --- a/vendor/go.uber.org/multierr/go.mod +++ /dev/null @@ -1,8 +0,0 @@ -module go.uber.org/multierr - -go 1.12 - -require ( - github.com/stretchr/testify v1.3.0 - go.uber.org/atomic v1.7.0 -) diff --git a/vendor/go.uber.org/multierr/go.sum b/vendor/go.uber.org/multierr/go.sum deleted file mode 100644 index ecfc28657..000000000 --- a/vendor/go.uber.org/multierr/go.sum +++ /dev/null @@ -1,11 +0,0 @@ -github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= -go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= diff --git a/vendor/go.uber.org/zap/.travis.yml b/vendor/go.uber.org/zap/.travis.yml deleted file mode 100644 index cfdc69f41..000000000 --- a/vendor/go.uber.org/zap/.travis.yml +++ /dev/null @@ -1,23 +0,0 @@ -language: go -sudo: false - -go_import_path: go.uber.org/zap -env: - global: - - TEST_TIMEOUT_SCALE=10 - - GO111MODULE=on - -matrix: - include: - - go: 1.13.x - - go: 1.14.x - 
env: LINT=1 - -script: - - test -z "$LINT" || make lint - - make test - - make bench - -after_success: - - make cover - - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/go.uber.org/zap/CHANGELOG.md b/vendor/go.uber.org/zap/CHANGELOG.md index fa817e6a1..794ee303e 100644 --- a/vendor/go.uber.org/zap/CHANGELOG.md +++ b/vendor/go.uber.org/zap/CHANGELOG.md @@ -1,4 +1,79 @@ # Changelog +All notable changes to this project will be documented in this file. + +This project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html). + +## 1.19.1 (8 Sep 2021) + +### Fixed +* [#1001][]: JSON: Fix complex number encoding with negative imaginary part. Thanks to @hemantjadon. +* [#1003][]: JSON: Fix inaccurate precision when encoding float32. + +[#1001]: https://github.com/uber-go/zap/pull/1001 +[#1003]: https://github.com/uber-go/zap/pull/1003 + +## 1.19.0 (9 Aug 2021) + +Enhancements: +* [#975][]: Avoid panicking in Sampler core if the level is out of bounds. +* [#984][]: Reduce the size of BufferedWriteSyncer by aligning the fields + better. + +[#975]: https://github.com/uber-go/zap/pull/975 +[#984]: https://github.com/uber-go/zap/pull/984 + +Thanks to @lancoLiu and @thockin for their contributions to this release. + +## 1.18.1 (28 Jun 2021) + +Bugfixes: +* [#974][]: Fix nil dereference in logger constructed by `zap.NewNop`. + +[#974]: https://github.com/uber-go/zap/pull/974 + +## 1.18.0 (28 Jun 2021) + +Enhancements: +* [#961][]: Add `zapcore.BufferedWriteSyncer`, a new `WriteSyncer` that buffers + messages in-memory and flushes them periodically. +* [#971][]: Add `zapio.Writer` to use a Zap logger as an `io.Writer`. +* [#897][]: Add `zap.WithClock` option to control the source of time via the + new `zapcore.Clock` interface. +* [#949][]: Avoid panicking in `zap.SugaredLogger` when arguments of `*w` + methods don't match expectations. +* [#943][]: Add support for filtering by level or arbitrary matcher function to + `zaptest/observer`. 
+* [#691][]: Comply with `io.StringWriter` and `io.ByteWriter` in Zap's + `buffer.Buffer`. + +Thanks to @atrn0, @ernado, @heyanfu, @hnlq715, @zchee +for their contributions to this release. + +[#691]: https://github.com/uber-go/zap/pull/691 +[#897]: https://github.com/uber-go/zap/pull/897 +[#943]: https://github.com/uber-go/zap/pull/943 +[#949]: https://github.com/uber-go/zap/pull/949 +[#961]: https://github.com/uber-go/zap/pull/961 +[#971]: https://github.com/uber-go/zap/pull/971 + +## 1.17.0 (25 May 2021) + +Bugfixes: +* [#867][]: Encode `` for nil `error` instead of a panic. +* [#931][], [#936][]: Update minimum version constraints to address + vulnerabilities in dependencies. + +Enhancements: +* [#865][]: Improve alignment of fields of the Logger struct, reducing its + size from 96 to 80 bytes. +* [#881][]: Support `grpclog.LoggerV2` in zapgrpc. +* [#903][]: Support URL-encoded POST requests to the AtomicLevel HTTP handler + with the `application/x-www-form-urlencoded` content type. +* [#912][]: Support multi-field encoding with `zap.Inline`. +* [#913][]: Speed up SugaredLogger for calls with a single string. +* [#928][]: Add support for filtering by field name to `zaptest/observer`. + +Thanks to @ash2k, @FMLS, @jimmystewpot, @Oncilla, @tsoslow, @tylitianrui, @withshubh, and @wziww for their contributions to this release. ## 1.16.0 (1 Sep 2020) @@ -430,3 +505,12 @@ upgrade to the upcoming stable release. 
[#854]: https://github.com/uber-go/zap/pull/854 [#861]: https://github.com/uber-go/zap/pull/861 [#862]: https://github.com/uber-go/zap/pull/862 +[#865]: https://github.com/uber-go/zap/pull/865 +[#867]: https://github.com/uber-go/zap/pull/867 +[#881]: https://github.com/uber-go/zap/pull/881 +[#903]: https://github.com/uber-go/zap/pull/903 +[#912]: https://github.com/uber-go/zap/pull/912 +[#913]: https://github.com/uber-go/zap/pull/913 +[#928]: https://github.com/uber-go/zap/pull/928 +[#931]: https://github.com/uber-go/zap/pull/931 +[#936]: https://github.com/uber-go/zap/pull/936 diff --git a/vendor/go.uber.org/zap/CONTRIBUTING.md b/vendor/go.uber.org/zap/CONTRIBUTING.md index 9454bbaf0..5cd965687 100644 --- a/vendor/go.uber.org/zap/CONTRIBUTING.md +++ b/vendor/go.uber.org/zap/CONTRIBUTING.md @@ -25,12 +25,6 @@ git remote add upstream https://github.com/uber-go/zap.git git fetch upstream ``` -Install zap's dependencies: - -``` -make dependencies -``` - Make sure that the tests and the linters pass: ``` diff --git a/vendor/go.uber.org/zap/FAQ.md b/vendor/go.uber.org/zap/FAQ.md index 5ec728875..b183b20bc 100644 --- a/vendor/go.uber.org/zap/FAQ.md +++ b/vendor/go.uber.org/zap/FAQ.md @@ -27,6 +27,13 @@ abstraction, and it lets us add methods without introducing breaking changes. Your applications should define and depend upon an interface that includes just the methods you use. +### Why are some of my logs missing? + +Logs are dropped intentionally by zap when sampling is enabled. The production +configuration (as returned by `NewProductionConfig()` enables sampling which will +cause repeated logs within a second to be sampled. See more details on why sampling +is enabled in [Why sample application logs](https://github.com/uber-go/zap/blob/master/FAQ.md#why-sample-application-logs). + ### Why sample application logs? 
Applications often experience runs of errors, either because of a bug or @@ -150,6 +157,7 @@ We're aware of the following extensions, but haven't used them ourselves: | `github.com/fgrosse/zaptest` | Ginkgo | | `github.com/blendle/zapdriver` | Stackdriver | | `github.com/moul/zapgorm` | Gorm | +| `github.com/moul/zapfilter` | Advanced filtering rules | [go-proverbs]: https://go-proverbs.github.io/ [import-path]: https://golang.org/cmd/go/#hdr-Remote_import_paths diff --git a/vendor/go.uber.org/zap/Makefile b/vendor/go.uber.org/zap/Makefile index dfaf6406e..9b1bc3b0e 100644 --- a/vendor/go.uber.org/zap/Makefile +++ b/vendor/go.uber.org/zap/Makefile @@ -7,7 +7,7 @@ BENCH_FLAGS ?= -cpuprofile=cpu.pprof -memprofile=mem.pprof -benchmem # Directories containing independent Go modules. # # We track coverage only for the main module. -MODULE_DIRS = . ./benchmarks +MODULE_DIRS = . ./benchmarks ./zapgrpc/internal/test # Many Go tools take file globs or directories as arguments instead of packages. GO_FILES := $(shell \ @@ -33,12 +33,18 @@ lint: $(GOLINT) $(STATICCHECK) @echo "Checking for license headers..." @./checklicense.sh | tee -a lint.log @[ ! -s lint.log ] + @echo "Checking 'go mod tidy'..." + @make tidy + @if ! 
git diff --quiet; then \ + echo "'go mod tidy' resulted in changes or working tree is dirty:"; \ + git --no-pager diff; \ + fi $(GOLINT): - go install golang.org/x/lint/golint + cd tools && go install golang.org/x/lint/golint $(STATICCHECK): - go install honnef.co/go/tools/cmd/staticcheck + cd tools && go install honnef.co/go/tools/cmd/staticcheck .PHONY: test test: @@ -61,3 +67,7 @@ bench: updatereadme: rm -f README.md cat .readme.tmpl | go run internal/readme/readme.go > README.md + +.PHONY: tidy +tidy: + @$(foreach dir,$(MODULE_DIRS),(cd $(dir) && go mod tidy) &&) true diff --git a/vendor/go.uber.org/zap/README.md b/vendor/go.uber.org/zap/README.md index bcea28a19..1e64d6cff 100644 --- a/vendor/go.uber.org/zap/README.md +++ b/vendor/go.uber.org/zap/README.md @@ -123,10 +123,10 @@ Released under the [MIT License](LICENSE.txt). benchmarking against slightly older versions of other packages. Versions are pinned in the [benchmarks/go.mod][] file. [↩](#anchor-versions) -[doc-img]: https://godoc.org/go.uber.org/zap?status.svg -[doc]: https://godoc.org/go.uber.org/zap -[ci-img]: https://travis-ci.com/uber-go/zap.svg?branch=master -[ci]: https://travis-ci.com/uber-go/zap +[doc-img]: https://pkg.go.dev/badge/go.uber.org/zap +[doc]: https://pkg.go.dev/go.uber.org/zap +[ci-img]: https://github.com/uber-go/zap/actions/workflows/go.yml/badge.svg +[ci]: https://github.com/uber-go/zap/actions/workflows/go.yml [cov-img]: https://codecov.io/gh/uber-go/zap/branch/master/graph/badge.svg [cov]: https://codecov.io/gh/uber-go/zap [benchmarking suite]: https://github.com/uber-go/zap/tree/master/benchmarks diff --git a/vendor/go.uber.org/zap/buffer/buffer.go b/vendor/go.uber.org/zap/buffer/buffer.go index 3f4b86e08..9e929cd98 100644 --- a/vendor/go.uber.org/zap/buffer/buffer.go +++ b/vendor/go.uber.org/zap/buffer/buffer.go @@ -106,6 +106,24 @@ func (b *Buffer) Write(bs []byte) (int, error) { return len(bs), nil } +// WriteByte writes a single byte to the Buffer. 
+// +// Error returned is always nil, function signature is compatible +// with bytes.Buffer and bufio.Writer +func (b *Buffer) WriteByte(v byte) error { + b.AppendByte(v) + return nil +} + +// WriteString writes a string to the Buffer. +// +// Error returned is always nil, function signature is compatible +// with bytes.Buffer and bufio.Writer +func (b *Buffer) WriteString(s string) (int, error) { + b.AppendString(s) + return len(s), nil +} + // TrimNewline trims any final "\n" byte from the end of the buffer. func (b *Buffer) TrimNewline() { if i := len(b.bs) - 1; i >= 0 { diff --git a/vendor/go.uber.org/zap/field.go b/vendor/go.uber.org/zap/field.go index 3c0d7d957..bbb745db5 100644 --- a/vendor/go.uber.org/zap/field.go +++ b/vendor/go.uber.org/zap/field.go @@ -400,6 +400,16 @@ func Object(key string, val zapcore.ObjectMarshaler) Field { return Field{Key: key, Type: zapcore.ObjectMarshalerType, Interface: val} } +// Inline constructs a Field that is similar to Object, but it +// will add the elements of the provided ObjectMarshaler to the +// current namespace. +func Inline(val zapcore.ObjectMarshaler) Field { + return zapcore.Field{ + Type: zapcore.InlineMarshalerType, + Interface: val, + } +} + // Any takes a key and an arbitrary value and chooses the best way to represent // them as a field, falling back to a reflection-based approach only if // necessary. 
diff --git a/vendor/go.uber.org/zap/go.mod b/vendor/go.uber.org/zap/go.mod deleted file mode 100644 index 6ef4db70e..000000000 --- a/vendor/go.uber.org/zap/go.mod +++ /dev/null @@ -1,13 +0,0 @@ -module go.uber.org/zap - -go 1.13 - -require ( - github.com/pkg/errors v0.8.1 - github.com/stretchr/testify v1.4.0 - go.uber.org/atomic v1.6.0 - go.uber.org/multierr v1.5.0 - golang.org/x/lint v0.0.0-20190930215403-16217165b5de - gopkg.in/yaml.v2 v2.2.2 - honnef.co/go/tools v0.0.1-2019.2.3 -) diff --git a/vendor/go.uber.org/zap/go.sum b/vendor/go.uber.org/zap/go.sum deleted file mode 100644 index 99cdb93ea..000000000 --- a/vendor/go.uber.org/zap/go.sum +++ /dev/null @@ -1,56 +0,0 @@ -github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0 
h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk= -go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= -go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A= -go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= -go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4= -go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 
-golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c h1:IGkKhmfzcztjm6gYkykvu/NiS8kaqbCWAEWWAyf8J5U= -golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5 h1:hKsoRgsbwY1NafxrwTs+k64bikrLBkAgPir1TNCj3Zs= -golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM= -honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= diff 
--git a/vendor/go.uber.org/zap/http_handler.go b/vendor/go.uber.org/zap/http_handler.go index 1b0ecaca9..1297c33b3 100644 --- a/vendor/go.uber.org/zap/http_handler.go +++ b/vendor/go.uber.org/zap/http_handler.go @@ -23,6 +23,7 @@ package zap import ( "encoding/json" "fmt" + "io" "net/http" "go.uber.org/zap/zapcore" @@ -31,47 +32,63 @@ import ( // ServeHTTP is a simple JSON endpoint that can report on or change the current // logging level. // -// GET requests return a JSON description of the current logging level. PUT -// requests change the logging level and expect a payload like: +// GET +// +// The GET request returns a JSON description of the current logging level like: // {"level":"info"} // -// It's perfectly safe to change the logging level while a program is running. +// PUT +// +// The PUT request changes the logging level. It is perfectly safe to change the +// logging level while a program is running. Two content types are supported: +// +// Content-Type: application/x-www-form-urlencoded +// +// With this content type, the level can be provided through the request body or +// a query parameter. The log level is URL encoded like: +// +// level=debug +// +// The request body takes precedence over the query parameter, if both are +// specified. +// +// This content type is the default for a curl PUT request. Following are two +// example curl requests that both set the logging level to debug. 
+// +// curl -X PUT localhost:8080/log/level?level=debug +// curl -X PUT localhost:8080/log/level -d level=debug +// +// For any other content type, the payload is expected to be JSON encoded and +// look like: +// +// {"level":"info"} +// +// An example curl request could look like this: +// +// curl -X PUT localhost:8080/log/level -H "Content-Type: application/json" -d '{"level":"debug"}' +// func (lvl AtomicLevel) ServeHTTP(w http.ResponseWriter, r *http.Request) { type errorResponse struct { Error string `json:"error"` } type payload struct { - Level *zapcore.Level `json:"level"` + Level zapcore.Level `json:"level"` } enc := json.NewEncoder(w) switch r.Method { - case http.MethodGet: - current := lvl.Level() - enc.Encode(payload{Level: ¤t}) - + enc.Encode(payload{Level: lvl.Level()}) case http.MethodPut: - var req payload - - if errmess := func() string { - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - return fmt.Sprintf("Request body must be well-formed JSON: %v", err) - } - if req.Level == nil { - return "Must specify a logging level." - } - return "" - }(); errmess != "" { + requestedLvl, err := decodePutRequest(r.Header.Get("Content-Type"), r) + if err != nil { w.WriteHeader(http.StatusBadRequest) - enc.Encode(errorResponse{Error: errmess}) + enc.Encode(errorResponse{Error: err.Error()}) return } - - lvl.SetLevel(*req.Level) - enc.Encode(req) - + lvl.SetLevel(requestedLvl) + enc.Encode(payload{Level: lvl.Level()}) default: w.WriteHeader(http.StatusMethodNotAllowed) enc.Encode(errorResponse{ @@ -79,3 +96,37 @@ func (lvl AtomicLevel) ServeHTTP(w http.ResponseWriter, r *http.Request) { }) } } + +// Decodes incoming PUT requests and returns the requested logging level. 
+func decodePutRequest(contentType string, r *http.Request) (zapcore.Level, error) { + if contentType == "application/x-www-form-urlencoded" { + return decodePutURL(r) + } + return decodePutJSON(r.Body) +} + +func decodePutURL(r *http.Request) (zapcore.Level, error) { + lvl := r.FormValue("level") + if lvl == "" { + return 0, fmt.Errorf("must specify logging level") + } + var l zapcore.Level + if err := l.UnmarshalText([]byte(lvl)); err != nil { + return 0, err + } + return l, nil +} + +func decodePutJSON(body io.Reader) (zapcore.Level, error) { + var pld struct { + Level *zapcore.Level `json:"level"` + } + if err := json.NewDecoder(body).Decode(&pld); err != nil { + return 0, fmt.Errorf("malformed request body: %v", err) + } + if pld.Level == nil { + return 0, fmt.Errorf("must specify logging level") + } + return *pld.Level, nil + +} diff --git a/vendor/go.uber.org/zap/logger.go b/vendor/go.uber.org/zap/logger.go index ea484aed1..f116bd936 100644 --- a/vendor/go.uber.org/zap/logger.go +++ b/vendor/go.uber.org/zap/logger.go @@ -26,7 +26,6 @@ import ( "os" "runtime" "strings" - "time" "go.uber.org/zap/zapcore" ) @@ -42,14 +41,17 @@ type Logger struct { core zapcore.Core development bool + addCaller bool + onFatal zapcore.CheckWriteAction // default is WriteThenFatal + name string errorOutput zapcore.WriteSyncer - addCaller bool - addStack zapcore.LevelEnabler + addStack zapcore.LevelEnabler callerSkip int - onFatal zapcore.CheckWriteAction // default is WriteThenFatal + + clock zapcore.Clock } // New constructs a new Logger from the provided zapcore.Core and Options. If @@ -70,6 +72,7 @@ func New(core zapcore.Core, options ...Option) *Logger { core: core, errorOutput: zapcore.Lock(os.Stderr), addStack: zapcore.FatalLevel + 1, + clock: zapcore.DefaultClock, } return log.WithOptions(options...) 
} @@ -84,6 +87,7 @@ func NewNop() *Logger { core: zapcore.NewNopCore(), errorOutput: zapcore.AddSync(ioutil.Discard), addStack: zapcore.FatalLevel + 1, + clock: zapcore.DefaultClock, } } @@ -269,7 +273,7 @@ func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry { // log message will actually be written somewhere. ent := zapcore.Entry{ LoggerName: log.name, - Time: time.Now(), + Time: log.clock.Now(), Level: lvl, Message: msg, } @@ -306,7 +310,7 @@ func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry { if log.addCaller { frame, defined := getCallerFrame(log.callerSkip + callerSkipOffset) if !defined { - fmt.Fprintf(log.errorOutput, "%v Logger.check error: failed to get caller\n", time.Now().UTC()) + fmt.Fprintf(log.errorOutput, "%v Logger.check error: failed to get caller\n", ent.Time.UTC()) log.errorOutput.Sync() } @@ -334,7 +338,7 @@ func getCallerFrame(skip int) (frame runtime.Frame, ok bool) { const skipOffset = 2 // skip getCallerFrame and Callers pc := make([]uintptr, 1) - numFrames := runtime.Callers(skip+skipOffset, pc[:]) + numFrames := runtime.Callers(skip+skipOffset, pc) if numFrames < 1 { return } diff --git a/vendor/go.uber.org/zap/options.go b/vendor/go.uber.org/zap/options.go index 0135c2092..e9e66161f 100644 --- a/vendor/go.uber.org/zap/options.go +++ b/vendor/go.uber.org/zap/options.go @@ -138,3 +138,11 @@ func OnFatal(action zapcore.CheckWriteAction) Option { log.onFatal = action }) } + +// WithClock specifies the clock used by the logger to determine the current +// time for logged entries. Defaults to the system clock with time.Now. 
+func WithClock(clock zapcore.Clock) Option { + return optionFunc(func(log *Logger) { + log.clock = clock + }) +} diff --git a/vendor/go.uber.org/zap/sugar.go b/vendor/go.uber.org/zap/sugar.go index 77ca227f4..0b9651981 100644 --- a/vendor/go.uber.org/zap/sugar.go +++ b/vendor/go.uber.org/zap/sugar.go @@ -222,19 +222,30 @@ func (s *SugaredLogger) log(lvl zapcore.Level, template string, fmtArgs []interf return } - // Format with Sprint, Sprintf, or neither. - msg := template - if msg == "" && len(fmtArgs) > 0 { - msg = fmt.Sprint(fmtArgs...) - } else if msg != "" && len(fmtArgs) > 0 { - msg = fmt.Sprintf(template, fmtArgs...) - } - + msg := getMessage(template, fmtArgs) if ce := s.base.Check(lvl, msg); ce != nil { ce.Write(s.sweetenFields(context)...) } } +// getMessage format with Sprint, Sprintf, or neither. +func getMessage(template string, fmtArgs []interface{}) string { + if len(fmtArgs) == 0 { + return template + } + + if template != "" { + return fmt.Sprintf(template, fmtArgs...) + } + + if len(fmtArgs) == 1 { + if str, ok := fmtArgs[0].(string); ok { + return str + } + } + return fmt.Sprint(fmtArgs...) +} + func (s *SugaredLogger) sweetenFields(args []interface{}) []Field { if len(args) == 0 { return nil @@ -255,7 +266,7 @@ func (s *SugaredLogger) sweetenFields(args []interface{}) []Field { // Make sure this element isn't a dangling key. if i == len(args)-1 { - s.base.DPanic(_oddNumberErrMsg, Any("ignored", args[i])) + s.base.Error(_oddNumberErrMsg, Any("ignored", args[i])) break } @@ -276,7 +287,7 @@ func (s *SugaredLogger) sweetenFields(args []interface{}) []Field { // If we encountered any invalid key-value pairs, log an error. 
if len(invalid) > 0 { - s.base.DPanic(_nonStringKeyErrMsg, Array("invalid", invalid)) + s.base.Error(_nonStringKeyErrMsg, Array("invalid", invalid)) } return fields } diff --git a/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go b/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go new file mode 100644 index 000000000..ef2f7d963 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go @@ -0,0 +1,188 @@ +// Copyright (c) 2021 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import ( + "bufio" + "sync" + "time" + + "go.uber.org/multierr" +) + +const ( + // _defaultBufferSize specifies the default size used by Buffer. + _defaultBufferSize = 256 * 1024 // 256 kB + + // _defaultFlushInterval specifies the default flush interval for + // Buffer. 
+ _defaultFlushInterval = 30 * time.Second +) + +// A BufferedWriteSyncer is a WriteSyncer that buffers writes in-memory before +// flushing them to a wrapped WriteSyncer after reaching some limit, or at some +// fixed interval--whichever comes first. +// +// BufferedWriteSyncer is safe for concurrent use. You don't need to use +// zapcore.Lock for WriteSyncers with BufferedWriteSyncer. +type BufferedWriteSyncer struct { + // WS is the WriteSyncer around which BufferedWriteSyncer will buffer + // writes. + // + // This field is required. + WS WriteSyncer + + // Size specifies the maximum amount of data the writer will buffered + // before flushing. + // + // Defaults to 256 kB if unspecified. + Size int + + // FlushInterval specifies how often the writer should flush data if + // there have been no writes. + // + // Defaults to 30 seconds if unspecified. + FlushInterval time.Duration + + // Clock, if specified, provides control of the source of time for the + // writer. + // + // Defaults to the system clock. + Clock Clock + + // unexported fields for state + mu sync.Mutex + initialized bool // whether initialize() has run + stopped bool // whether Stop() has run + writer *bufio.Writer + ticker *time.Ticker + stop chan struct{} // closed when flushLoop should stop + done chan struct{} // closed when flushLoop has stopped +} + +func (s *BufferedWriteSyncer) initialize() { + size := s.Size + if size == 0 { + size = _defaultBufferSize + } + + flushInterval := s.FlushInterval + if flushInterval == 0 { + flushInterval = _defaultFlushInterval + } + + if s.Clock == nil { + s.Clock = DefaultClock + } + + s.ticker = s.Clock.NewTicker(flushInterval) + s.writer = bufio.NewWriterSize(s.WS, size) + s.stop = make(chan struct{}) + s.done = make(chan struct{}) + s.initialized = true + go s.flushLoop() +} + +// Write writes log data into buffer syncer directly, multiple Write calls will be batched, +// and log data will be flushed to disk when the buffer is full or periodically. 
+func (s *BufferedWriteSyncer) Write(bs []byte) (int, error) { + s.mu.Lock() + defer s.mu.Unlock() + + if !s.initialized { + s.initialize() + } + + // To avoid partial writes from being flushed, we manually flush the existing buffer if: + // * The current write doesn't fit into the buffer fully, and + // * The buffer is not empty (since bufio will not split large writes when the buffer is empty) + if len(bs) > s.writer.Available() && s.writer.Buffered() > 0 { + if err := s.writer.Flush(); err != nil { + return 0, err + } + } + + return s.writer.Write(bs) +} + +// Sync flushes buffered log data into disk directly. +func (s *BufferedWriteSyncer) Sync() error { + s.mu.Lock() + defer s.mu.Unlock() + + var err error + if s.initialized { + err = s.writer.Flush() + } + + return multierr.Append(err, s.WS.Sync()) +} + +// flushLoop flushes the buffer at the configured interval until Stop is +// called. +func (s *BufferedWriteSyncer) flushLoop() { + defer close(s.done) + + for { + select { + case <-s.ticker.C: + // we just simply ignore error here + // because the underlying bufio writer stores any errors + // and we return any error from Sync() as part of the close + _ = s.Sync() + case <-s.stop: + return + } + } +} + +// Stop closes the buffer, cleans up background goroutines, and flushes +// remaining unwritten data. +func (s *BufferedWriteSyncer) Stop() (err error) { + var stopped bool + + // Critical section. + func() { + s.mu.Lock() + defer s.mu.Unlock() + + if !s.initialized { + return + } + + stopped = s.stopped + if stopped { + return + } + s.stopped = true + + s.ticker.Stop() + close(s.stop) // tell flushLoop to stop + <-s.done // and wait until it has + }() + + // Don't call Sync on consecutive Stops. 
+ if !stopped { + err = s.Sync() + } + + return err +} diff --git a/vendor/go.uber.org/zap/zapcore/clock.go b/vendor/go.uber.org/zap/zapcore/clock.go new file mode 100644 index 000000000..d2ea95b39 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/clock.go @@ -0,0 +1,50 @@ +// Copyright (c) 2021 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import ( + "time" +) + +// DefaultClock is the default clock used by Zap in operations that require +// time. This clock uses the system clock for all operations. +var DefaultClock = systemClock{} + +// Clock is a source of time for logged entries. +type Clock interface { + // Now returns the current local time. + Now() time.Time + + // NewTicker returns *time.Ticker that holds a channel + // that delivers "ticks" of a clock. + NewTicker(time.Duration) *time.Ticker +} + +// systemClock implements default Clock that uses system time. 
+type systemClock struct{} + +func (systemClock) Now() time.Time { + return time.Now() +} + +func (systemClock) NewTicker(duration time.Duration) *time.Ticker { + return time.NewTicker(duration) +} diff --git a/vendor/go.uber.org/zap/zapcore/console_encoder.go b/vendor/go.uber.org/zap/zapcore/console_encoder.go index 3b68f8c0c..2307af404 100644 --- a/vendor/go.uber.org/zap/zapcore/console_encoder.go +++ b/vendor/go.uber.org/zap/zapcore/console_encoder.go @@ -56,7 +56,7 @@ type consoleEncoder struct { // encoder configuration, it will omit any element whose key is set to the empty // string. func NewConsoleEncoder(cfg EncoderConfig) Encoder { - if len(cfg.ConsoleSeparator) == 0 { + if cfg.ConsoleSeparator == "" { // Use a default delimiter of '\t' for backwards compatibility cfg.ConsoleSeparator = "\t" } diff --git a/vendor/go.uber.org/zap/zapcore/entry.go b/vendor/go.uber.org/zap/zapcore/entry.go index 4aa8b4f90..0885505b7 100644 --- a/vendor/go.uber.org/zap/zapcore/entry.go +++ b/vendor/go.uber.org/zap/zapcore/entry.go @@ -208,7 +208,7 @@ func (ce *CheckedEntry) Write(fields ...Field) { // If the entry is dirty, log an internal error; because the // CheckedEntry is being used after it was returned to the pool, // the message may be an amalgamation from multiple call sites. 
- fmt.Fprintf(ce.ErrorOutput, "%v Unsafe CheckedEntry re-use near Entry %+v.\n", time.Now(), ce.Entry) + fmt.Fprintf(ce.ErrorOutput, "%v Unsafe CheckedEntry re-use near Entry %+v.\n", ce.Time, ce.Entry) ce.ErrorOutput.Sync() } return @@ -219,11 +219,9 @@ func (ce *CheckedEntry) Write(fields ...Field) { for i := range ce.cores { err = multierr.Append(err, ce.cores[i].Write(ce.Entry, fields)) } - if ce.ErrorOutput != nil { - if err != nil { - fmt.Fprintf(ce.ErrorOutput, "%v write error: %v\n", time.Now(), err) - ce.ErrorOutput.Sync() - } + if err != nil && ce.ErrorOutput != nil { + fmt.Fprintf(ce.ErrorOutput, "%v write error: %v\n", ce.Time, err) + ce.ErrorOutput.Sync() } should, msg := ce.should, ce.Message diff --git a/vendor/go.uber.org/zap/zapcore/error.go b/vendor/go.uber.org/zap/zapcore/error.go index 9ba2272c3..74919b0cc 100644 --- a/vendor/go.uber.org/zap/zapcore/error.go +++ b/vendor/go.uber.org/zap/zapcore/error.go @@ -22,6 +22,7 @@ package zapcore import ( "fmt" + "reflect" "sync" ) @@ -42,7 +43,23 @@ import ( // ... // ], // } -func encodeError(key string, err error, enc ObjectEncoder) error { +func encodeError(key string, err error, enc ObjectEncoder) (retErr error) { + // Try to capture panics (from nil references or otherwise) when calling + // the Error() method + defer func() { + if rerr := recover(); rerr != nil { + // If it's a nil pointer, just say "". The likeliest causes are a + // error that fails to guard against nil or a nil pointer for a + // value receiver, and in either case, "" is a nice result. 
+ if v := reflect.ValueOf(err); v.Kind() == reflect.Ptr && v.IsNil() { + enc.AddString(key, "") + return + } + + retErr = fmt.Errorf("PANIC=%v", rerr) + } + }() + basic := err.Error() enc.AddString(key, basic) @@ -66,7 +83,7 @@ type errorGroup interface { Errors() []error } -// Note that errArry and errArrayElem are very similar to the version +// Note that errArray and errArrayElem are very similar to the version // implemented in the top-level error.go file. We can't re-use this because // that would require exporting errArray as part of the zapcore API. diff --git a/vendor/go.uber.org/zap/zapcore/field.go b/vendor/go.uber.org/zap/zapcore/field.go index 7e255d63e..95bdb0a12 100644 --- a/vendor/go.uber.org/zap/zapcore/field.go +++ b/vendor/go.uber.org/zap/zapcore/field.go @@ -92,6 +92,10 @@ const ( ErrorType // SkipType indicates that the field is a no-op. SkipType + + // InlineMarshalerType indicates that the field carries an ObjectMarshaler + // that should be inlined. + InlineMarshalerType ) // A Field is a marshaling operation used to add a key-value pair to a logger's @@ -115,6 +119,8 @@ func (f Field) AddTo(enc ObjectEncoder) { err = enc.AddArray(f.Key, f.Interface.(ArrayMarshaler)) case ObjectMarshalerType: err = enc.AddObject(f.Key, f.Interface.(ObjectMarshaler)) + case InlineMarshalerType: + err = f.Interface.(ObjectMarshaler).MarshalLogObject(enc) case BinaryType: enc.AddBinary(f.Key, f.Interface.([]byte)) case BoolType: @@ -167,7 +173,7 @@ func (f Field) AddTo(enc ObjectEncoder) { case StringerType: err = encodeStringer(f.Key, f.Interface, enc) case ErrorType: - encodeError(f.Key, f.Interface.(error), enc) + err = encodeError(f.Key, f.Interface.(error), enc) case SkipType: break default: diff --git a/vendor/go.uber.org/zap/zapcore/json_encoder.go b/vendor/go.uber.org/zap/zapcore/json_encoder.go index 5cf7d917e..af220d9b4 100644 --- a/vendor/go.uber.org/zap/zapcore/json_encoder.go +++ b/vendor/go.uber.org/zap/zapcore/json_encoder.go @@ -128,6 +128,11 @@ 
func (enc *jsonEncoder) AddFloat64(key string, val float64) { enc.AppendFloat64(val) } +func (enc *jsonEncoder) AddFloat32(key string, val float32) { + enc.addKey(key) + enc.AppendFloat32(val) +} + func (enc *jsonEncoder) AddInt64(key string, val int64) { enc.addKey(key) enc.AppendInt64(val) @@ -228,7 +233,11 @@ func (enc *jsonEncoder) AppendComplex128(val complex128) { // Because we're always in a quoted string, we can use strconv without // special-casing NaN and +/-Inf. enc.buf.AppendFloat(r, 64) - enc.buf.AppendByte('+') + // If imaginary part is less than 0, minus (-) sign is added by default + // by AppendFloat. + if i >= 0 { + enc.buf.AppendByte('+') + } enc.buf.AppendFloat(i, 64) enc.buf.AppendByte('i') enc.buf.AppendByte('"') @@ -293,7 +302,6 @@ func (enc *jsonEncoder) AppendUint64(val uint64) { } func (enc *jsonEncoder) AddComplex64(k string, v complex64) { enc.AddComplex128(k, complex128(v)) } -func (enc *jsonEncoder) AddFloat32(k string, v float32) { enc.AddFloat64(k, float64(v)) } func (enc *jsonEncoder) AddInt(k string, v int) { enc.AddInt64(k, int64(v)) } func (enc *jsonEncoder) AddInt32(k string, v int32) { enc.AddInt64(k, int64(v)) } func (enc *jsonEncoder) AddInt16(k string, v int16) { enc.AddInt64(k, int64(v)) } diff --git a/vendor/go.uber.org/zap/zapcore/sampler.go b/vendor/go.uber.org/zap/zapcore/sampler.go index 25f10ca1d..31ed96e12 100644 --- a/vendor/go.uber.org/zap/zapcore/sampler.go +++ b/vendor/go.uber.org/zap/zapcore/sampler.go @@ -197,12 +197,14 @@ func (s *sampler) Check(ent Entry, ce *CheckedEntry) *CheckedEntry { return ce } - counter := s.counts.get(ent.Level, ent.Message) - n := counter.IncCheckReset(ent.Time, s.tick) - if n > s.first && (n-s.first)%s.thereafter != 0 { - s.hook(ent, LogDropped) - return ce + if ent.Level >= _minLevel && ent.Level <= _maxLevel { + counter := s.counts.get(ent.Level, ent.Message) + n := counter.IncCheckReset(ent.Time, s.tick) + if n > s.first && (n-s.first)%s.thereafter != 0 { + s.hook(ent, 
LogDropped) + return ce + } + s.hook(ent, LogSampled) } - s.hook(ent, LogSampled) return s.Core.Check(ent, ce) } diff --git a/vendor/go.uber.org/zap/zapcore/write_syncer.go b/vendor/go.uber.org/zap/zapcore/write_syncer.go index 209e25fe2..d4a1af3d0 100644 --- a/vendor/go.uber.org/zap/zapcore/write_syncer.go +++ b/vendor/go.uber.org/zap/zapcore/write_syncer.go @@ -91,8 +91,7 @@ func NewMultiWriteSyncer(ws ...WriteSyncer) WriteSyncer { if len(ws) == 1 { return ws[0] } - // Copy to protect against https://github.com/golang/go/issues/7809 - return multiWriteSyncer(append([]WriteSyncer(nil), ws...)) + return multiWriteSyncer(ws) } // See https://golang.org/src/io/multi.go diff --git a/vendor/go.uber.org/zap/zaptest/observer/observer.go b/vendor/go.uber.org/zap/zaptest/observer/observer.go index 78f5be45d..03866bd91 100644 --- a/vendor/go.uber.org/zap/zaptest/observer/observer.go +++ b/vendor/go.uber.org/zap/zaptest/observer/observer.go @@ -19,7 +19,7 @@ // THE SOFTWARE. // Package observer provides a zapcore.Core that keeps an in-memory, -// encoding-agnostic repesentation of log entries. It's useful for +// encoding-agnostic representation of log entries. It's useful for // applications that want to unit test their log output without tying their // tests to a particular output encoding. package observer // import "go.uber.org/zap/zaptest/observer" @@ -78,23 +78,30 @@ func (o *ObservedLogs) AllUntimed() []LoggedEntry { return ret } +// FilterLevelExact filters entries to those logged at exactly the given level. +func (o *ObservedLogs) FilterLevelExact(level zapcore.Level) *ObservedLogs { + return o.Filter(func(e LoggedEntry) bool { + return e.Level == level + }) +} + // FilterMessage filters entries to those that have the specified message. 
func (o *ObservedLogs) FilterMessage(msg string) *ObservedLogs { - return o.filter(func(e LoggedEntry) bool { + return o.Filter(func(e LoggedEntry) bool { return e.Message == msg }) } // FilterMessageSnippet filters entries to those that have a message containing the specified snippet. func (o *ObservedLogs) FilterMessageSnippet(snippet string) *ObservedLogs { - return o.filter(func(e LoggedEntry) bool { + return o.Filter(func(e LoggedEntry) bool { return strings.Contains(e.Message, snippet) }) } // FilterField filters entries to those that have the specified field. func (o *ObservedLogs) FilterField(field zapcore.Field) *ObservedLogs { - return o.filter(func(e LoggedEntry) bool { + return o.Filter(func(e LoggedEntry) bool { for _, ctxField := range e.Context { if ctxField.Equals(field) { return true @@ -104,13 +111,27 @@ func (o *ObservedLogs) FilterField(field zapcore.Field) *ObservedLogs { }) } -func (o *ObservedLogs) filter(match func(LoggedEntry) bool) *ObservedLogs { +// FilterFieldKey filters entries to those that have the specified key. +func (o *ObservedLogs) FilterFieldKey(key string) *ObservedLogs { + return o.Filter(func(e LoggedEntry) bool { + for _, ctxField := range e.Context { + if ctxField.Key == key { + return true + } + } + return false + }) +} + +// Filter returns a copy of this ObservedLogs containing only those entries +// for which the provided function returns true. +func (o *ObservedLogs) Filter(keep func(LoggedEntry) bool) *ObservedLogs { o.mu.RLock() defer o.mu.RUnlock() var filtered []LoggedEntry for _, entry := range o.logs { - if match(entry) { + if keep(entry) { filtered = append(filtered, entry) } } diff --git a/vendor/golang.org/x/sys/unix/README.md b/vendor/golang.org/x/sys/unix/README.md index 579d2d735..474efad0e 100644 --- a/vendor/golang.org/x/sys/unix/README.md +++ b/vendor/golang.org/x/sys/unix/README.md @@ -76,7 +76,7 @@ arguments can be passed to the kernel. The third is for low-level use by the ForkExec wrapper. 
Unlike the first two, it does not call into the scheduler to let it know that a system call is running. -When porting Go to an new architecture/OS, this file must be implemented for +When porting Go to a new architecture/OS, this file must be implemented for each GOOS/GOARCH pair. ### mksysnum @@ -107,7 +107,7 @@ prototype can be exported (capitalized) or not. Adding a new syscall often just requires adding a new `//sys` function prototype with the desired arguments and a capitalized name so it is exported. However, if you want the interface to the syscall to be different, often one will make an -unexported `//sys` prototype, an then write a custom wrapper in +unexported `//sys` prototype, and then write a custom wrapper in `syscall_${GOOS}.go`. ### types files @@ -137,7 +137,7 @@ some `#if/#elif` macros in your include statements. This script is used to generate the system's various constants. This doesn't just include the error numbers and error strings, but also the signal numbers -an a wide variety of miscellaneous constants. The constants come from the list +and a wide variety of miscellaneous constants. The constants come from the list of include files in the `includes_${uname}` variable. A regex then picks out the desired `#define` statements, and generates the corresponding Go constants. The error numbers and strings are generated from `#include `, and the diff --git a/vendor/golang.org/x/sys/unix/aliases.go b/vendor/golang.org/x/sys/unix/aliases.go index 951fce4d0..abc89c104 100644 --- a/vendor/golang.org/x/sys/unix/aliases.go +++ b/vendor/golang.org/x/sys/unix/aliases.go @@ -2,7 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris +//go:build (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos) && go1.9 +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos // +build go1.9 package unix diff --git a/vendor/golang.org/x/sys/unix/asm_aix_ppc64.s b/vendor/golang.org/x/sys/unix/asm_aix_ppc64.s index 6b4027b33..db9171c2e 100644 --- a/vendor/golang.org/x/sys/unix/asm_aix_ppc64.s +++ b/vendor/golang.org/x/sys/unix/asm_aix_ppc64.s @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build gc // +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_freebsd_386.s b/vendor/golang.org/x/sys/unix/asm_bsd_386.s similarity index 72% rename from vendor/golang.org/x/sys/unix/asm_freebsd_386.s rename to vendor/golang.org/x/sys/unix/asm_bsd_386.s index 49f0ac236..e0fcd9b3d 100644 --- a/vendor/golang.org/x/sys/unix/asm_freebsd_386.s +++ b/vendor/golang.org/x/sys/unix/asm_bsd_386.s @@ -1,14 +1,14 @@ -// Copyright 2009 The Go Authors. All rights reserved. +// Copyright 2021 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build (freebsd || netbsd || openbsd) && gc +// +build freebsd netbsd openbsd // +build gc #include "textflag.h" -// -// System call support for 386, FreeBSD -// +// System call support for 386 BSD // Just jump to package syscall's implementation for all these functions. // The runtime may know about them. 
@@ -22,7 +22,7 @@ TEXT ·Syscall6(SB),NOSPLIT,$0-40 TEXT ·Syscall9(SB),NOSPLIT,$0-52 JMP syscall·Syscall9(SB) -TEXT ·RawSyscall(SB),NOSPLIT,$0-28 +TEXT ·RawSyscall(SB),NOSPLIT,$0-28 JMP syscall·RawSyscall(SB) TEXT ·RawSyscall6(SB),NOSPLIT,$0-40 diff --git a/vendor/golang.org/x/sys/unix/asm_darwin_amd64.s b/vendor/golang.org/x/sys/unix/asm_bsd_amd64.s similarity index 72% rename from vendor/golang.org/x/sys/unix/asm_darwin_amd64.s rename to vendor/golang.org/x/sys/unix/asm_bsd_amd64.s index f2397fde5..2b99c349a 100644 --- a/vendor/golang.org/x/sys/unix/asm_darwin_amd64.s +++ b/vendor/golang.org/x/sys/unix/asm_bsd_amd64.s @@ -1,14 +1,14 @@ -// Copyright 2009 The Go Authors. All rights reserved. +// Copyright 2021 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build (darwin || dragonfly || freebsd || netbsd || openbsd) && gc +// +build darwin dragonfly freebsd netbsd openbsd // +build gc #include "textflag.h" -// -// System call support for AMD64, Darwin -// +// System call support for AMD64 BSD // Just jump to package syscall's implementation for all these functions. // The runtime may know about them. diff --git a/vendor/golang.org/x/sys/unix/asm_freebsd_arm.s b/vendor/golang.org/x/sys/unix/asm_bsd_arm.s similarity index 76% rename from vendor/golang.org/x/sys/unix/asm_freebsd_arm.s rename to vendor/golang.org/x/sys/unix/asm_bsd_arm.s index 6d740db2c..d702d4adc 100644 --- a/vendor/golang.org/x/sys/unix/asm_freebsd_arm.s +++ b/vendor/golang.org/x/sys/unix/asm_bsd_arm.s @@ -1,14 +1,14 @@ -// Copyright 2012 The Go Authors. All rights reserved. +// Copyright 2021 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+//go:build (freebsd || netbsd || openbsd) && gc +// +build freebsd netbsd openbsd // +build gc #include "textflag.h" -// -// System call support for ARM, FreeBSD -// +// System call support for ARM BSD // Just jump to package syscall's implementation for all these functions. // The runtime may know about them. diff --git a/vendor/golang.org/x/sys/unix/asm_netbsd_amd64.s b/vendor/golang.org/x/sys/unix/asm_bsd_arm64.s similarity index 75% rename from vendor/golang.org/x/sys/unix/asm_netbsd_amd64.s rename to vendor/golang.org/x/sys/unix/asm_bsd_arm64.s index e57367c17..fe36a7391 100644 --- a/vendor/golang.org/x/sys/unix/asm_netbsd_amd64.s +++ b/vendor/golang.org/x/sys/unix/asm_bsd_arm64.s @@ -1,14 +1,14 @@ -// Copyright 2009 The Go Authors. All rights reserved. +// Copyright 2021 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build (darwin || freebsd || netbsd || openbsd) && gc +// +build darwin freebsd netbsd openbsd // +build gc #include "textflag.h" -// -// System call support for AMD64, NetBSD -// +// System call support for ARM64 BSD // Just jump to package syscall's implementation for all these functions. // The runtime may know about them. diff --git a/vendor/golang.org/x/sys/unix/asm_darwin_386.s b/vendor/golang.org/x/sys/unix/asm_darwin_386.s deleted file mode 100644 index 8a06b87d7..000000000 --- a/vendor/golang.org/x/sys/unix/asm_darwin_386.s +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build gc - -#include "textflag.h" - -// -// System call support for 386, Darwin -// - -// Just jump to package syscall's implementation for all these functions. -// The runtime may know about them. 
- -TEXT ·Syscall(SB),NOSPLIT,$0-28 - JMP syscall·Syscall(SB) - -TEXT ·Syscall6(SB),NOSPLIT,$0-40 - JMP syscall·Syscall6(SB) - -TEXT ·Syscall9(SB),NOSPLIT,$0-52 - JMP syscall·Syscall9(SB) - -TEXT ·RawSyscall(SB),NOSPLIT,$0-28 - JMP syscall·RawSyscall(SB) - -TEXT ·RawSyscall6(SB),NOSPLIT,$0-40 - JMP syscall·RawSyscall6(SB) diff --git a/vendor/golang.org/x/sys/unix/asm_darwin_arm.s b/vendor/golang.org/x/sys/unix/asm_darwin_arm.s deleted file mode 100644 index c9e6b6fc8..000000000 --- a/vendor/golang.org/x/sys/unix/asm_darwin_arm.s +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build gc -// +build arm,darwin - -#include "textflag.h" - -// -// System call support for ARM, Darwin -// - -// Just jump to package syscall's implementation for all these functions. -// The runtime may know about them. - -TEXT ·Syscall(SB),NOSPLIT,$0-28 - B syscall·Syscall(SB) - -TEXT ·Syscall6(SB),NOSPLIT,$0-40 - B syscall·Syscall6(SB) - -TEXT ·Syscall9(SB),NOSPLIT,$0-52 - B syscall·Syscall9(SB) - -TEXT ·RawSyscall(SB),NOSPLIT,$0-28 - B syscall·RawSyscall(SB) - -TEXT ·RawSyscall6(SB),NOSPLIT,$0-40 - B syscall·RawSyscall6(SB) diff --git a/vendor/golang.org/x/sys/unix/asm_darwin_arm64.s b/vendor/golang.org/x/sys/unix/asm_darwin_arm64.s deleted file mode 100644 index 89843f8f4..000000000 --- a/vendor/golang.org/x/sys/unix/asm_darwin_arm64.s +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build gc -// +build arm64,darwin - -#include "textflag.h" - -// -// System call support for AMD64, Darwin -// - -// Just jump to package syscall's implementation for all these functions. -// The runtime may know about them. 
- -TEXT ·Syscall(SB),NOSPLIT,$0-56 - B syscall·Syscall(SB) - -TEXT ·Syscall6(SB),NOSPLIT,$0-80 - B syscall·Syscall6(SB) - -TEXT ·Syscall9(SB),NOSPLIT,$0-104 - B syscall·Syscall9(SB) - -TEXT ·RawSyscall(SB),NOSPLIT,$0-56 - B syscall·RawSyscall(SB) - -TEXT ·RawSyscall6(SB),NOSPLIT,$0-80 - B syscall·RawSyscall6(SB) diff --git a/vendor/golang.org/x/sys/unix/asm_dragonfly_amd64.s b/vendor/golang.org/x/sys/unix/asm_dragonfly_amd64.s deleted file mode 100644 index 27674e1ca..000000000 --- a/vendor/golang.org/x/sys/unix/asm_dragonfly_amd64.s +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build gc - -#include "textflag.h" - -// -// System call support for AMD64, DragonFly -// - -// Just jump to package syscall's implementation for all these functions. -// The runtime may know about them. - -TEXT ·Syscall(SB),NOSPLIT,$0-56 - JMP syscall·Syscall(SB) - -TEXT ·Syscall6(SB),NOSPLIT,$0-80 - JMP syscall·Syscall6(SB) - -TEXT ·Syscall9(SB),NOSPLIT,$0-104 - JMP syscall·Syscall9(SB) - -TEXT ·RawSyscall(SB),NOSPLIT,$0-56 - JMP syscall·RawSyscall(SB) - -TEXT ·RawSyscall6(SB),NOSPLIT,$0-80 - JMP syscall·RawSyscall6(SB) diff --git a/vendor/golang.org/x/sys/unix/asm_freebsd_amd64.s b/vendor/golang.org/x/sys/unix/asm_freebsd_amd64.s deleted file mode 100644 index f2dfc57b8..000000000 --- a/vendor/golang.org/x/sys/unix/asm_freebsd_amd64.s +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build gc - -#include "textflag.h" - -// -// System call support for AMD64, FreeBSD -// - -// Just jump to package syscall's implementation for all these functions. -// The runtime may know about them. 
- -TEXT ·Syscall(SB),NOSPLIT,$0-56 - JMP syscall·Syscall(SB) - -TEXT ·Syscall6(SB),NOSPLIT,$0-80 - JMP syscall·Syscall6(SB) - -TEXT ·Syscall9(SB),NOSPLIT,$0-104 - JMP syscall·Syscall9(SB) - -TEXT ·RawSyscall(SB),NOSPLIT,$0-56 - JMP syscall·RawSyscall(SB) - -TEXT ·RawSyscall6(SB),NOSPLIT,$0-80 - JMP syscall·RawSyscall6(SB) diff --git a/vendor/golang.org/x/sys/unix/asm_freebsd_arm64.s b/vendor/golang.org/x/sys/unix/asm_freebsd_arm64.s deleted file mode 100644 index a8f5a29b3..000000000 --- a/vendor/golang.org/x/sys/unix/asm_freebsd_arm64.s +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build gc - -#include "textflag.h" - -// -// System call support for ARM64, FreeBSD -// - -// Just jump to package syscall's implementation for all these functions. -// The runtime may know about them. - -TEXT ·Syscall(SB),NOSPLIT,$0-56 - JMP syscall·Syscall(SB) - -TEXT ·Syscall6(SB),NOSPLIT,$0-80 - JMP syscall·Syscall6(SB) - -TEXT ·Syscall9(SB),NOSPLIT,$0-104 - JMP syscall·Syscall9(SB) - -TEXT ·RawSyscall(SB),NOSPLIT,$0-56 - JMP syscall·RawSyscall(SB) - -TEXT ·RawSyscall6(SB),NOSPLIT,$0-80 - JMP syscall·RawSyscall6(SB) diff --git a/vendor/golang.org/x/sys/unix/asm_linux_386.s b/vendor/golang.org/x/sys/unix/asm_linux_386.s index 0655ecbfb..8fd101d07 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_386.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_386.s @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+//go:build gc // +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_amd64.s b/vendor/golang.org/x/sys/unix/asm_linux_amd64.s index bc3fb6ac3..7ed38e43c 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_amd64.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_amd64.s @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build gc // +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_arm.s b/vendor/golang.org/x/sys/unix/asm_linux_arm.s index 55b13c7ba..8ef1d5140 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_arm.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_arm.s @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build gc // +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_arm64.s b/vendor/golang.org/x/sys/unix/asm_linux_arm64.s index 22a83d8e3..98ae02760 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_arm64.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_arm64.s @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build linux && arm64 && gc // +build linux // +build arm64 // +build gc diff --git a/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s b/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s index dc222b90c..21231d2ce 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+//go:build linux && (mips64 || mips64le) && gc // +build linux // +build mips64 mips64le // +build gc diff --git a/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s b/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s index d333f13cf..6783b26c6 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build linux && (mips || mipsle) && gc // +build linux // +build mips mipsle // +build gc diff --git a/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s b/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s index 459a629c2..19d498934 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build linux && (ppc64 || ppc64le) && gc // +build linux // +build ppc64 ppc64le // +build gc diff --git a/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s b/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s index 04d38497c..e42eb81d5 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s @@ -2,7 +2,9 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build riscv64,gc +//go:build riscv64 && gc +// +build riscv64 +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_s390x.s b/vendor/golang.org/x/sys/unix/asm_linux_s390x.s index cc303989e..c46aab339 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_s390x.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_s390x.s @@ -2,8 +2,9 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build s390x +//go:build linux && s390x && gc // +build linux +// +build s390x // +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_netbsd_386.s b/vendor/golang.org/x/sys/unix/asm_netbsd_386.s deleted file mode 100644 index ae7b498d5..000000000 --- a/vendor/golang.org/x/sys/unix/asm_netbsd_386.s +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build gc - -#include "textflag.h" - -// -// System call support for 386, NetBSD -// - -// Just jump to package syscall's implementation for all these functions. -// The runtime may know about them. - -TEXT ·Syscall(SB),NOSPLIT,$0-28 - JMP syscall·Syscall(SB) - -TEXT ·Syscall6(SB),NOSPLIT,$0-40 - JMP syscall·Syscall6(SB) - -TEXT ·Syscall9(SB),NOSPLIT,$0-52 - JMP syscall·Syscall9(SB) - -TEXT ·RawSyscall(SB),NOSPLIT,$0-28 - JMP syscall·RawSyscall(SB) - -TEXT ·RawSyscall6(SB),NOSPLIT,$0-40 - JMP syscall·RawSyscall6(SB) diff --git a/vendor/golang.org/x/sys/unix/asm_netbsd_arm.s b/vendor/golang.org/x/sys/unix/asm_netbsd_arm.s deleted file mode 100644 index d7da175e1..000000000 --- a/vendor/golang.org/x/sys/unix/asm_netbsd_arm.s +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build gc - -#include "textflag.h" - -// -// System call support for ARM, NetBSD -// - -// Just jump to package syscall's implementation for all these functions. -// The runtime may know about them. 
- -TEXT ·Syscall(SB),NOSPLIT,$0-28 - B syscall·Syscall(SB) - -TEXT ·Syscall6(SB),NOSPLIT,$0-40 - B syscall·Syscall6(SB) - -TEXT ·Syscall9(SB),NOSPLIT,$0-52 - B syscall·Syscall9(SB) - -TEXT ·RawSyscall(SB),NOSPLIT,$0-28 - B syscall·RawSyscall(SB) - -TEXT ·RawSyscall6(SB),NOSPLIT,$0-40 - B syscall·RawSyscall6(SB) diff --git a/vendor/golang.org/x/sys/unix/asm_netbsd_arm64.s b/vendor/golang.org/x/sys/unix/asm_netbsd_arm64.s deleted file mode 100644 index e7cbe1904..000000000 --- a/vendor/golang.org/x/sys/unix/asm_netbsd_arm64.s +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build gc - -#include "textflag.h" - -// -// System call support for ARM64, NetBSD -// - -// Just jump to package syscall's implementation for all these functions. -// The runtime may know about them. - -TEXT ·Syscall(SB),NOSPLIT,$0-56 - B syscall·Syscall(SB) - -TEXT ·Syscall6(SB),NOSPLIT,$0-80 - B syscall·Syscall6(SB) - -TEXT ·Syscall9(SB),NOSPLIT,$0-104 - B syscall·Syscall9(SB) - -TEXT ·RawSyscall(SB),NOSPLIT,$0-56 - B syscall·RawSyscall(SB) - -TEXT ·RawSyscall6(SB),NOSPLIT,$0-80 - B syscall·RawSyscall6(SB) diff --git a/vendor/golang.org/x/sys/unix/asm_openbsd_386.s b/vendor/golang.org/x/sys/unix/asm_openbsd_386.s deleted file mode 100644 index 2f00b0310..000000000 --- a/vendor/golang.org/x/sys/unix/asm_openbsd_386.s +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build gc - -#include "textflag.h" - -// -// System call support for 386, OpenBSD -// - -// Just jump to package syscall's implementation for all these functions. -// The runtime may know about them. 
- -TEXT ·Syscall(SB),NOSPLIT,$0-28 - JMP syscall·Syscall(SB) - -TEXT ·Syscall6(SB),NOSPLIT,$0-40 - JMP syscall·Syscall6(SB) - -TEXT ·Syscall9(SB),NOSPLIT,$0-52 - JMP syscall·Syscall9(SB) - -TEXT ·RawSyscall(SB),NOSPLIT,$0-28 - JMP syscall·RawSyscall(SB) - -TEXT ·RawSyscall6(SB),NOSPLIT,$0-40 - JMP syscall·RawSyscall6(SB) diff --git a/vendor/golang.org/x/sys/unix/asm_openbsd_amd64.s b/vendor/golang.org/x/sys/unix/asm_openbsd_amd64.s deleted file mode 100644 index 07632c99c..000000000 --- a/vendor/golang.org/x/sys/unix/asm_openbsd_amd64.s +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build gc - -#include "textflag.h" - -// -// System call support for AMD64, OpenBSD -// - -// Just jump to package syscall's implementation for all these functions. -// The runtime may know about them. - -TEXT ·Syscall(SB),NOSPLIT,$0-56 - JMP syscall·Syscall(SB) - -TEXT ·Syscall6(SB),NOSPLIT,$0-80 - JMP syscall·Syscall6(SB) - -TEXT ·Syscall9(SB),NOSPLIT,$0-104 - JMP syscall·Syscall9(SB) - -TEXT ·RawSyscall(SB),NOSPLIT,$0-56 - JMP syscall·RawSyscall(SB) - -TEXT ·RawSyscall6(SB),NOSPLIT,$0-80 - JMP syscall·RawSyscall6(SB) diff --git a/vendor/golang.org/x/sys/unix/asm_openbsd_arm.s b/vendor/golang.org/x/sys/unix/asm_openbsd_arm.s deleted file mode 100644 index 73e997320..000000000 --- a/vendor/golang.org/x/sys/unix/asm_openbsd_arm.s +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build gc - -#include "textflag.h" - -// -// System call support for ARM, OpenBSD -// - -// Just jump to package syscall's implementation for all these functions. -// The runtime may know about them. 
- -TEXT ·Syscall(SB),NOSPLIT,$0-28 - B syscall·Syscall(SB) - -TEXT ·Syscall6(SB),NOSPLIT,$0-40 - B syscall·Syscall6(SB) - -TEXT ·Syscall9(SB),NOSPLIT,$0-52 - B syscall·Syscall9(SB) - -TEXT ·RawSyscall(SB),NOSPLIT,$0-28 - B syscall·RawSyscall(SB) - -TEXT ·RawSyscall6(SB),NOSPLIT,$0-40 - B syscall·RawSyscall6(SB) diff --git a/vendor/golang.org/x/sys/unix/asm_openbsd_arm64.s b/vendor/golang.org/x/sys/unix/asm_openbsd_arm64.s deleted file mode 100644 index c47302aa4..000000000 --- a/vendor/golang.org/x/sys/unix/asm_openbsd_arm64.s +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build gc - -#include "textflag.h" - -// -// System call support for arm64, OpenBSD -// - -// Just jump to package syscall's implementation for all these functions. -// The runtime may know about them. - -TEXT ·Syscall(SB),NOSPLIT,$0-56 - JMP syscall·Syscall(SB) - -TEXT ·Syscall6(SB),NOSPLIT,$0-80 - JMP syscall·Syscall6(SB) - -TEXT ·Syscall9(SB),NOSPLIT,$0-104 - JMP syscall·Syscall9(SB) - -TEXT ·RawSyscall(SB),NOSPLIT,$0-56 - JMP syscall·RawSyscall(SB) - -TEXT ·RawSyscall6(SB),NOSPLIT,$0-80 - JMP syscall·RawSyscall6(SB) diff --git a/vendor/golang.org/x/sys/unix/asm_openbsd_mips64.s b/vendor/golang.org/x/sys/unix/asm_openbsd_mips64.s index 47c93fcb6..5e7a1169c 100644 --- a/vendor/golang.org/x/sys/unix/asm_openbsd_mips64.s +++ b/vendor/golang.org/x/sys/unix/asm_openbsd_mips64.s @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+//go:build gc // +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s b/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s index 1f2c755a7..f8c5394c1 100644 --- a/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s +++ b/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build gc // +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_zos_s390x.s b/vendor/golang.org/x/sys/unix/asm_zos_s390x.s new file mode 100644 index 000000000..3b54e1858 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/asm_zos_s390x.s @@ -0,0 +1,426 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build zos && s390x && gc +// +build zos +// +build s390x +// +build gc + +#include "textflag.h" + +#define PSALAA 1208(R0) +#define GTAB64(x) 80(x) +#define LCA64(x) 88(x) +#define CAA(x) 8(x) +#define EDCHPXV(x) 1016(x) // in the CAA +#define SAVSTACK_ASYNC(x) 336(x) // in the LCA + +// SS_*, where x=SAVSTACK_ASYNC +#define SS_LE(x) 0(x) +#define SS_GO(x) 8(x) +#define SS_ERRNO(x) 16(x) +#define SS_ERRNOJR(x) 20(x) + +#define LE_CALL BYTE $0x0D; BYTE $0x76; // BL R7, R6 + +TEXT ·clearErrno(SB),NOSPLIT,$0-0 + BL addrerrno<>(SB) + MOVD $0, 0(R3) + RET + +// Returns the address of errno in R3. +TEXT addrerrno<>(SB),NOSPLIT|NOFRAME,$0-0 + // Get library control area (LCA). + MOVW PSALAA, R8 + MOVD LCA64(R8), R8 + + // Get __errno FuncDesc. + MOVD CAA(R8), R9 + MOVD EDCHPXV(R9), R9 + ADD $(0x156*16), R9 + LMG 0(R9), R5, R6 + + // Switch to saved LE stack. + MOVD SAVSTACK_ASYNC(R8), R9 + MOVD 0(R9), R4 + MOVD $0, 0(R9) + + // Call __errno function. + LE_CALL + NOPH + + // Switch back to Go stack. + XOR R0, R0 // Restore R0 to $0. + MOVD R4, 0(R9) // Save stack pointer. 
+ RET + +TEXT ·syscall_syscall(SB),NOSPLIT,$0-56 + BL runtime·entersyscall(SB) + MOVD a1+8(FP), R1 + MOVD a2+16(FP), R2 + MOVD a3+24(FP), R3 + + // Get library control area (LCA). + MOVW PSALAA, R8 + MOVD LCA64(R8), R8 + + // Get function. + MOVD CAA(R8), R9 + MOVD EDCHPXV(R9), R9 + MOVD trap+0(FP), R5 + SLD $4, R5 + ADD R5, R9 + LMG 0(R9), R5, R6 + + // Restore LE stack. + MOVD SAVSTACK_ASYNC(R8), R9 + MOVD 0(R9), R4 + MOVD $0, 0(R9) + + // Call function. + LE_CALL + NOPH + XOR R0, R0 // Restore R0 to $0. + MOVD R4, 0(R9) // Save stack pointer. + + MOVD R3, r1+32(FP) + MOVD R0, r2+40(FP) + MOVD R0, err+48(FP) + MOVW R3, R4 + CMP R4, $-1 + BNE done + BL addrerrno<>(SB) + MOVWZ 0(R3), R3 + MOVD R3, err+48(FP) +done: + BL runtime·exitsyscall(SB) + RET + +TEXT ·syscall_rawsyscall(SB),NOSPLIT,$0-56 + MOVD a1+8(FP), R1 + MOVD a2+16(FP), R2 + MOVD a3+24(FP), R3 + + // Get library control area (LCA). + MOVW PSALAA, R8 + MOVD LCA64(R8), R8 + + // Get function. + MOVD CAA(R8), R9 + MOVD EDCHPXV(R9), R9 + MOVD trap+0(FP), R5 + SLD $4, R5 + ADD R5, R9 + LMG 0(R9), R5, R6 + + // Restore LE stack. + MOVD SAVSTACK_ASYNC(R8), R9 + MOVD 0(R9), R4 + MOVD $0, 0(R9) + + // Call function. + LE_CALL + NOPH + XOR R0, R0 // Restore R0 to $0. + MOVD R4, 0(R9) // Save stack pointer. + + MOVD R3, r1+32(FP) + MOVD R0, r2+40(FP) + MOVD R0, err+48(FP) + MOVW R3, R4 + CMP R4, $-1 + BNE done + BL addrerrno<>(SB) + MOVWZ 0(R3), R3 + MOVD R3, err+48(FP) +done: + RET + +TEXT ·syscall_syscall6(SB),NOSPLIT,$0-80 + BL runtime·entersyscall(SB) + MOVD a1+8(FP), R1 + MOVD a2+16(FP), R2 + MOVD a3+24(FP), R3 + + // Get library control area (LCA). + MOVW PSALAA, R8 + MOVD LCA64(R8), R8 + + // Get function. + MOVD CAA(R8), R9 + MOVD EDCHPXV(R9), R9 + MOVD trap+0(FP), R5 + SLD $4, R5 + ADD R5, R9 + LMG 0(R9), R5, R6 + + // Restore LE stack. + MOVD SAVSTACK_ASYNC(R8), R9 + MOVD 0(R9), R4 + MOVD $0, 0(R9) + + // Fill in parameter list. 
+	MOVD	a4+32(FP), R12
+	MOVD	R12, (2176+24)(R4)
+	MOVD	a5+40(FP), R12
+	MOVD	R12, (2176+32)(R4)
+	MOVD	a6+48(FP), R12
+	MOVD	R12, (2176+40)(R4)
+
+	// Call function.
+	LE_CALL
+	NOPH
+	XOR	R0, R0      // Restore R0 to $0.
+	MOVD	R4, 0(R9)   // Save stack pointer.
+
+	MOVD	R3, r1+56(FP)
+	MOVD	R0, r2+64(FP)
+	MOVD	R0, err+72(FP)
+	MOVW	R3, R4
+	CMP	R4, $-1
+	BNE	done
+	BL	addrerrno<>(SB)
+	MOVWZ	0(R3), R3
+	MOVD	R3, err+72(FP)
+done:
+	BL	runtime·exitsyscall(SB)
+	RET
+
+TEXT ·syscall_rawsyscall6(SB),NOSPLIT,$0-80
+	MOVD	a1+8(FP), R1
+	MOVD	a2+16(FP), R2
+	MOVD	a3+24(FP), R3
+
+	// Get library control area (LCA).
+	MOVW	PSALAA, R8
+	MOVD	LCA64(R8), R8
+
+	// Get function.
+	MOVD	CAA(R8), R9
+	MOVD	EDCHPXV(R9), R9
+	MOVD	trap+0(FP), R5
+	SLD	$4, R5
+	ADD	R5, R9
+	LMG	0(R9), R5, R6
+
+	// Restore LE stack.
+	MOVD	SAVSTACK_ASYNC(R8), R9
+	MOVD	0(R9), R4
+	MOVD	$0, 0(R9)
+
+	// Fill in parameter list.
+	MOVD	a4+32(FP), R12
+	MOVD	R12, (2176+24)(R4)
+	MOVD	a5+40(FP), R12
+	MOVD	R12, (2176+32)(R4)
+	MOVD	a6+48(FP), R12
+	MOVD	R12, (2176+40)(R4)
+
+	// Call function.
+	LE_CALL
+	NOPH
+	XOR	R0, R0      // Restore R0 to $0.
+	MOVD	R4, 0(R9)   // Save stack pointer.
+
+	MOVD	R3, r1+56(FP)
+	MOVD	R0, r2+64(FP)
+	MOVD	R0, err+72(FP)
+	MOVW	R3, R4
+	CMP	R4, $-1
+	BNE	done
+	BL	addrerrno<>(SB)
+	MOVWZ	0(R3), R3
+	MOVD	R3, err+72(FP)
+done:
+	RET
+
+TEXT ·syscall_syscall9(SB),NOSPLIT,$0
+	BL	runtime·entersyscall(SB)
+	MOVD	a1+8(FP), R1
+	MOVD	a2+16(FP), R2
+	MOVD	a3+24(FP), R3
+
+	// Get library control area (LCA).
+	MOVW	PSALAA, R8
+	MOVD	LCA64(R8), R8
+
+	// Get function.
+	MOVD	CAA(R8), R9
+	MOVD	EDCHPXV(R9), R9
+	MOVD	trap+0(FP), R5
+	SLD	$4, R5
+	ADD	R5, R9
+	LMG	0(R9), R5, R6
+
+	// Restore LE stack.
+	MOVD	SAVSTACK_ASYNC(R8), R9
+	MOVD	0(R9), R4
+	MOVD	$0, 0(R9)
+
+	// Fill in parameter list.
+ MOVD a4+32(FP), R12 + MOVD R12, (2176+24)(R4) + MOVD a5+40(FP), R12 + MOVD R12, (2176+32)(R4) + MOVD a6+48(FP), R12 + MOVD R12, (2176+40)(R4) + MOVD a7+56(FP), R12 + MOVD R12, (2176+48)(R4) + MOVD a8+64(FP), R12 + MOVD R12, (2176+56)(R4) + MOVD a9+72(FP), R12 + MOVD R12, (2176+64)(R4) + + // Call function. + LE_CALL + NOPH + XOR R0, R0 // Restore R0 to $0. + MOVD R4, 0(R9) // Save stack pointer. + + MOVD R3, r1+80(FP) + MOVD R0, r2+88(FP) + MOVD R0, err+96(FP) + MOVW R3, R4 + CMP R4, $-1 + BNE done + BL addrerrno<>(SB) + MOVWZ 0(R3), R3 + MOVD R3, err+96(FP) +done: + BL runtime·exitsyscall(SB) + RET + +TEXT ·syscall_rawsyscall9(SB),NOSPLIT,$0 + MOVD a1+8(FP), R1 + MOVD a2+16(FP), R2 + MOVD a3+24(FP), R3 + + // Get library control area (LCA). + MOVW PSALAA, R8 + MOVD LCA64(R8), R8 + + // Get function. + MOVD CAA(R8), R9 + MOVD EDCHPXV(R9), R9 + MOVD trap+0(FP), R5 + SLD $4, R5 + ADD R5, R9 + LMG 0(R9), R5, R6 + + // Restore LE stack. + MOVD SAVSTACK_ASYNC(R8), R9 + MOVD 0(R9), R4 + MOVD $0, 0(R9) + + // Fill in parameter list. + MOVD a4+32(FP), R12 + MOVD R12, (2176+24)(R4) + MOVD a5+40(FP), R12 + MOVD R12, (2176+32)(R4) + MOVD a6+48(FP), R12 + MOVD R12, (2176+40)(R4) + MOVD a7+56(FP), R12 + MOVD R12, (2176+48)(R4) + MOVD a8+64(FP), R12 + MOVD R12, (2176+56)(R4) + MOVD a9+72(FP), R12 + MOVD R12, (2176+64)(R4) + + // Call function. + LE_CALL + NOPH + XOR R0, R0 // Restore R0 to $0. + MOVD R4, 0(R9) // Save stack pointer. 
+ + MOVD R3, r1+80(FP) + MOVD R0, r2+88(FP) + MOVD R0, err+96(FP) + MOVW R3, R4 + CMP R4, $-1 + BNE done + BL addrerrno<>(SB) + MOVWZ 0(R3), R3 + MOVD R3, err+96(FP) +done: + RET + +// func svcCall(fnptr unsafe.Pointer, argv *unsafe.Pointer, dsa *uint64) +TEXT ·svcCall(SB),NOSPLIT,$0 + BL runtime·save_g(SB) // Save g and stack pointer + MOVW PSALAA, R8 + MOVD LCA64(R8), R8 + MOVD SAVSTACK_ASYNC(R8), R9 + MOVD R15, 0(R9) + + MOVD argv+8(FP), R1 // Move function arguments into registers + MOVD dsa+16(FP), g + MOVD fnptr+0(FP), R15 + + BYTE $0x0D // Branch to function + BYTE $0xEF + + BL runtime·load_g(SB) // Restore g and stack pointer + MOVW PSALAA, R8 + MOVD LCA64(R8), R8 + MOVD SAVSTACK_ASYNC(R8), R9 + MOVD 0(R9), R15 + + RET + +// func svcLoad(name *byte) unsafe.Pointer +TEXT ·svcLoad(SB),NOSPLIT,$0 + MOVD R15, R2 // Save go stack pointer + MOVD name+0(FP), R0 // Move SVC args into registers + MOVD $0x80000000, R1 + MOVD $0, R15 + BYTE $0x0A // SVC 08 LOAD + BYTE $0x08 + MOVW R15, R3 // Save return code from SVC + MOVD R2, R15 // Restore go stack pointer + CMP R3, $0 // Check SVC return code + BNE error + + MOVD $-2, R3 // Reset last bit of entry point to zero + AND R0, R3 + MOVD R3, addr+8(FP) // Return entry point returned by SVC + CMP R0, R3 // Check if last bit of entry point was set + BNE done + + MOVD R15, R2 // Save go stack pointer + MOVD $0, R15 // Move SVC args into registers (entry point still in r0 from SVC 08) + BYTE $0x0A // SVC 09 DELETE + BYTE $0x09 + MOVD R2, R15 // Restore go stack pointer + +error: + MOVD $0, addr+8(FP) // Return 0 on failure +done: + XOR R0, R0 // Reset r0 to 0 + RET + +// func svcUnload(name *byte, fnptr unsafe.Pointer) int64 +TEXT ·svcUnload(SB),NOSPLIT,$0 + MOVD R15, R2 // Save go stack pointer + MOVD name+0(FP), R0 // Move SVC args into registers + MOVD addr+8(FP), R15 + BYTE $0x0A // SVC 09 + BYTE $0x09 + XOR R0, R0 // Reset r0 to 0 + MOVD R15, R1 // Save SVC return code + MOVD R2, R15 // Restore go stack pointer + MOVD 
R1, rc+0(FP) // Return SVC return code + RET + +// func gettid() uint64 +TEXT ·gettid(SB), NOSPLIT, $0 + // Get library control area (LCA). + MOVW PSALAA, R8 + MOVD LCA64(R8), R8 + + // Get CEECAATHDID + MOVD CAA(R8), R9 + MOVD 0x3D0(R9), R9 + MOVD R9, ret+0(FP) + + RET diff --git a/vendor/golang.org/x/sys/unix/cap_freebsd.go b/vendor/golang.org/x/sys/unix/cap_freebsd.go index df5204877..0b7c6adb8 100644 --- a/vendor/golang.org/x/sys/unix/cap_freebsd.go +++ b/vendor/golang.org/x/sys/unix/cap_freebsd.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build freebsd // +build freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/constants.go b/vendor/golang.org/x/sys/unix/constants.go index 3a6ac648d..394a3965b 100644 --- a/vendor/golang.org/x/sys/unix/constants.go +++ b/vendor/golang.org/x/sys/unix/constants.go @@ -2,7 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris +//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos package unix diff --git a/vendor/golang.org/x/sys/unix/dev_aix_ppc.go b/vendor/golang.org/x/sys/unix/dev_aix_ppc.go index 5e5fb4510..65a998508 100644 --- a/vendor/golang.org/x/sys/unix/dev_aix_ppc.go +++ b/vendor/golang.org/x/sys/unix/dev_aix_ppc.go @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build aix -// +build ppc +//go:build aix && ppc +// +build aix,ppc // Functions to access/create device major and minor numbers matching the // encoding used by AIX. 
diff --git a/vendor/golang.org/x/sys/unix/dev_aix_ppc64.go b/vendor/golang.org/x/sys/unix/dev_aix_ppc64.go index 8b401244c..8fc08ad0a 100644 --- a/vendor/golang.org/x/sys/unix/dev_aix_ppc64.go +++ b/vendor/golang.org/x/sys/unix/dev_aix_ppc64.go @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build aix -// +build ppc64 +//go:build aix && ppc64 +// +build aix,ppc64 // Functions to access/create device major and minor numbers matching the // encoding used AIX. diff --git a/vendor/golang.org/x/sys/unix/dev_zos.go b/vendor/golang.org/x/sys/unix/dev_zos.go new file mode 100644 index 000000000..a388e59a0 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/dev_zos.go @@ -0,0 +1,29 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build zos && s390x +// +build zos,s390x + +// Functions to access/create device major and minor numbers matching the +// encoding used by z/OS. +// +// The information below is extracted and adapted from macros. + +package unix + +// Major returns the major component of a z/OS device number. +func Major(dev uint64) uint32 { + return uint32((dev >> 16) & 0x0000FFFF) +} + +// Minor returns the minor component of a z/OS device number. +func Minor(dev uint64) uint32 { + return uint32(dev & 0x0000FFFF) +} + +// Mkdev returns a z/OS device number generated from the given major and minor +// components. +func Mkdev(major, minor uint32) uint64 { + return (uint64(major) << 16) | uint64(minor) +} diff --git a/vendor/golang.org/x/sys/unix/dirent.go b/vendor/golang.org/x/sys/unix/dirent.go index 304016b68..e74e5eaa3 100644 --- a/vendor/golang.org/x/sys/unix/dirent.go +++ b/vendor/golang.org/x/sys/unix/dirent.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris // +build aix darwin dragonfly freebsd linux netbsd openbsd solaris package unix diff --git a/vendor/golang.org/x/sys/unix/endian_big.go b/vendor/golang.org/x/sys/unix/endian_big.go index 86781eac2..a52026557 100644 --- a/vendor/golang.org/x/sys/unix/endian_big.go +++ b/vendor/golang.org/x/sys/unix/endian_big.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // +//go:build armbe || arm64be || m68k || mips || mips64 || mips64p32 || ppc || ppc64 || s390 || s390x || shbe || sparc || sparc64 // +build armbe arm64be m68k mips mips64 mips64p32 ppc ppc64 s390 s390x shbe sparc sparc64 package unix diff --git a/vendor/golang.org/x/sys/unix/endian_little.go b/vendor/golang.org/x/sys/unix/endian_little.go index 8822d8541..4362f47e2 100644 --- a/vendor/golang.org/x/sys/unix/endian_little.go +++ b/vendor/golang.org/x/sys/unix/endian_little.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // +//go:build 386 || amd64 || amd64p32 || alpha || arm || arm64 || mipsle || mips64le || mips64p32le || nios2 || ppc64le || riscv || riscv64 || sh // +build 386 amd64 amd64p32 alpha arm arm64 mipsle mips64le mips64p32le nios2 ppc64le riscv riscv64 sh package unix diff --git a/vendor/golang.org/x/sys/unix/env_unix.go b/vendor/golang.org/x/sys/unix/env_unix.go index 84178b0a1..29ccc4d13 100644 --- a/vendor/golang.org/x/sys/unix/env_unix.go +++ b/vendor/golang.org/x/sys/unix/env_unix.go @@ -2,7 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris +//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos // Unix environment variables. diff --git a/vendor/golang.org/x/sys/unix/epoll_zos.go b/vendor/golang.org/x/sys/unix/epoll_zos.go new file mode 100644 index 000000000..cedaf7e02 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/epoll_zos.go @@ -0,0 +1,221 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build zos && s390x +// +build zos,s390x + +package unix + +import ( + "sync" +) + +// This file simulates epoll on z/OS using poll. + +// Analogous to epoll_event on Linux. +// TODO(neeilan): Pad is because the Linux kernel expects a 96-bit struct. We never pass this to the kernel; remove? +type EpollEvent struct { + Events uint32 + Fd int32 + Pad int32 +} + +const ( + EPOLLERR = 0x8 + EPOLLHUP = 0x10 + EPOLLIN = 0x1 + EPOLLMSG = 0x400 + EPOLLOUT = 0x4 + EPOLLPRI = 0x2 + EPOLLRDBAND = 0x80 + EPOLLRDNORM = 0x40 + EPOLLWRBAND = 0x200 + EPOLLWRNORM = 0x100 + EPOLL_CTL_ADD = 0x1 + EPOLL_CTL_DEL = 0x2 + EPOLL_CTL_MOD = 0x3 + // The following constants are part of the epoll API, but represent + // currently unsupported functionality on z/OS. + // EPOLL_CLOEXEC = 0x80000 + // EPOLLET = 0x80000000 + // EPOLLONESHOT = 0x40000000 + // EPOLLRDHUP = 0x2000 // Typically used with edge-triggered notis + // EPOLLEXCLUSIVE = 0x10000000 // Exclusive wake-up mode + // EPOLLWAKEUP = 0x20000000 // Relies on Linux's BLOCK_SUSPEND capability +) + +// TODO(neeilan): We can eliminate these epToPoll / pToEpoll calls by using identical mask values for POLL/EPOLL +// constants where possible The lower 16 bits of epoll events (uint32) can fit any system poll event (int16). + +// epToPollEvt converts epoll event field to poll equivalent. 
+// In epoll, Events is a 32-bit field, while poll uses 16 bits. +func epToPollEvt(events uint32) int16 { + var ep2p = map[uint32]int16{ + EPOLLIN: POLLIN, + EPOLLOUT: POLLOUT, + EPOLLHUP: POLLHUP, + EPOLLPRI: POLLPRI, + EPOLLERR: POLLERR, + } + + var pollEvts int16 = 0 + for epEvt, pEvt := range ep2p { + if (events & epEvt) != 0 { + pollEvts |= pEvt + } + } + + return pollEvts +} + +// pToEpollEvt converts 16 bit poll event bitfields to 32-bit epoll event fields. +func pToEpollEvt(revents int16) uint32 { + var p2ep = map[int16]uint32{ + POLLIN: EPOLLIN, + POLLOUT: EPOLLOUT, + POLLHUP: EPOLLHUP, + POLLPRI: EPOLLPRI, + POLLERR: EPOLLERR, + } + + var epollEvts uint32 = 0 + for pEvt, epEvt := range p2ep { + if (revents & pEvt) != 0 { + epollEvts |= epEvt + } + } + + return epollEvts +} + +// Per-process epoll implementation. +type epollImpl struct { + mu sync.Mutex + epfd2ep map[int]*eventPoll + nextEpfd int +} + +// eventPoll holds a set of file descriptors being watched by the process. A process can have multiple epoll instances. +// On Linux, this is an in-kernel data structure accessed through a fd. +type eventPoll struct { + mu sync.Mutex + fds map[int]*EpollEvent +} + +// epoll impl for this process. 
+var impl epollImpl = epollImpl{ + epfd2ep: make(map[int]*eventPoll), + nextEpfd: 0, +} + +func (e *epollImpl) epollcreate(size int) (epfd int, err error) { + e.mu.Lock() + defer e.mu.Unlock() + epfd = e.nextEpfd + e.nextEpfd++ + + e.epfd2ep[epfd] = &eventPoll{ + fds: make(map[int]*EpollEvent), + } + return epfd, nil +} + +func (e *epollImpl) epollcreate1(flag int) (fd int, err error) { + return e.epollcreate(4) +} + +func (e *epollImpl) epollctl(epfd int, op int, fd int, event *EpollEvent) (err error) { + e.mu.Lock() + defer e.mu.Unlock() + + ep, ok := e.epfd2ep[epfd] + if !ok { + + return EBADF + } + + switch op { + case EPOLL_CTL_ADD: + // TODO(neeilan): When we make epfds and fds disjoint, detect epoll + // loops here (instances watching each other) and return ELOOP. + if _, ok := ep.fds[fd]; ok { + return EEXIST + } + ep.fds[fd] = event + case EPOLL_CTL_MOD: + if _, ok := ep.fds[fd]; !ok { + return ENOENT + } + ep.fds[fd] = event + case EPOLL_CTL_DEL: + if _, ok := ep.fds[fd]; !ok { + return ENOENT + } + delete(ep.fds, fd) + + } + return nil +} + +// Must be called while holding ep.mu +func (ep *eventPoll) getFds() []int { + fds := make([]int, len(ep.fds)) + for fd := range ep.fds { + fds = append(fds, fd) + } + return fds +} + +func (e *epollImpl) epollwait(epfd int, events []EpollEvent, msec int) (n int, err error) { + e.mu.Lock() // in [rare] case of concurrent epollcreate + epollwait + ep, ok := e.epfd2ep[epfd] + + if !ok { + e.mu.Unlock() + return 0, EBADF + } + + pollfds := make([]PollFd, 4) + for fd, epollevt := range ep.fds { + pollfds = append(pollfds, PollFd{Fd: int32(fd), Events: epToPollEvt(epollevt.Events)}) + } + e.mu.Unlock() + + n, err = Poll(pollfds, msec) + if err != nil { + return n, err + } + + i := 0 + for _, pFd := range pollfds { + if pFd.Revents != 0 { + events[i] = EpollEvent{Fd: pFd.Fd, Events: pToEpollEvt(pFd.Revents)} + i++ + } + + if i == n { + break + } + } + + return n, nil +} + +func EpollCreate(size int) (fd int, err error) { + 
return impl.epollcreate(size) +} + +func EpollCreate1(flag int) (fd int, err error) { + return impl.epollcreate1(flag) +} + +func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { + return impl.epollctl(epfd, op, fd, event) +} + +// Because EpollWait mutates events, the caller is expected to coordinate +// concurrent access if calling with the same epfd from multiple goroutines. +func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { + return impl.epollwait(epfd, events, msec) +} diff --git a/vendor/golang.org/x/sys/unix/fcntl.go b/vendor/golang.org/x/sys/unix/fcntl.go index 4dc534864..e9b991258 100644 --- a/vendor/golang.org/x/sys/unix/fcntl.go +++ b/vendor/golang.org/x/sys/unix/fcntl.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build dragonfly || freebsd || linux || netbsd || openbsd // +build dragonfly freebsd linux netbsd openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go b/vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go index 8db48e5e0..29d44808b 100644 --- a/vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go +++ b/vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go @@ -2,7 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build linux,386 linux,arm linux,mips linux,mipsle +//go:build (linux && 386) || (linux && arm) || (linux && mips) || (linux && mipsle) || (linux && ppc) +// +build linux,386 linux,arm linux,mips linux,mipsle linux,ppc package unix diff --git a/vendor/golang.org/x/sys/unix/fdset.go b/vendor/golang.org/x/sys/unix/fdset.go index b27be0a01..a8068f94f 100644 --- a/vendor/golang.org/x/sys/unix/fdset.go +++ b/vendor/golang.org/x/sys/unix/fdset.go @@ -2,7 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris +//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos package unix diff --git a/vendor/golang.org/x/sys/unix/fstatfs_zos.go b/vendor/golang.org/x/sys/unix/fstatfs_zos.go new file mode 100644 index 000000000..e377cc9f4 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/fstatfs_zos.go @@ -0,0 +1,164 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build zos && s390x +// +build zos,s390x + +package unix + +import ( + "unsafe" +) + +// This file simulates fstatfs on z/OS using fstatvfs and w_getmntent. + +func Fstatfs(fd int, stat *Statfs_t) (err error) { + var stat_v Statvfs_t + err = Fstatvfs(fd, &stat_v) + if err == nil { + // populate stat + stat.Type = 0 + stat.Bsize = stat_v.Bsize + stat.Blocks = stat_v.Blocks + stat.Bfree = stat_v.Bfree + stat.Bavail = stat_v.Bavail + stat.Files = stat_v.Files + stat.Ffree = stat_v.Ffree + stat.Fsid = stat_v.Fsid + stat.Namelen = stat_v.Namemax + stat.Frsize = stat_v.Frsize + stat.Flags = stat_v.Flag + for passn := 0; passn < 5; passn++ { + switch passn { + case 0: + err = tryGetmntent64(stat) + break + case 1: + err = tryGetmntent128(stat) + break + case 2: + err = tryGetmntent256(stat) + break + case 3: + err = tryGetmntent512(stat) + break + case 4: + err = tryGetmntent1024(stat) + break + default: + break + } + //proceed to return if: err is nil (found), err is nonnil but not ERANGE (another error occurred) + if err == nil || err != nil && err != ERANGE { + break + } + } + } + return err +} + +func tryGetmntent64(stat *Statfs_t) (err error) { + var mnt_ent_buffer struct { + header W_Mnth + filesys_info [64]W_Mntent + } + var buffer_size int = int(unsafe.Sizeof(mnt_ent_buffer)) + fs_count, err := 
W_Getmntent((*byte)(unsafe.Pointer(&mnt_ent_buffer)), buffer_size) + if err != nil { + return err + } + err = ERANGE //return ERANGE if no match is found in this batch + for i := 0; i < fs_count; i++ { + if stat.Fsid == uint64(mnt_ent_buffer.filesys_info[i].Dev) { + stat.Type = uint32(mnt_ent_buffer.filesys_info[i].Fstname[0]) + err = nil + break + } + } + return err +} + +func tryGetmntent128(stat *Statfs_t) (err error) { + var mnt_ent_buffer struct { + header W_Mnth + filesys_info [128]W_Mntent + } + var buffer_size int = int(unsafe.Sizeof(mnt_ent_buffer)) + fs_count, err := W_Getmntent((*byte)(unsafe.Pointer(&mnt_ent_buffer)), buffer_size) + if err != nil { + return err + } + err = ERANGE //return ERANGE if no match is found in this batch + for i := 0; i < fs_count; i++ { + if stat.Fsid == uint64(mnt_ent_buffer.filesys_info[i].Dev) { + stat.Type = uint32(mnt_ent_buffer.filesys_info[i].Fstname[0]) + err = nil + break + } + } + return err +} + +func tryGetmntent256(stat *Statfs_t) (err error) { + var mnt_ent_buffer struct { + header W_Mnth + filesys_info [256]W_Mntent + } + var buffer_size int = int(unsafe.Sizeof(mnt_ent_buffer)) + fs_count, err := W_Getmntent((*byte)(unsafe.Pointer(&mnt_ent_buffer)), buffer_size) + if err != nil { + return err + } + err = ERANGE //return ERANGE if no match is found in this batch + for i := 0; i < fs_count; i++ { + if stat.Fsid == uint64(mnt_ent_buffer.filesys_info[i].Dev) { + stat.Type = uint32(mnt_ent_buffer.filesys_info[i].Fstname[0]) + err = nil + break + } + } + return err +} + +func tryGetmntent512(stat *Statfs_t) (err error) { + var mnt_ent_buffer struct { + header W_Mnth + filesys_info [512]W_Mntent + } + var buffer_size int = int(unsafe.Sizeof(mnt_ent_buffer)) + fs_count, err := W_Getmntent((*byte)(unsafe.Pointer(&mnt_ent_buffer)), buffer_size) + if err != nil { + return err + } + err = ERANGE //return ERANGE if no match is found in this batch + for i := 0; i < fs_count; i++ { + if stat.Fsid == 
uint64(mnt_ent_buffer.filesys_info[i].Dev) { + stat.Type = uint32(mnt_ent_buffer.filesys_info[i].Fstname[0]) + err = nil + break + } + } + return err +} + +func tryGetmntent1024(stat *Statfs_t) (err error) { + var mnt_ent_buffer struct { + header W_Mnth + filesys_info [1024]W_Mntent + } + var buffer_size int = int(unsafe.Sizeof(mnt_ent_buffer)) + fs_count, err := W_Getmntent((*byte)(unsafe.Pointer(&mnt_ent_buffer)), buffer_size) + if err != nil { + return err + } + err = ERANGE //return ERANGE if no match is found in this batch + for i := 0; i < fs_count; i++ { + if stat.Fsid == uint64(mnt_ent_buffer.filesys_info[i].Dev) { + stat.Type = uint32(mnt_ent_buffer.filesys_info[i].Fstname[0]) + err = nil + break + } + } + return err +} diff --git a/vendor/golang.org/x/sys/unix/gccgo.go b/vendor/golang.org/x/sys/unix/gccgo.go index 86032c11e..0dee23222 100644 --- a/vendor/golang.org/x/sys/unix/gccgo.go +++ b/vendor/golang.org/x/sys/unix/gccgo.go @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build gccgo -// +build !aix +//go:build gccgo && !aix +// +build gccgo,!aix package unix diff --git a/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go b/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go index 251a977a8..e60e49a3d 100644 --- a/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build gccgo && linux && amd64 // +build gccgo,linux,amd64 package unix diff --git a/vendor/golang.org/x/sys/unix/ioctl.go b/vendor/golang.org/x/sys/unix/ioctl.go index 564167861..6c7ad052e 100644 --- a/vendor/golang.org/x/sys/unix/ioctl.go +++ b/vendor/golang.org/x/sys/unix/ioctl.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris // +build aix darwin dragonfly freebsd linux netbsd openbsd solaris package unix diff --git a/vendor/golang.org/x/sys/unix/ioctl_linux.go b/vendor/golang.org/x/sys/unix/ioctl_linux.go new file mode 100644 index 000000000..48773f730 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/ioctl_linux.go @@ -0,0 +1,196 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package unix + +import ( + "runtime" + "unsafe" +) + +// IoctlRetInt performs an ioctl operation specified by req on a device +// associated with opened file descriptor fd, and returns a non-negative +// integer that is returned by the ioctl syscall. +func IoctlRetInt(fd int, req uint) (int, error) { + ret, _, err := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), 0) + if err != 0 { + return 0, err + } + return int(ret), nil +} + +func IoctlGetUint32(fd int, req uint) (uint32, error) { + var value uint32 + err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) + return value, err +} + +func IoctlGetRTCTime(fd int) (*RTCTime, error) { + var value RTCTime + err := ioctl(fd, RTC_RD_TIME, uintptr(unsafe.Pointer(&value))) + return &value, err +} + +func IoctlSetRTCTime(fd int, value *RTCTime) error { + err := ioctl(fd, RTC_SET_TIME, uintptr(unsafe.Pointer(value))) + runtime.KeepAlive(value) + return err +} + +func IoctlGetRTCWkAlrm(fd int) (*RTCWkAlrm, error) { + var value RTCWkAlrm + err := ioctl(fd, RTC_WKALM_RD, uintptr(unsafe.Pointer(&value))) + return &value, err +} + +func IoctlSetRTCWkAlrm(fd int, value *RTCWkAlrm) error { + err := ioctl(fd, RTC_WKALM_SET, uintptr(unsafe.Pointer(value))) + runtime.KeepAlive(value) + return err +} + +type ifreqEthtool struct { + name [IFNAMSIZ]byte + data unsafe.Pointer +} + +// IoctlGetEthtoolDrvinfo fetches ethtool driver information for the network +// device specified 
by ifname. +func IoctlGetEthtoolDrvinfo(fd int, ifname string) (*EthtoolDrvinfo, error) { + // Leave room for terminating NULL byte. + if len(ifname) >= IFNAMSIZ { + return nil, EINVAL + } + + value := EthtoolDrvinfo{ + Cmd: ETHTOOL_GDRVINFO, + } + ifreq := ifreqEthtool{ + data: unsafe.Pointer(&value), + } + copy(ifreq.name[:], ifname) + err := ioctl(fd, SIOCETHTOOL, uintptr(unsafe.Pointer(&ifreq))) + runtime.KeepAlive(ifreq) + return &value, err +} + +// IoctlGetWatchdogInfo fetches information about a watchdog device from the +// Linux watchdog API. For more information, see: +// https://www.kernel.org/doc/html/latest/watchdog/watchdog-api.html. +func IoctlGetWatchdogInfo(fd int) (*WatchdogInfo, error) { + var value WatchdogInfo + err := ioctl(fd, WDIOC_GETSUPPORT, uintptr(unsafe.Pointer(&value))) + return &value, err +} + +// IoctlWatchdogKeepalive issues a keepalive ioctl to a watchdog device. For +// more information, see: +// https://www.kernel.org/doc/html/latest/watchdog/watchdog-api.html. +func IoctlWatchdogKeepalive(fd int) error { + return ioctl(fd, WDIOC_KEEPALIVE, 0) +} + +// IoctlFileCloneRange performs an FICLONERANGE ioctl operation to clone the +// range of data conveyed in value to the file associated with the file +// descriptor destFd. See the ioctl_ficlonerange(2) man page for details. +func IoctlFileCloneRange(destFd int, value *FileCloneRange) error { + err := ioctl(destFd, FICLONERANGE, uintptr(unsafe.Pointer(value))) + runtime.KeepAlive(value) + return err +} + +// IoctlFileClone performs an FICLONE ioctl operation to clone the entire file +// associated with the file description srcFd to the file associated with the +// file descriptor destFd. See the ioctl_ficlone(2) man page for details. 
+func IoctlFileClone(destFd, srcFd int) error { + return ioctl(destFd, FICLONE, uintptr(srcFd)) +} + +type FileDedupeRange struct { + Src_offset uint64 + Src_length uint64 + Reserved1 uint16 + Reserved2 uint32 + Info []FileDedupeRangeInfo +} + +type FileDedupeRangeInfo struct { + Dest_fd int64 + Dest_offset uint64 + Bytes_deduped uint64 + Status int32 + Reserved uint32 +} + +// IoctlFileDedupeRange performs an FIDEDUPERANGE ioctl operation to share the +// range of data conveyed in value from the file associated with the file +// descriptor srcFd to the value.Info destinations. See the +// ioctl_fideduperange(2) man page for details. +func IoctlFileDedupeRange(srcFd int, value *FileDedupeRange) error { + buf := make([]byte, SizeofRawFileDedupeRange+ + len(value.Info)*SizeofRawFileDedupeRangeInfo) + rawrange := (*RawFileDedupeRange)(unsafe.Pointer(&buf[0])) + rawrange.Src_offset = value.Src_offset + rawrange.Src_length = value.Src_length + rawrange.Dest_count = uint16(len(value.Info)) + rawrange.Reserved1 = value.Reserved1 + rawrange.Reserved2 = value.Reserved2 + + for i := range value.Info { + rawinfo := (*RawFileDedupeRangeInfo)(unsafe.Pointer( + uintptr(unsafe.Pointer(&buf[0])) + uintptr(SizeofRawFileDedupeRange) + + uintptr(i*SizeofRawFileDedupeRangeInfo))) + rawinfo.Dest_fd = value.Info[i].Dest_fd + rawinfo.Dest_offset = value.Info[i].Dest_offset + rawinfo.Bytes_deduped = value.Info[i].Bytes_deduped + rawinfo.Status = value.Info[i].Status + rawinfo.Reserved = value.Info[i].Reserved + } + + err := ioctl(srcFd, FIDEDUPERANGE, uintptr(unsafe.Pointer(&buf[0]))) + + // Output + for i := range value.Info { + rawinfo := (*RawFileDedupeRangeInfo)(unsafe.Pointer( + uintptr(unsafe.Pointer(&buf[0])) + uintptr(SizeofRawFileDedupeRange) + + uintptr(i*SizeofRawFileDedupeRangeInfo))) + value.Info[i].Dest_fd = rawinfo.Dest_fd + value.Info[i].Dest_offset = rawinfo.Dest_offset + value.Info[i].Bytes_deduped = rawinfo.Bytes_deduped + value.Info[i].Status = rawinfo.Status + 
value.Info[i].Reserved = rawinfo.Reserved + } + + return err +} + +func IoctlHIDGetDesc(fd int, value *HIDRawReportDescriptor) error { + err := ioctl(fd, HIDIOCGRDESC, uintptr(unsafe.Pointer(value))) + runtime.KeepAlive(value) + return err +} + +func IoctlHIDGetRawInfo(fd int) (*HIDRawDevInfo, error) { + var value HIDRawDevInfo + err := ioctl(fd, HIDIOCGRAWINFO, uintptr(unsafe.Pointer(&value))) + return &value, err +} + +func IoctlHIDGetRawName(fd int) (string, error) { + var value [_HIDIOCGRAWNAME_LEN]byte + err := ioctl(fd, _HIDIOCGRAWNAME, uintptr(unsafe.Pointer(&value[0]))) + return ByteSliceToString(value[:]), err +} + +func IoctlHIDGetRawPhys(fd int) (string, error) { + var value [_HIDIOCGRAWPHYS_LEN]byte + err := ioctl(fd, _HIDIOCGRAWPHYS, uintptr(unsafe.Pointer(&value[0]))) + return ByteSliceToString(value[:]), err +} + +func IoctlHIDGetRawUniq(fd int) (string, error) { + var value [_HIDIOCGRAWUNIQ_LEN]byte + err := ioctl(fd, _HIDIOCGRAWUNIQ, uintptr(unsafe.Pointer(&value[0]))) + return ByteSliceToString(value[:]), err +} diff --git a/vendor/golang.org/x/sys/unix/ioctl_zos.go b/vendor/golang.org/x/sys/unix/ioctl_zos.go new file mode 100644 index 000000000..5384e7d91 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/ioctl_zos.go @@ -0,0 +1,74 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build zos && s390x +// +build zos,s390x + +package unix + +import ( + "runtime" + "unsafe" +) + +// ioctl itself should not be exposed directly, but additional get/set +// functions for specific types are permissible. + +// IoctlSetInt performs an ioctl operation which sets an integer value +// on fd, using the specified request number. +func IoctlSetInt(fd int, req uint, value int) error { + return ioctl(fd, req, uintptr(value)) +} + +// IoctlSetWinsize performs an ioctl on fd with a *Winsize argument. 
+// +// To change fd's window size, the req argument should be TIOCSWINSZ. +func IoctlSetWinsize(fd int, req uint, value *Winsize) error { + // TODO: if we get the chance, remove the req parameter and + // hardcode TIOCSWINSZ. + err := ioctl(fd, req, uintptr(unsafe.Pointer(value))) + runtime.KeepAlive(value) + return err +} + +// IoctlSetTermios performs an ioctl on fd with a *Termios. +// +// The req value is expected to be TCSETS, TCSETSW, or TCSETSF +func IoctlSetTermios(fd int, req uint, value *Termios) error { + if (req != TCSETS) && (req != TCSETSW) && (req != TCSETSF) { + return ENOSYS + } + err := Tcsetattr(fd, int(req), value) + runtime.KeepAlive(value) + return err +} + +// IoctlGetInt performs an ioctl operation which gets an integer value +// from fd, using the specified request number. +// +// A few ioctl requests use the return value as an output parameter; +// for those, IoctlRetInt should be used instead of this function. +func IoctlGetInt(fd int, req uint) (int, error) { + var value int + err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) + return value, err +} + +func IoctlGetWinsize(fd int, req uint) (*Winsize, error) { + var value Winsize + err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) + return &value, err +} + +// IoctlGetTermios performs an ioctl on fd with a *Termios. 
+// +// The req value is expected to be TCGETS +func IoctlGetTermios(fd int, req uint) (*Termios, error) { + var value Termios + if req != TCGETS { + return &value, ENOSYS + } + err := Tcgetattr(fd, &value) + return &value, err +} diff --git a/vendor/golang.org/x/sys/unix/mkall.sh b/vendor/golang.org/x/sys/unix/mkall.sh index d257fac50..396aadf86 100644 --- a/vendor/golang.org/x/sys/unix/mkall.sh +++ b/vendor/golang.org/x/sys/unix/mkall.sh @@ -70,23 +70,11 @@ aix_ppc64) mksyscall="go run mksyscall_aix_ppc64.go -aix" mktypes="GOARCH=$GOARCH go tool cgo -godefs" ;; -darwin_386) - mkerrors="$mkerrors -m32" - mksyscall="go run mksyscall.go -l32" - mktypes="GOARCH=$GOARCH go tool cgo -godefs" - mkasm="go run mkasm_darwin.go" - ;; darwin_amd64) mkerrors="$mkerrors -m64" mktypes="GOARCH=$GOARCH go tool cgo -godefs" mkasm="go run mkasm_darwin.go" ;; -darwin_arm) - mkerrors="$mkerrors" - mksyscall="go run mksyscall.go -l32" - mktypes="GOARCH=$GOARCH go tool cgo -godefs" - mkasm="go run mkasm_darwin.go" - ;; darwin_arm64) mkerrors="$mkerrors -m64" mktypes="GOARCH=$GOARCH go tool cgo -godefs" @@ -199,7 +187,7 @@ illumos_amd64) mksyscall="go run mksyscall_solaris.go" mkerrors= mksysnum= - mktypes= + mktypes="GOARCH=$GOARCH go tool cgo -godefs" ;; *) echo 'unrecognized $GOOS_$GOARCH: ' "$GOOSARCH" 1>&2 diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index b8313e98a..3f670faba 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -56,6 +56,7 @@ includes_Darwin=' #define _DARWIN_C_SOURCE #define KERNEL #define _DARWIN_USE_64_BIT_INODE +#define __APPLE_USE_RFC_3542 #include #include #include @@ -65,6 +66,7 @@ includes_Darwin=' #include #include #include +#include #include #include #include @@ -114,6 +116,7 @@ includes_FreeBSD=' #include #include #include +#include #include #include #include @@ -213,6 +216,8 @@ struct ltchars { #include #include #include +#include +#include #include 
#include #include @@ -223,6 +228,7 @@ struct ltchars { #include #include #include +#include #include #include #include @@ -233,6 +239,7 @@ struct ltchars { #include #include #include +#include #include #include #include @@ -252,6 +259,7 @@ struct ltchars { #include #include +#include #include #if defined(__sparc__) @@ -299,6 +307,17 @@ struct ltchars { // Including linux/l2tp.h here causes conflicts between linux/in.h // and netinet/in.h included via net/route.h above. #define IPPROTO_L2TP 115 + +// Copied from linux/hid.h. +// Keep in sync with the size of the referenced fields. +#define _HIDIOCGRAWNAME_LEN 128 // sizeof_field(struct hid_device, name) +#define _HIDIOCGRAWPHYS_LEN 64 // sizeof_field(struct hid_device, phys) +#define _HIDIOCGRAWUNIQ_LEN 64 // sizeof_field(struct hid_device, uniq) + +#define _HIDIOCGRAWNAME HIDIOCGRAWNAME(_HIDIOCGRAWNAME_LEN) +#define _HIDIOCGRAWPHYS HIDIOCGRAWPHYS(_HIDIOCGRAWPHYS_LEN) +#define _HIDIOCGRAWUNIQ HIDIOCGRAWUNIQ(_HIDIOCGRAWUNIQ_LEN) + ' includes_NetBSD=' @@ -388,10 +407,11 @@ includes_SunOS=' #include #include #include +#include #include -#include #include #include +#include ' @@ -446,6 +466,8 @@ ccflags="$@" $2 !~ /^EPROC_/ && $2 !~ /^EQUIV_/ && $2 !~ /^EXPR_/ && + $2 !~ /^EVIOC/ && + $2 !~ /^EV_/ && $2 ~ /^E[A-Z0-9_]+$/ || $2 ~ /^B[0-9_]+$/ || $2 ~ /^(OLD|NEW)DEV$/ || @@ -480,10 +502,13 @@ ccflags="$@" $2 ~ /^LOCK_(SH|EX|NB|UN)$/ || $2 ~ /^LO_(KEY|NAME)_SIZE$/ || $2 ~ /^LOOP_(CLR|CTL|GET|SET)_/ || - $2 ~ /^(AF|SOCK|SO|SOL|IPPROTO|IP|IPV6|ICMP6|TCP|MCAST|EVFILT|NOTE|EV|SHUT|PROT|MAP|MFD|T?PACKET|MSG|SCM|MCL|DT|MADV|PR)_/ || + $2 ~ /^(AF|SOCK|SO|SOL|IPPROTO|IP|IPV6|TCP|MCAST|EVFILT|NOTE|SHUT|PROT|MAP|MFD|T?PACKET|MSG|SCM|MCL|DT|MADV|PR|LOCAL)_/ || + $2 ~ /^NFC_(GENL|PROTO|COMM|RF|SE|DIRECTION|LLCP|SOCKPROTO)_/ || + $2 ~ /^NFC_.*_(MAX)?SIZE$/ || + $2 ~ /^RAW_PAYLOAD_/ || $2 ~ /^TP_STATUS_/ || $2 ~ /^FALLOC_/ || - $2 == "ICMPV6_FILTER" || + $2 ~ /^ICMPV?6?_(FILTER|SEC)/ || $2 == "SOMAXCONN" || $2 == "NAME_MAX" || $2 == 
"IFNAMSIZ" || @@ -570,6 +595,12 @@ ccflags="$@" $2 ~ /^W[A-Z0-9]+$/ || $2 ~/^PPPIOC/ || $2 ~ /^FAN_|FANOTIFY_/ || + $2 == "HID_MAX_DESCRIPTOR_SIZE" || + $2 ~ /^_?HIDIOC/ || + $2 ~ /^BUS_(USB|HIL|BLUETOOTH|VIRTUAL)$/ || + $2 ~ /^MTD/ || + $2 ~ /^OTP/ || + $2 ~ /^MEM/ || $2 ~ /^BLK[A-Z]*(GET$|SET$|BUF$|PART$|SIZE)/ {printf("\t%s = C.%s\n", $2, $2)} $2 ~ /^__WCOREFLAG$/ {next} $2 ~ /^__W[A-Z0-9]+$/ {printf("\t%s = C.%s\n", substr($2,3), $2)} @@ -607,6 +638,7 @@ echo '#include ' | $CC -x c - -E -dM $ccflags | echo '// mkerrors.sh' "$@" echo '// Code generated by the command above; see README.md. DO NOT EDIT.' echo +echo "//go:build ${GOARCH} && ${GOOS}" echo "// +build ${GOARCH},${GOOS}" echo go tool cgo -godefs -- "$@" _const.go >_error.out diff --git a/vendor/golang.org/x/sys/unix/pagesize_unix.go b/vendor/golang.org/x/sys/unix/pagesize_unix.go index bc2f3629a..53f1b4c5b 100644 --- a/vendor/golang.org/x/sys/unix/pagesize_unix.go +++ b/vendor/golang.org/x/sys/unix/pagesize_unix.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris // +build aix darwin dragonfly freebsd linux netbsd openbsd solaris // For Unix, get the pagesize from the runtime. diff --git a/vendor/golang.org/x/sys/unix/ptrace_darwin.go b/vendor/golang.org/x/sys/unix/ptrace_darwin.go index fc568b540..463c3eff7 100644 --- a/vendor/golang.org/x/sys/unix/ptrace_darwin.go +++ b/vendor/golang.org/x/sys/unix/ptrace_darwin.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+//go:build darwin && !ios // +build darwin,!ios package unix diff --git a/vendor/golang.org/x/sys/unix/ptrace_ios.go b/vendor/golang.org/x/sys/unix/ptrace_ios.go index 183441c9a..ed0509a01 100644 --- a/vendor/golang.org/x/sys/unix/ptrace_ios.go +++ b/vendor/golang.org/x/sys/unix/ptrace_ios.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build ios // +build ios package unix diff --git a/vendor/golang.org/x/sys/unix/race.go b/vendor/golang.org/x/sys/unix/race.go index 61712b51c..6f6c5fec5 100644 --- a/vendor/golang.org/x/sys/unix/race.go +++ b/vendor/golang.org/x/sys/unix/race.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build (darwin && race) || (linux && race) || (freebsd && race) // +build darwin,race linux,race freebsd,race package unix diff --git a/vendor/golang.org/x/sys/unix/race0.go b/vendor/golang.org/x/sys/unix/race0.go index ad026678c..706e1322a 100644 --- a/vendor/golang.org/x/sys/unix/race0.go +++ b/vendor/golang.org/x/sys/unix/race0.go @@ -2,7 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build aix darwin,!race linux,!race freebsd,!race netbsd openbsd solaris dragonfly +//go:build aix || (darwin && !race) || (linux && !race) || (freebsd && !race) || netbsd || openbsd || solaris || dragonfly || zos +// +build aix darwin,!race linux,!race freebsd,!race netbsd openbsd solaris dragonfly zos package unix diff --git a/vendor/golang.org/x/sys/unix/readdirent_getdents.go b/vendor/golang.org/x/sys/unix/readdirent_getdents.go index 3a90aa6df..4d6257569 100644 --- a/vendor/golang.org/x/sys/unix/readdirent_getdents.go +++ b/vendor/golang.org/x/sys/unix/readdirent_getdents.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+//go:build aix || dragonfly || freebsd || linux || netbsd || openbsd // +build aix dragonfly freebsd linux netbsd openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/readdirent_getdirentries.go b/vendor/golang.org/x/sys/unix/readdirent_getdirentries.go index 5fdae40b3..2a4ba47c4 100644 --- a/vendor/golang.org/x/sys/unix/readdirent_getdirentries.go +++ b/vendor/golang.org/x/sys/unix/readdirent_getdirentries.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build darwin // +build darwin package unix diff --git a/vendor/golang.org/x/sys/unix/sockcmsg_unix.go b/vendor/golang.org/x/sys/unix/sockcmsg_unix.go index 003916ed7..453a942c5 100644 --- a/vendor/golang.org/x/sys/unix/sockcmsg_unix.go +++ b/vendor/golang.org/x/sys/unix/sockcmsg_unix.go @@ -2,7 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris +//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos // Socket control messages diff --git a/vendor/golang.org/x/sys/unix/sockcmsg_unix_other.go b/vendor/golang.org/x/sys/unix/sockcmsg_unix_other.go index 57a0021da..0840fe4a5 100644 --- a/vendor/golang.org/x/sys/unix/sockcmsg_unix_other.go +++ b/vendor/golang.org/x/sys/unix/sockcmsg_unix_other.go @@ -2,7 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build aix darwin freebsd linux netbsd openbsd solaris +//go:build aix || darwin || freebsd || linux || netbsd || openbsd || solaris || zos +// +build aix darwin freebsd linux netbsd openbsd solaris zos package unix @@ -36,6 +37,10 @@ func cmsgAlignOf(salen int) int { if runtime.GOOS == "netbsd" && runtime.GOARCH == "arm64" { salign = 16 } + case "zos": + // z/OS socket macros use [32-bit] sizeof(int) alignment, + // not pointer width. + salign = SizeofInt } return (salen + salign - 1) & ^(salign - 1) diff --git a/vendor/golang.org/x/sys/unix/str.go b/vendor/golang.org/x/sys/unix/str.go index 17fb69868..8ba89ed86 100644 --- a/vendor/golang.org/x/sys/unix/str.go +++ b/vendor/golang.org/x/sys/unix/str.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris // +build aix darwin dragonfly freebsd linux netbsd openbsd solaris package unix diff --git a/vendor/golang.org/x/sys/unix/syscall.go b/vendor/golang.org/x/sys/unix/syscall.go index ab75ef9cc..649fa8740 100644 --- a/vendor/golang.org/x/sys/unix/syscall.go +++ b/vendor/golang.org/x/sys/unix/syscall.go @@ -2,7 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris +//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos // Package unix contains an interface to the low-level operating system // primitives. 
OS details vary depending on the underlying system, and diff --git a/vendor/golang.org/x/sys/unix/syscall_aix.go b/vendor/golang.org/x/sys/unix/syscall_aix.go index 440815382..d8efb715f 100644 --- a/vendor/golang.org/x/sys/unix/syscall_aix.go +++ b/vendor/golang.org/x/sys/unix/syscall_aix.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build aix // +build aix // Aix system calls. @@ -251,7 +252,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { } } - bytes := (*[10000]byte)(unsafe.Pointer(&pp.Path[0]))[0:n] + bytes := (*[len(pp.Path)]byte)(unsafe.Pointer(&pp.Path[0]))[0:n] sa.Name = string(bytes) return sa, nil @@ -419,8 +420,8 @@ func (w WaitStatus) TrapCause() int { return -1 } //sys Mknod(path string, mode uint32, dev int) (err error) //sys Mknodat(dirfd int, path string, mode uint32, dev int) (err error) //sys Nanosleep(time *Timespec, leftover *Timespec) (err error) -//sys Open(path string, mode int, perm uint32) (fd int, err error) = open64 -//sys Openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) +//sys Open(path string, mode int, perm uint32) (fd int, err error) = open64 +//sys Openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) //sys read(fd int, p []byte) (n int, err error) //sys Readlink(path string, buf []byte) (n int, err error) //sys Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) @@ -439,8 +440,8 @@ func (w WaitStatus) TrapCause() int { return -1 } //sysnb Times(tms *Tms) (ticks uintptr, err error) //sysnb Umask(mask int) (oldmask int) //sysnb Uname(buf *Utsname) (err error) -//sys Unlink(path string) (err error) -//sys Unlinkat(dirfd int, path string, flags int) (err error) +//sys Unlink(path string) (err error) +//sys Unlinkat(dirfd int, path string, flags int) (err error) //sys Ustat(dev int, ubuf *Ustat_t) (err error) //sys write(fd int, p []byte) (n int, err error) 
//sys readlen(fd int, p *byte, np int) (n int, err error) = read @@ -514,7 +515,7 @@ func Munmap(b []byte) (err error) { //sys Munlock(b []byte) (err error) //sys Munlockall() (err error) -//sysnb pipe(p *[2]_C_int) (err error) +//sysnb pipe(p *[2]_C_int) (err error) func Pipe(p []int) (err error) { if len(p) != 2 { diff --git a/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go b/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go index b3c8e3301..e92a0be16 100644 --- a/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go +++ b/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build aix -// +build ppc +//go:build aix && ppc +// +build aix,ppc package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go b/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go index 9a6e02417..16eed1709 100644 --- a/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go +++ b/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build aix -// +build ppc64 +//go:build aix && ppc64 +// +build aix,ppc64 package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_bsd.go b/vendor/golang.org/x/sys/unix/syscall_bsd.go index bc634a280..95ac3946b 100644 --- a/vendor/golang.org/x/sys/unix/syscall_bsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_bsd.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+//go:build darwin || dragonfly || freebsd || netbsd || openbsd // +build darwin dragonfly freebsd netbsd openbsd // BSD system call wrappers shared by *BSD based systems @@ -318,7 +319,7 @@ func Getsockname(fd int) (sa Sockaddr, err error) { return anyToSockaddr(fd, &rsa) } -//sysnb socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) +//sysnb socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) // GetsockoptString returns the string value of the socket option opt for the // socket associated with fd at the given socket level. @@ -332,8 +333,8 @@ func GetsockoptString(fd, level, opt int) (string, error) { return string(buf[:vallen-1]), nil } -//sys recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) -//sys sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) +//sys recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) +//sys sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) //sys recvmsg(s int, msg *Msghdr, flags int) (n int, err error) func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from Sockaddr, err error) { @@ -626,7 +627,7 @@ func Futimes(fd int, tv []Timeval) error { return futimes(fd, (*[2]Timeval)(unsafe.Pointer(&tv[0]))) } -//sys poll(fds *PollFd, nfds int, timeout int) (n int, err error) +//sys poll(fds *PollFd, nfds int, timeout int) (n int, err error) func Poll(fds []PollFd, timeout int) (n int, err error) { if len(fds) == 0 { diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.1_12.go b/vendor/golang.org/x/sys/unix/syscall_darwin.1_12.go index b31ef0358..b0098607c 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin.1_12.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin.1_12.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+//go:build darwin && go1.12 && !go1.13 // +build darwin,go1.12,!go1.13 package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.1_13.go b/vendor/golang.org/x/sys/unix/syscall_darwin.1_13.go index ee852f1ab..1596426b1 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin.1_13.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin.1_13.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build darwin && go1.13 // +build darwin,go1.13 package unix @@ -16,7 +17,7 @@ import ( //sys readdir_r(dir uintptr, entry *Dirent, result **Dirent) (res Errno) func fdopendir(fd int) (dir uintptr, err error) { - r0, _, e1 := syscall_syscallPtr(funcPC(libc_fdopendir_trampoline), uintptr(fd), 0, 0) + r0, _, e1 := syscall_syscallPtr(libc_fdopendir_trampoline_addr, uintptr(fd), 0, 0) dir = uintptr(r0) if e1 != 0 { err = errnoErr(e1) @@ -24,7 +25,7 @@ func fdopendir(fd int) (dir uintptr, err error) { return } -func libc_fdopendir_trampoline() +var libc_fdopendir_trampoline_addr uintptr //go:cgo_import_dynamic libc_fdopendir fdopendir "/usr/lib/libSystem.B.dylib" diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go index 16f9c226b..9945e5f96 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go @@ -119,7 +119,7 @@ type attrList struct { Forkattr uint32 } -//sysnb pipe(p *[2]int32) (err error) +//sysnb pipe(p *[2]int32) (err error) func Pipe(p []int) (err error) { if len(p) != 2 { @@ -272,7 +272,7 @@ func setattrlistTimes(path string, times []Timespec, flags int) error { options) } -//sys setattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintptr, options int) (err error) +//sys setattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintptr, options int) (err error) func utimensat(dirfd int, path string, times *[2]Timespec, flags int) error { // Darwin 
doesn't support SYS_UTIMENSAT @@ -320,7 +320,7 @@ func IoctlSetIfreqMTU(fd int, ifreq *IfreqMTU) error { return err } -//sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS_SYSCTL +//sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS_SYSCTL func Uname(uname *Utsname) error { mib := []_C_int{CTL_KERN, KERN_OSTYPE} @@ -378,6 +378,26 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e return } +func GetsockoptIPMreqn(fd, level, opt int) (*IPMreqn, error) { + var value IPMreqn + vallen := _Socklen(SizeofIPMreqn) + errno := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen) + return &value, errno +} + +func SetsockoptIPMreqn(fd, level, opt int, mreq *IPMreqn) (err error) { + return setsockopt(fd, level, opt, unsafe.Pointer(mreq), unsafe.Sizeof(*mreq)) +} + +// GetsockoptXucred is a getsockopt wrapper that returns an Xucred struct. +// The usual level and opt are SOL_LOCAL and LOCAL_PEERCRED, respectively. 
+func GetsockoptXucred(fd, level, opt int) (*Xucred, error) { + x := new(Xucred) + vallen := _Socklen(SizeofXucred) + err := getsockopt(fd, level, opt, unsafe.Pointer(x), &vallen) + return x, err +} + //sys sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) /* @@ -472,8 +492,8 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys Unlinkat(dirfd int, path string, flags int) (err error) //sys Unmount(path string, flags int) (err error) //sys write(fd int, p []byte) (n int, err error) -//sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) -//sys munmap(addr uintptr, length uintptr) (err error) +//sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) +//sys munmap(addr uintptr, length uintptr) (err error) //sys readlen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_READ //sys writelen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_WRITE diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_386.go b/vendor/golang.org/x/sys/unix/syscall_darwin_386.go deleted file mode 100644 index ee065fcf2..000000000 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_386.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build 386,darwin - -package unix - -import "syscall" - -func setTimespec(sec, nsec int64) Timespec { - return Timespec{Sec: int32(sec), Nsec: int32(nsec)} -} - -func setTimeval(sec, usec int64) Timeval { - return Timeval{Sec: int32(sec), Usec: int32(usec)} -} - -func SetKevent(k *Kevent_t, fd, mode, flags int) { - k.Ident = uint32(fd) - k.Filter = int16(mode) - k.Flags = uint16(flags) -} - -func (iov *Iovec) SetLen(length int) { - iov.Len = uint32(length) -} - -func (msghdr *Msghdr) SetControllen(length int) { - msghdr.Controllen = uint32(length) -} - -func (msghdr *Msghdr) SetIovlen(length int) { - msghdr.Iovlen = int32(length) -} - -func (cmsg *Cmsghdr) SetLen(length int) { - cmsg.Len = uint32(length) -} - -func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) - -//sys Fstat(fd int, stat *Stat_t) (err error) = SYS_FSTAT64 -//sys Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) = SYS_FSTATAT64 -//sys Fstatfs(fd int, stat *Statfs_t) (err error) = SYS_FSTATFS64 -//sys getfsstat(buf unsafe.Pointer, size uintptr, flags int) (n int, err error) = SYS_GETFSSTAT64 -//sys Lstat(path string, stat *Stat_t) (err error) = SYS_LSTAT64 -//sys ptrace1(request int, pid int, addr uintptr, data uintptr) (err error) = SYS_ptrace -//sys Stat(path string, stat *Stat_t) (err error) = SYS_STAT64 -//sys Statfs(path string, stat *Statfs_t) (err error) = SYS_STATFS64 diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go index 7a1f64a7b..b37310ce9 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+//go:build amd64 && darwin // +build amd64,darwin package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go b/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go deleted file mode 100644 index d30735c5d..000000000 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package unix - -import "syscall" - -func ptrace1(request int, pid int, addr uintptr, data uintptr) error { - return ENOTSUP -} - -func setTimespec(sec, nsec int64) Timespec { - return Timespec{Sec: int32(sec), Nsec: int32(nsec)} -} - -func setTimeval(sec, usec int64) Timeval { - return Timeval{Sec: int32(sec), Usec: int32(usec)} -} - -func SetKevent(k *Kevent_t, fd, mode, flags int) { - k.Ident = uint32(fd) - k.Filter = int16(mode) - k.Flags = uint16(flags) -} - -func (iov *Iovec) SetLen(length int) { - iov.Len = uint32(length) -} - -func (msghdr *Msghdr) SetControllen(length int) { - msghdr.Controllen = uint32(length) -} - -func (msghdr *Msghdr) SetIovlen(length int) { - msghdr.Iovlen = int32(length) -} - -func (cmsg *Cmsghdr) SetLen(length int) { - cmsg.Len = uint32(length) -} - -func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) // sic - -//sys Fstat(fd int, stat *Stat_t) (err error) -//sys Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) -//sys Fstatfs(fd int, stat *Statfs_t) (err error) -//sys getfsstat(buf unsafe.Pointer, size uintptr, flags int) (n int, err error) = SYS_GETFSSTAT -//sys Lstat(path string, stat *Stat_t) (err error) -//sys Stat(path string, stat *Stat_t) (err error) -//sys Statfs(path string, stat *Statfs_t) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go index 9f85fd404..d51ec9963 100644 --- 
a/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build arm64 && darwin // +build arm64,darwin package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go b/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go index f34c86c89..53c96641f 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go @@ -2,11 +2,12 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build darwin && go1.12 // +build darwin,go1.12 package unix -import "unsafe" +import _ "unsafe" // Implemented in the runtime package (runtime/sys_darwin.go) func syscall_syscall(fn, a1, a2, a3 uintptr) (r1, r2 uintptr, err Errno) @@ -24,10 +25,3 @@ func syscall_syscallPtr(fn, a1, a2, a3 uintptr) (r1, r2 uintptr, err Errno) //go:linkname syscall_rawSyscall syscall.rawSyscall //go:linkname syscall_rawSyscall6 syscall.rawSyscall6 //go:linkname syscall_syscallPtr syscall.syscallPtr - -// Find the entry point for f. See comments in runtime/proc.go for the -// function of the same name. 
-//go:nosplit -func funcPC(f func()) uintptr { - return **(**uintptr)(unsafe.Pointer(&f)) -} diff --git a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go index a4f2944a2..5af108a50 100644 --- a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go +++ b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go @@ -95,7 +95,7 @@ func direntNamlen(buf []byte) (uint64, bool) { return readInt(buf, unsafe.Offsetof(Dirent{}.Namlen), unsafe.Sizeof(Dirent{}.Namlen)) } -//sysnb pipe() (r int, w int, err error) +//sysnb pipe() (r int, w int, err error) func Pipe(p []int) (err error) { if len(p) != 2 { @@ -105,16 +105,16 @@ func Pipe(p []int) (err error) { return } -//sysnb pipe2(p *[2]_C_int, flags int) (err error) +//sysnb pipe2(p *[2]_C_int, flags int) (r int, w int, err error) -func Pipe2(p []int, flags int) error { +func Pipe2(p []int, flags int) (err error) { if len(p) != 2 { return EINVAL } var pp [2]_C_int - err := pipe2(&pp, flags) - p[0] = int(pp[0]) - p[1] = int(pp[1]) + // pipe2 on dragonfly takes an fds array as an argument, but still + // returns the file descriptors. 
+ p[0], p[1], err = pipe2(&pp, flags) return err } @@ -170,7 +170,7 @@ func setattrlistTimes(path string, times []Timespec, flags int) error { //sys ioctl(fd int, req uint, arg uintptr) (err error) -//sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL +//sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL func sysctlUname(mib []_C_int, old *byte, oldlen *uintptr) error { err := sysctl(mib, old, oldlen, nil, 0) @@ -337,8 +337,8 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys Unlinkat(dirfd int, path string, flags int) (err error) //sys Unmount(path string, flags int) (err error) //sys write(fd int, p []byte) (n int, err error) -//sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) -//sys munmap(addr uintptr, length uintptr) (err error) +//sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) +//sys munmap(addr uintptr, length uintptr) (err error) //sys readlen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_READ //sys writelen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_WRITE //sys accept4(fd int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (nfd int, err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go index a6b4830ac..4e2d32120 100644 --- a/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+//go:build amd64 && dragonfly // +build amd64,dragonfly package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd.go b/vendor/golang.org/x/sys/unix/syscall_freebsd.go index acc00c2e6..18c392cf3 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd.go @@ -126,6 +126,15 @@ func SetsockoptIPMreqn(fd, level, opt int, mreq *IPMreqn) (err error) { return setsockopt(fd, level, opt, unsafe.Pointer(mreq), unsafe.Sizeof(*mreq)) } +// GetsockoptXucred is a getsockopt wrapper that returns an Xucred struct. +// The usual level and opt are SOL_LOCAL and LOCAL_PEERCRED, respectively. +func GetsockoptXucred(fd, level, opt int) (*Xucred, error) { + x := new(Xucred) + vallen := _Socklen(SizeofXucred) + err := getsockopt(fd, level, opt, unsafe.Pointer(x), &vallen) + return x, err +} + func Accept4(fd, flags int) (nfd int, sa Sockaddr, err error) { var rsa RawSockaddrAny var len _Socklen = SizeofSockaddrAny @@ -188,9 +197,9 @@ func setattrlistTimes(path string, times []Timespec, flags int) error { return ENOSYS } -//sys ioctl(fd int, req uint, arg uintptr) (err error) +//sys ioctl(fd int, req uint, arg uintptr) (err error) -//sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL +//sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL func Uname(uname *Utsname) error { mib := []_C_int{CTL_KERN, KERN_OSTYPE} @@ -665,8 +674,8 @@ func PtraceSingleStep(pid int) (err error) { //sys Unlinkat(dirfd int, path string, flags int) (err error) //sys Unmount(path string, flags int) (err error) //sys write(fd int, p []byte) (n int, err error) -//sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) -//sys munmap(addr uintptr, length uintptr) (err error) +//sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) +//sys munmap(addr 
uintptr, length uintptr) (err error) //sys readlen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_READ //sys writelen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_WRITE //sys accept4(fd int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (nfd int, err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go index 72a506ddc..342fc32b1 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build 386 && freebsd // +build 386,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go index d5e376aca..a32d5aa4a 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build amd64 && freebsd // +build amd64,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go index 4ea45bce5..1e36d39ab 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+//go:build arm && freebsd // +build arm,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go index aa5326db1..a09a1537b 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build arm64 && freebsd // +build arm64,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_illumos.go b/vendor/golang.org/x/sys/unix/syscall_illumos.go index 7a2d4120f..8c5357683 100644 --- a/vendor/golang.org/x/sys/unix/syscall_illumos.go +++ b/vendor/golang.org/x/sys/unix/syscall_illumos.go @@ -1,14 +1,19 @@ -// Copyright 2009 The Go Authors. All rights reserved. +// Copyright 2021 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // illumos system calls not present on Solaris. 
+//go:build amd64 && illumos // +build amd64,illumos package unix -import "unsafe" +import ( + "fmt" + "runtime" + "unsafe" +) func bytes2iovec(bs [][]byte) []Iovec { iovecs := make([]Iovec, len(bs)) @@ -75,3 +80,99 @@ func Accept4(fd int, flags int) (nfd int, sa Sockaddr, err error) { } return } + +//sys putmsg(fd int, clptr *strbuf, dataptr *strbuf, flags int) (err error) + +func Putmsg(fd int, cl []byte, data []byte, flags int) (err error) { + var clp, datap *strbuf + if len(cl) > 0 { + clp = &strbuf{ + Len: int32(len(cl)), + Buf: (*int8)(unsafe.Pointer(&cl[0])), + } + } + if len(data) > 0 { + datap = &strbuf{ + Len: int32(len(data)), + Buf: (*int8)(unsafe.Pointer(&data[0])), + } + } + return putmsg(fd, clp, datap, flags) +} + +//sys getmsg(fd int, clptr *strbuf, dataptr *strbuf, flags *int) (err error) + +func Getmsg(fd int, cl []byte, data []byte) (retCl []byte, retData []byte, flags int, err error) { + var clp, datap *strbuf + if len(cl) > 0 { + clp = &strbuf{ + Maxlen: int32(len(cl)), + Buf: (*int8)(unsafe.Pointer(&cl[0])), + } + } + if len(data) > 0 { + datap = &strbuf{ + Maxlen: int32(len(data)), + Buf: (*int8)(unsafe.Pointer(&data[0])), + } + } + + if err = getmsg(fd, clp, datap, &flags); err != nil { + return nil, nil, 0, err + } + + if len(cl) > 0 { + retCl = cl[:clp.Len] + } + if len(data) > 0 { + retData = data[:datap.Len] + } + return retCl, retData, flags, nil +} + +func IoctlSetIntRetInt(fd int, req uint, arg int) (int, error) { + return ioctlRet(fd, req, uintptr(arg)) +} + +func IoctlSetString(fd int, req uint, val string) error { + bs := make([]byte, len(val)+1) + copy(bs[:len(bs)-1], val) + err := ioctl(fd, req, uintptr(unsafe.Pointer(&bs[0]))) + runtime.KeepAlive(&bs[0]) + return err +} + +// Lifreq Helpers + +func (l *Lifreq) SetName(name string) error { + if len(name) >= len(l.Name) { + return fmt.Errorf("name cannot be more than %d characters", len(l.Name)-1) + } + for i := range name { + l.Name[i] = int8(name[i]) + } + return nil +} + +func 
(l *Lifreq) SetLifruInt(d int) { + *(*int)(unsafe.Pointer(&l.Lifru[0])) = d +} + +func (l *Lifreq) GetLifruInt() int { + return *(*int)(unsafe.Pointer(&l.Lifru[0])) +} + +func IoctlLifreq(fd int, req uint, l *Lifreq) error { + return ioctl(fd, req, uintptr(unsafe.Pointer(l))) +} + +// Strioctl Helpers + +func (s *Strioctl) SetInt(i int) { + s.Len = int32(unsafe.Sizeof(i)) + s.Dp = (*int8)(unsafe.Pointer(&i)) +} + +func IoctlSetStrioctlRetInt(fd int, req uint, s *Strioctl) (int, error) { + return ioctlRet(fd, req, uintptr(unsafe.Pointer(s))) +} diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index 28be1306e..41b91fdfb 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -70,88 +70,7 @@ func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { // ioctl itself should not be exposed directly, but additional get/set // functions for specific types are permissible. - -// IoctlRetInt performs an ioctl operation specified by req on a device -// associated with opened file descriptor fd, and returns a non-negative -// integer that is returned by the ioctl syscall. 
-func IoctlRetInt(fd int, req uint) (int, error) { - ret, _, err := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), 0) - if err != 0 { - return 0, err - } - return int(ret), nil -} - -func IoctlSetRTCTime(fd int, value *RTCTime) error { - err := ioctl(fd, RTC_SET_TIME, uintptr(unsafe.Pointer(value))) - runtime.KeepAlive(value) - return err -} - -func IoctlSetRTCWkAlrm(fd int, value *RTCWkAlrm) error { - err := ioctl(fd, RTC_WKALM_SET, uintptr(unsafe.Pointer(value))) - runtime.KeepAlive(value) - return err -} - -func IoctlGetUint32(fd int, req uint) (uint32, error) { - var value uint32 - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return value, err -} - -func IoctlGetRTCTime(fd int) (*RTCTime, error) { - var value RTCTime - err := ioctl(fd, RTC_RD_TIME, uintptr(unsafe.Pointer(&value))) - return &value, err -} - -// IoctlGetWatchdogInfo fetches information about a watchdog device from the -// Linux watchdog API. For more information, see: -// https://www.kernel.org/doc/html/latest/watchdog/watchdog-api.html. -func IoctlGetWatchdogInfo(fd int) (*WatchdogInfo, error) { - var value WatchdogInfo - err := ioctl(fd, WDIOC_GETSUPPORT, uintptr(unsafe.Pointer(&value))) - return &value, err -} - -func IoctlGetRTCWkAlrm(fd int) (*RTCWkAlrm, error) { - var value RTCWkAlrm - err := ioctl(fd, RTC_WKALM_RD, uintptr(unsafe.Pointer(&value))) - return &value, err -} - -// IoctlFileCloneRange performs an FICLONERANGE ioctl operation to clone the -// range of data conveyed in value to the file associated with the file -// descriptor destFd. See the ioctl_ficlonerange(2) man page for details. -func IoctlFileCloneRange(destFd int, value *FileCloneRange) error { - err := ioctl(destFd, FICLONERANGE, uintptr(unsafe.Pointer(value))) - runtime.KeepAlive(value) - return err -} - -// IoctlFileClone performs an FICLONE ioctl operation to clone the entire file -// associated with the file description srcFd to the file associated with the -// file descriptor destFd. 
See the ioctl_ficlone(2) man page for details. -func IoctlFileClone(destFd, srcFd int) error { - return ioctl(destFd, FICLONE, uintptr(srcFd)) -} - -// IoctlFileDedupeRange performs an FIDEDUPERANGE ioctl operation to share the -// range of data conveyed in value with the file associated with the file -// descriptor destFd. See the ioctl_fideduperange(2) man page for details. -func IoctlFileDedupeRange(destFd int, value *FileDedupeRange) error { - err := ioctl(destFd, FIDEDUPERANGE, uintptr(unsafe.Pointer(value))) - runtime.KeepAlive(value) - return err -} - -// IoctlWatchdogKeepalive issues a keepalive ioctl to a watchdog device. For -// more information, see: -// https://www.kernel.org/doc/html/latest/watchdog/watchdog-api.html. -func IoctlWatchdogKeepalive(fd int) error { - return ioctl(fd, WDIOC_KEEPALIVE, 0) -} +// These are defined in ioctl.go and ioctl_linux.go. //sys Linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) @@ -778,16 +697,19 @@ type SockaddrVM struct { // CID and Port specify a context ID and port address for a VM socket. // Guests have a unique CID, and hosts may have a well-known CID of: // - VMADDR_CID_HYPERVISOR: refers to the hypervisor process. + // - VMADDR_CID_LOCAL: refers to local communication (loopback). // - VMADDR_CID_HOST: refers to other processes on the host. 
- CID uint32 - Port uint32 - raw RawSockaddrVM + CID uint32 + Port uint32 + Flags uint8 + raw RawSockaddrVM } func (sa *SockaddrVM) sockaddr() (unsafe.Pointer, _Socklen, error) { sa.raw.Family = AF_VSOCK sa.raw.Port = sa.Port sa.raw.Cid = sa.CID + sa.raw.Flags = sa.Flags return unsafe.Pointer(&sa.raw), SizeofSockaddrVM, nil } @@ -982,6 +904,46 @@ func (sa *SockaddrIUCV) sockaddr() (unsafe.Pointer, _Socklen, error) { return unsafe.Pointer(&sa.raw), SizeofSockaddrIUCV, nil } +type SockaddrNFC struct { + DeviceIdx uint32 + TargetIdx uint32 + NFCProtocol uint32 + raw RawSockaddrNFC +} + +func (sa *SockaddrNFC) sockaddr() (unsafe.Pointer, _Socklen, error) { + sa.raw.Sa_family = AF_NFC + sa.raw.Dev_idx = sa.DeviceIdx + sa.raw.Target_idx = sa.TargetIdx + sa.raw.Nfc_protocol = sa.NFCProtocol + return unsafe.Pointer(&sa.raw), SizeofSockaddrNFC, nil +} + +type SockaddrNFCLLCP struct { + DeviceIdx uint32 + TargetIdx uint32 + NFCProtocol uint32 + DestinationSAP uint8 + SourceSAP uint8 + ServiceName string + raw RawSockaddrNFCLLCP +} + +func (sa *SockaddrNFCLLCP) sockaddr() (unsafe.Pointer, _Socklen, error) { + sa.raw.Sa_family = AF_NFC + sa.raw.Dev_idx = sa.DeviceIdx + sa.raw.Target_idx = sa.TargetIdx + sa.raw.Nfc_protocol = sa.NFCProtocol + sa.raw.Dsap = sa.DestinationSAP + sa.raw.Ssap = sa.SourceSAP + if len(sa.ServiceName) > len(sa.raw.Service_name) { + return nil, 0, EINVAL + } + copy(sa.raw.Service_name[:], sa.ServiceName) + sa.raw.SetServiceNameLen(len(sa.ServiceName)) + return unsafe.Pointer(&sa.raw), SizeofSockaddrNFCLLCP, nil +} + var socketProtocol = func(fd int) (int, error) { return GetsockoptInt(fd, SOL_SOCKET, SO_PROTOCOL) } @@ -1092,8 +1054,9 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { case AF_VSOCK: pp := (*RawSockaddrVM)(unsafe.Pointer(rsa)) sa := &SockaddrVM{ - CID: pp.Cid, - Port: pp.Port, + CID: pp.Cid, + Port: pp.Port, + Flags: pp.Flags, } return sa, nil case AF_BLUETOOTH: @@ -1221,6 +1184,37 @@ func anyToSockaddr(fd int, rsa 
*RawSockaddrAny) (Sockaddr, error) { } return sa, nil } + case AF_NFC: + proto, err := socketProtocol(fd) + if err != nil { + return nil, err + } + switch proto { + case NFC_SOCKPROTO_RAW: + pp := (*RawSockaddrNFC)(unsafe.Pointer(rsa)) + sa := &SockaddrNFC{ + DeviceIdx: pp.Dev_idx, + TargetIdx: pp.Target_idx, + NFCProtocol: pp.Nfc_protocol, + } + return sa, nil + case NFC_SOCKPROTO_LLCP: + pp := (*RawSockaddrNFCLLCP)(unsafe.Pointer(rsa)) + if uint64(pp.Service_name_len) > uint64(len(pp.Service_name)) { + return nil, EINVAL + } + sa := &SockaddrNFCLLCP{ + DeviceIdx: pp.Dev_idx, + TargetIdx: pp.Target_idx, + NFCProtocol: pp.Nfc_protocol, + DestinationSAP: pp.Dsap, + SourceSAP: pp.Ssap, + ServiceName: string(pp.Service_name[:pp.Service_name_len]), + } + return sa, nil + default: + return nil, EINVAL + } } return nil, EAFNOSUPPORT } @@ -1228,7 +1222,11 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { func Accept(fd int) (nfd int, sa Sockaddr, err error) { var rsa RawSockaddrAny var len _Socklen = SizeofSockaddrAny - nfd, err = accept(fd, &rsa, &len) + // Try accept4 first for Android, then try accept for kernel older than 2.6.28 + nfd, err = accept4(fd, &rsa, &len, 0) + if err == ENOSYS { + nfd, err = accept(fd, &rsa, &len) + } if err != nil { return } @@ -1482,8 +1480,8 @@ func KeyctlRestrictKeyring(ringid int, keyType string, restriction string) error return keyctlRestrictKeyringByType(KEYCTL_RESTRICT_KEYRING, ringid, keyType, restriction) } -//sys keyctlRestrictKeyringByType(cmd int, arg2 int, keyType string, restriction string) (err error) = SYS_KEYCTL -//sys keyctlRestrictKeyring(cmd int, arg2 int) (err error) = SYS_KEYCTL +//sys keyctlRestrictKeyringByType(cmd int, arg2 int, keyType string, restriction string) (err error) = SYS_KEYCTL +//sys keyctlRestrictKeyring(cmd int, arg2 int) (err error) = SYS_KEYCTL func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from Sockaddr, err error) { var msg Msghdr @@ -1798,6 +1796,7 
@@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys ClockGettime(clockid int32, time *Timespec) (err error) //sys ClockNanosleep(clockid int32, flags int, request *Timespec, remain *Timespec) (err error) //sys Close(fd int) (err error) +//sys CloseRange(first uint, last uint, flags uint) (err error) //sys CopyFileRange(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error) //sys DeleteModule(name string, flags int) (err error) //sys Dup(oldfd int) (fd int, err error) @@ -1860,8 +1859,8 @@ func Getpgrp() (pid int) { //sys Nanosleep(time *Timespec, leftover *Timespec) (err error) //sys PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) //sys PivotRoot(newroot string, putold string) (err error) = SYS_PIVOT_ROOT -//sysnb prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) = SYS_PRLIMIT64 -//sys Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) +//sysnb prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) = SYS_PRLIMIT64 +//sys Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) //sys Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) = SYS_PSELECT6 //sys read(fd int, p []byte) (n int, err error) //sys Removexattr(path string, attr string) (err error) @@ -1934,9 +1933,9 @@ func Signalfd(fd int, sigmask *Sigset_t, flags int) (newfd int, err error) { //sys Syncfs(fd int) (err error) //sysnb Sysinfo(info *Sysinfo_t) (err error) //sys Tee(rfd int, wfd int, len int, flags int) (n int64, err error) -//sysnb TimerfdCreate(clockid int, flags int) (fd int, err error) -//sysnb TimerfdGettime(fd int, currValue *ItimerSpec) (err error) -//sysnb TimerfdSettime(fd int, flags int, newValue *ItimerSpec, oldValue *ItimerSpec) (err error) +//sysnb TimerfdCreate(clockid int, flags int) (fd int, err error) +//sysnb 
TimerfdGettime(fd int, currValue *ItimerSpec) (err error) +//sysnb TimerfdSettime(fd int, flags int, newValue *ItimerSpec, oldValue *ItimerSpec) (err error) //sysnb Tgkill(tgid int, tid int, sig syscall.Signal) (err error) //sysnb Times(tms *Tms) (ticks uintptr, err error) //sysnb Umask(mask int) (oldmask int) @@ -2196,8 +2195,8 @@ func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { return EACCES } -//sys nameToHandleAt(dirFD int, pathname string, fh *fileHandle, mountID *_C_int, flags int) (err error) = SYS_NAME_TO_HANDLE_AT -//sys openByHandleAt(mountFD int, fh *fileHandle, flags int) (fd int, err error) = SYS_OPEN_BY_HANDLE_AT +//sys nameToHandleAt(dirFD int, pathname string, fh *fileHandle, mountID *_C_int, flags int) (err error) = SYS_NAME_TO_HANDLE_AT +//sys openByHandleAt(mountFD int, fh *fileHandle, flags int) (fd int, err error) = SYS_OPEN_BY_HANDLE_AT // fileHandle is the argument to nameToHandleAt and openByHandleAt. We // originally tried to generate it via unix/linux/types.go with "type diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_386.go b/vendor/golang.org/x/sys/unix/syscall_linux_386.go index c97c2ee53..b430536c8 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_386.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+//go:build 386 && linux // +build 386,linux package unix @@ -31,7 +32,7 @@ func Pipe(p []int) (err error) { return } -//sysnb pipe2(p *[2]_C_int, flags int) (err error) +//sysnb pipe2(p *[2]_C_int, flags int) (err error) func Pipe2(p []int, flags int) (err error) { if len(p) != 2 { @@ -98,7 +99,7 @@ type rlimit32 struct { Max uint32 } -//sysnb getrlimit(resource int, rlim *rlimit32) (err error) = SYS_GETRLIMIT +//sysnb getrlimit(resource int, rlim *rlimit32) (err error) = SYS_GETRLIMIT const rlimInf32 = ^uint32(0) const rlimInf64 = ^uint64(0) @@ -129,7 +130,7 @@ func Getrlimit(resource int, rlim *Rlimit) (err error) { return } -//sysnb setrlimit(resource int, rlim *rlimit32) (err error) = SYS_SETRLIMIT +//sysnb setrlimit(resource int, rlim *rlimit32) (err error) = SYS_SETRLIMIT func Setrlimit(resource int, rlim *Rlimit) (err error) { err = prlimit(0, resource, rlim, nil) @@ -377,6 +378,10 @@ func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } +func (rsa *RawSockaddrNFCLLCP) SetServiceNameLen(length int) { + rsa.Service_name_len = uint32(length) +} + //sys poll(fds *PollFd, nfds int, timeout int) (n int, err error) func Poll(fds []PollFd, timeout int) (n int, err error) { diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go index 72efe86ed..85cd97da0 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+//go:build amd64 && linux // +build amd64,linux package unix @@ -138,7 +139,7 @@ func Pipe(p []int) (err error) { return } -//sysnb pipe2(p *[2]_C_int, flags int) (err error) +//sysnb pipe2(p *[2]_C_int, flags int) (err error) func Pipe2(p []int, flags int) (err error) { if len(p) != 2 { @@ -171,6 +172,10 @@ func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint64(length) } +func (rsa *RawSockaddrNFCLLCP) SetServiceNameLen(length int) { + rsa.Service_name_len = uint64(length) +} + //sys poll(fds *PollFd, nfds int, timeout int) (n int, err error) func Poll(fds []PollFd, timeout int) (n int, err error) { diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_amd64_gc.go b/vendor/golang.org/x/sys/unix/syscall_linux_amd64_gc.go index baa771f8a..8b0f0f3aa 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_amd64_gc.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_amd64_gc.go @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build amd64,linux -// +build gc +//go:build amd64 && linux && gc +// +build amd64,linux,gc package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm.go index 496837b1e..39a864d4e 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+//go:build arm && linux // +build arm,linux package unix @@ -35,7 +36,7 @@ func Pipe(p []int) (err error) { return } -//sysnb pipe2(p *[2]_C_int, flags int) (err error) +//sysnb pipe2(p *[2]_C_int, flags int) (err error) func Pipe2(p []int, flags int) (err error) { if len(p) != 2 { @@ -129,8 +130,8 @@ func Utime(path string, buf *Utimbuf) error { //sys utimes(path string, times *[2]Timeval) (err error) -//sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 -//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 +//sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 +//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 //sys Truncate(path string, length int64) (err error) = SYS_TRUNCATE64 //sys Ftruncate(fd int, length int64) (err error) = SYS_FTRUNCATE64 @@ -177,7 +178,7 @@ type rlimit32 struct { Max uint32 } -//sysnb getrlimit(resource int, rlim *rlimit32) (err error) = SYS_UGETRLIMIT +//sysnb getrlimit(resource int, rlim *rlimit32) (err error) = SYS_UGETRLIMIT const rlimInf32 = ^uint32(0) const rlimInf64 = ^uint64(0) @@ -208,7 +209,7 @@ func Getrlimit(resource int, rlim *Rlimit) (err error) { return } -//sysnb setrlimit(resource int, rlim *rlimit32) (err error) = SYS_SETRLIMIT +//sysnb setrlimit(resource int, rlim *rlimit32) (err error) = SYS_SETRLIMIT func Setrlimit(resource int, rlim *Rlimit) (err error) { err = prlimit(0, resource, rlim, nil) @@ -255,6 +256,10 @@ func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } +func (rsa *RawSockaddrNFCLLCP) SetServiceNameLen(length int) { + rsa.Service_name_len = uint32(length) +} + //sys poll(fds *PollFd, nfds int, timeout int) (n int, err error) func Poll(fds []PollFd, timeout int) (n int, err error) { diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go index c6de6b913..7f27ebf2f 100644 --- 
a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build arm64 && linux // +build arm64,linux package unix @@ -155,7 +156,7 @@ func Pipe(p []int) (err error) { return } -//sysnb pipe2(p *[2]_C_int, flags int) (err error) +//sysnb pipe2(p *[2]_C_int, flags int) (err error) func Pipe2(p []int, flags int) (err error) { if len(p) != 2 { @@ -206,6 +207,10 @@ func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint64(length) } +func (rsa *RawSockaddrNFCLLCP) SetServiceNameLen(length int) { + rsa.Service_name_len = uint64(length) +} + func InotifyInit() (fd int, err error) { return InotifyInit1(0) } diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_gc.go b/vendor/golang.org/x/sys/unix/syscall_linux_gc.go index 9edf3961b..2b1168d7d 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_gc.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_gc.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build linux && gc // +build linux,gc package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_gc_386.go b/vendor/golang.org/x/sys/unix/syscall_linux_gc_386.go index 90e33d8cf..9843fb489 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_gc_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_gc_386.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+//go:build linux && gc && 386 // +build linux,gc,386 package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_gc_arm.go b/vendor/golang.org/x/sys/unix/syscall_linux_gc_arm.go index 1a97baae7..a6008fccd 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_gc_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_gc_arm.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build arm && gc && linux // +build arm,gc,linux package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_386.go b/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_386.go index 308eb7aec..7740af242 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_386.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build linux && gccgo && 386 // +build linux,gccgo,386 package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_arm.go b/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_arm.go index aa7fc9e19..e16a12299 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_arm.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build linux && gccgo && arm // +build linux,gccgo,arm package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go b/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go index f0287476c..27aee81d9 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+//go:build linux && (mips64 || mips64le) // +build linux // +build mips64 mips64le @@ -104,7 +105,7 @@ func Pipe(p []int) (err error) { return } -//sysnb pipe2(p *[2]_C_int, flags int) (err error) +//sysnb pipe2(p *[2]_C_int, flags int) (err error) func Pipe2(p []int, flags int) (err error) { if len(p) != 2 { @@ -216,6 +217,10 @@ func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint64(length) } +func (rsa *RawSockaddrNFCLLCP) SetServiceNameLen(length int) { + rsa.Service_name_len = uint64(length) +} + func InotifyInit() (fd int, err error) { return InotifyInit1(0) } diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go b/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go index c11328111..3a5621e37 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build linux && (mips || mipsle) // +build linux // +build mips mipsle @@ -112,7 +113,7 @@ func setTimeval(sec, usec int64) Timeval { return Timeval{Sec: int32(sec), Usec: int32(usec)} } -//sysnb pipe2(p *[2]_C_int, flags int) (err error) +//sysnb pipe2(p *[2]_C_int, flags int) (err error) func Pipe2(p []int, flags int) (err error) { if len(p) != 2 { @@ -125,7 +126,7 @@ func Pipe2(p []int, flags int) (err error) { return } -//sysnb pipe() (p1 int, p2 int, err error) +//sysnb pipe() (p1 int, p2 int, err error) func Pipe(p []int) (err error) { if len(p) != 2 { @@ -153,7 +154,7 @@ type rlimit32 struct { Max uint32 } -//sysnb getrlimit(resource int, rlim *rlimit32) (err error) = SYS_GETRLIMIT +//sysnb getrlimit(resource int, rlim *rlimit32) (err error) = SYS_GETRLIMIT func Getrlimit(resource int, rlim *Rlimit) (err error) { err = prlimit(0, resource, nil, rlim) @@ -181,7 +182,7 @@ func Getrlimit(resource int, rlim *Rlimit) (err error) { return } -//sysnb setrlimit(resource int, rlim *rlimit32) (err error) = 
SYS_SETRLIMIT +//sysnb setrlimit(resource int, rlim *rlimit32) (err error) = SYS_SETRLIMIT func Setrlimit(resource int, rlim *Rlimit) (err error) { err = prlimit(0, resource, rlim, nil) @@ -228,6 +229,10 @@ func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } +func (rsa *RawSockaddrNFCLLCP) SetServiceNameLen(length int) { + rsa.Service_name_len = uint32(length) +} + //sys poll(fds *PollFd, nfds int, timeout int) (n int, err error) func Poll(fds []PollFd, timeout int) (n int, err error) { diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go b/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go new file mode 100644 index 000000000..cf0d36f76 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go @@ -0,0 +1,276 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build linux && ppc +// +build linux +// +build ppc + +package unix + +import ( + "syscall" + "unsafe" +) + +//sys dup2(oldfd int, newfd int) (err error) +//sysnb EpollCreate(size int) (fd int, err error) +//sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) +//sys Fchown(fd int, uid int, gid int) (err error) +//sys Fstat(fd int, stat *Stat_t) (err error) = SYS_FSTAT64 +//sys Fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) = SYS_FSTATAT64 +//sys Ftruncate(fd int, length int64) (err error) = SYS_FTRUNCATE64 +//sysnb Getegid() (egid int) +//sysnb Geteuid() (euid int) +//sysnb Getgid() (gid int) +//sysnb Getuid() (uid int) +//sysnb InotifyInit() (fd int, err error) +//sys Ioperm(from int, num int, on int) (err error) +//sys Iopl(level int) (err error) +//sys Lchown(path string, uid int, gid int) (err error) +//sys Listen(s int, n int) (err error) +//sys Lstat(path string, stat *Stat_t) (err error) = SYS_LSTAT64 +//sys Pause() (err error) +//sys Pread(fd int, p []byte, offset int64) (n int, err error) = 
SYS_PREAD64 +//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 +//sys Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) +//sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) = SYS__NEWSELECT +//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) = SYS_SENDFILE64 +//sys setfsgid(gid int) (prev int, err error) +//sys setfsuid(uid int) (prev int, err error) +//sysnb Setregid(rgid int, egid int) (err error) +//sysnb Setresgid(rgid int, egid int, sgid int) (err error) +//sysnb Setresuid(ruid int, euid int, suid int) (err error) +//sysnb Setreuid(ruid int, euid int) (err error) +//sys Shutdown(fd int, how int) (err error) +//sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error) +//sys Stat(path string, stat *Stat_t) (err error) = SYS_STAT64 +//sys Truncate(path string, length int64) (err error) = SYS_TRUNCATE64 +//sys Ustat(dev int, ubuf *Ustat_t) (err error) +//sys accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) +//sys accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) +//sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) +//sys connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) +//sysnb getgroups(n int, list *_Gid_t) (nn int, err error) +//sysnb setgroups(n int, list *_Gid_t) (err error) +//sys getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) +//sys setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) +//sysnb socket(domain int, typ int, proto int) (fd int, err error) +//sysnb socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) +//sysnb getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) +//sysnb getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) +//sys recvfrom(fd int, p []byte, 
flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) +//sys sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) +//sys recvmsg(s int, msg *Msghdr, flags int) (n int, err error) +//sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error) + +//sys futimesat(dirfd int, path string, times *[2]Timeval) (err error) +//sysnb Gettimeofday(tv *Timeval) (err error) +//sysnb Time(t *Time_t) (tt Time_t, err error) +//sys Utime(path string, buf *Utimbuf) (err error) +//sys utimes(path string, times *[2]Timeval) (err error) + +func Fadvise(fd int, offset int64, length int64, advice int) (err error) { + _, _, e1 := Syscall6(SYS_FADVISE64_64, uintptr(fd), uintptr(advice), uintptr(offset>>32), uintptr(offset), uintptr(length>>32), uintptr(length)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func seek(fd int, offset int64, whence int) (int64, syscall.Errno) { + var newoffset int64 + offsetLow := uint32(offset & 0xffffffff) + offsetHigh := uint32((offset >> 32) & 0xffffffff) + _, _, err := Syscall6(SYS__LLSEEK, uintptr(fd), uintptr(offsetHigh), uintptr(offsetLow), uintptr(unsafe.Pointer(&newoffset)), uintptr(whence), 0) + return newoffset, err +} + +func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { + newoffset, errno := seek(fd, offset, whence) + if errno != 0 { + return 0, errno + } + return newoffset, nil +} + +func Fstatfs(fd int, buf *Statfs_t) (err error) { + _, _, e := Syscall(SYS_FSTATFS64, uintptr(fd), unsafe.Sizeof(*buf), uintptr(unsafe.Pointer(buf))) + if e != 0 { + err = e + } + return +} + +func Statfs(path string, buf *Statfs_t) (err error) { + pathp, err := BytePtrFromString(path) + if err != nil { + return err + } + _, _, e := Syscall(SYS_STATFS64, uintptr(unsafe.Pointer(pathp)), unsafe.Sizeof(*buf), uintptr(unsafe.Pointer(buf))) + if e != 0 { + err = e + } + return +} + +//sys mmap2(addr uintptr, length uintptr, prot int, flags int, fd int, pageOffset uintptr) (xaddr uintptr, 
err error) + +func mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) { + page := uintptr(offset / 4096) + if offset != int64(page)*4096 { + return 0, EINVAL + } + return mmap2(addr, length, prot, flags, fd, page) +} + +func setTimespec(sec, nsec int64) Timespec { + return Timespec{Sec: int32(sec), Nsec: int32(nsec)} +} + +func setTimeval(sec, usec int64) Timeval { + return Timeval{Sec: int32(sec), Usec: int32(usec)} +} + +type rlimit32 struct { + Cur uint32 + Max uint32 +} + +//sysnb getrlimit(resource int, rlim *rlimit32) (err error) = SYS_UGETRLIMIT + +const rlimInf32 = ^uint32(0) +const rlimInf64 = ^uint64(0) + +func Getrlimit(resource int, rlim *Rlimit) (err error) { + err = prlimit(0, resource, nil, rlim) + if err != ENOSYS { + return err + } + + rl := rlimit32{} + err = getrlimit(resource, &rl) + if err != nil { + return + } + + if rl.Cur == rlimInf32 { + rlim.Cur = rlimInf64 + } else { + rlim.Cur = uint64(rl.Cur) + } + + if rl.Max == rlimInf32 { + rlim.Max = rlimInf64 + } else { + rlim.Max = uint64(rl.Max) + } + return +} + +//sysnb setrlimit(resource int, rlim *rlimit32) (err error) = SYS_SETRLIMIT + +func Setrlimit(resource int, rlim *Rlimit) (err error) { + err = prlimit(0, resource, rlim, nil) + if err != ENOSYS { + return err + } + + rl := rlimit32{} + if rlim.Cur == rlimInf64 { + rl.Cur = rlimInf32 + } else if rlim.Cur < uint64(rlimInf32) { + rl.Cur = uint32(rlim.Cur) + } else { + return EINVAL + } + if rlim.Max == rlimInf64 { + rl.Max = rlimInf32 + } else if rlim.Max < uint64(rlimInf32) { + rl.Max = uint32(rlim.Max) + } else { + return EINVAL + } + + return setrlimit(resource, &rl) +} + +func (r *PtraceRegs) PC() uint32 { return r.Nip } + +func (r *PtraceRegs) SetPC(pc uint32) { r.Nip = pc } + +func (iov *Iovec) SetLen(length int) { + iov.Len = uint32(length) +} + +func (msghdr *Msghdr) SetControllen(length int) { + msghdr.Controllen = uint32(length) +} + +func (msghdr *Msghdr) SetIovlen(length 
int) { + msghdr.Iovlen = uint32(length) +} + +func (cmsg *Cmsghdr) SetLen(length int) { + cmsg.Len = uint32(length) +} + +func (rsa *RawSockaddrNFCLLCP) SetServiceNameLen(length int) { + rsa.Service_name_len = uint32(length) +} + +//sysnb pipe(p *[2]_C_int) (err error) + +func Pipe(p []int) (err error) { + if len(p) != 2 { + return EINVAL + } + var pp [2]_C_int + err = pipe(&pp) + p[0] = int(pp[0]) + p[1] = int(pp[1]) + return +} + +//sysnb pipe2(p *[2]_C_int, flags int) (err error) + +func Pipe2(p []int, flags int) (err error) { + if len(p) != 2 { + return EINVAL + } + var pp [2]_C_int + err = pipe2(&pp, flags) + p[0] = int(pp[0]) + p[1] = int(pp[1]) + return +} + +//sys poll(fds *PollFd, nfds int, timeout int) (n int, err error) + +func Poll(fds []PollFd, timeout int) (n int, err error) { + if len(fds) == 0 { + return poll(nil, 0, timeout) + } + return poll(&fds[0], len(fds), timeout) +} + +//sys syncFileRange2(fd int, flags int, off int64, n int64) (err error) = SYS_SYNC_FILE_RANGE2 + +func SyncFileRange(fd int, off int64, n int64, flags int) error { + // The sync_file_range and sync_file_range2 syscalls differ only in the + // order of their arguments. + return syncFileRange2(fd, flags, off, n) +} + +//sys kexecFileLoad(kernelFd int, initrdFd int, cmdlineLen int, cmdline string, flags int) (err error) + +func KexecFileLoad(kernelFd int, initrdFd int, cmdline string, flags int) error { + cmdlineLen := len(cmdline) + if cmdlineLen > 0 { + // Account for the additional NULL byte added by + // BytePtrFromString in kexecFileLoad. The kexec_file_load + // syscall expects a NULL-terminated string. 
+ cmdlineLen++ + } + return kexecFileLoad(kernelFd, initrdFd, cmdlineLen, cmdline, flags) +} diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go b/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go index 349374409..5259a5fea 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build linux && (ppc64 || ppc64le) // +build linux // +build ppc64 ppc64le @@ -99,7 +100,11 @@ func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint64(length) } -//sysnb pipe(p *[2]_C_int) (err error) +func (rsa *RawSockaddrNFCLLCP) SetServiceNameLen(length int) { + rsa.Service_name_len = uint64(length) +} + +//sysnb pipe(p *[2]_C_int) (err error) func Pipe(p []int) (err error) { if len(p) != 2 { @@ -112,7 +117,7 @@ func Pipe(p []int) (err error) { return } -//sysnb pipe2(p *[2]_C_int, flags int) (err error) +//sysnb pipe2(p *[2]_C_int, flags int) (err error) func Pipe2(p []int, flags int) (err error) { if len(p) != 2 { diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go index b0b150556..8ef821e5d 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+//go:build riscv64 && linux // +build riscv64,linux package unix @@ -154,7 +155,7 @@ func Pipe(p []int) (err error) { return } -//sysnb pipe2(p *[2]_C_int, flags int) (err error) +//sysnb pipe2(p *[2]_C_int, flags int) (err error) func Pipe2(p []int, flags int) (err error) { if len(p) != 2 { @@ -187,6 +188,10 @@ func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint64(length) } +func (rsa *RawSockaddrNFCLLCP) SetServiceNameLen(length int) { + rsa.Service_name_len = uint64(length) +} + func InotifyInit() (fd int, err error) { return InotifyInit1(0) } diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go b/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go index 2363f7499..a1c0574b5 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build s390x && linux // +build s390x,linux package unix @@ -76,7 +77,7 @@ func setTimeval(sec, usec int64) Timeval { return Timeval{Sec: sec, Usec: usec} } -//sysnb pipe2(p *[2]_C_int, flags int) (err error) +//sysnb pipe2(p *[2]_C_int, flags int) (err error) func Pipe(p []int) (err error) { if len(p) != 2 { @@ -128,6 +129,10 @@ func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint64(length) } +func (rsa *RawSockaddrNFCLLCP) SetServiceNameLen(length int) { + rsa.Service_name_len = uint64(length) +} + // Linux on s390x uses the old mmap interface, which requires arguments to be passed in a struct. // mmap2 also requires arguments to be passed in a struct; it is currently not exposed in . 
func mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) { @@ -249,7 +254,7 @@ func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen } func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) error { - args := [4]uintptr{uintptr(s), uintptr(level), uintptr(name), uintptr(val)} + args := [5]uintptr{uintptr(s), uintptr(level), uintptr(name), uintptr(val), vallen} _, _, err := Syscall(SYS_SOCKETCALL, netSetSockOpt, uintptr(unsafe.Pointer(&args)), 0) if err != 0 { return err diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go b/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go index d389f1518..de14b8898 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build sparc64 && linux // +build sparc64,linux package unix @@ -115,7 +116,11 @@ func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint64(length) } -//sysnb pipe(p *[2]_C_int) (err error) +func (rsa *RawSockaddrNFCLLCP) SetServiceNameLen(length int) { + rsa.Service_name_len = uint64(length) +} + +//sysnb pipe(p *[2]_C_int) (err error) func Pipe(p []int) (err error) { if len(p) != 2 { @@ -128,7 +133,7 @@ func Pipe(p []int) (err error) { return } -//sysnb pipe2(p *[2]_C_int, flags int) (err error) +//sysnb pipe2(p *[2]_C_int, flags int) (err error) func Pipe2(p []int, flags int) (err error) { if len(p) != 2 { diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd.go b/vendor/golang.org/x/sys/unix/syscall_netbsd.go index 1e6843b4c..853d5f0f4 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd.go @@ -110,7 +110,8 @@ func direntNamlen(buf []byte) (uint64, bool) { return readInt(buf, unsafe.Offsetof(Dirent{}.Namlen), 
unsafe.Sizeof(Dirent{}.Namlen)) } -//sysnb pipe() (fd1 int, fd2 int, err error) +//sysnb pipe() (fd1 int, fd2 int, err error) + func Pipe(p []int) (err error) { if len(p) != 2 { return EINVAL @@ -119,7 +120,21 @@ func Pipe(p []int) (err error) { return } -//sys Getdents(fd int, buf []byte) (n int, err error) +//sysnb pipe2(p *[2]_C_int, flags int) (err error) + +func Pipe2(p []int, flags int) error { + if len(p) != 2 { + return EINVAL + } + var pp [2]_C_int + err := pipe2(&pp, flags) + p[0] = int(pp[0]) + p[1] = int(pp[1]) + return err +} + +//sys Getdents(fd int, buf []byte) (n int, err error) + func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { n, err = Getdents(fd, buf) if err != nil || basep == nil { @@ -159,7 +174,7 @@ func setattrlistTimes(path string, times []Timespec, flags int) error { //sys ioctl(fd int, req uint, arg uintptr) (err error) -//sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL +//sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL func IoctlGetPtmget(fd int, req uint) (*Ptmget, error) { var value Ptmget diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go b/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go index 24da8b524..5199d282f 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+//go:build 386 && netbsd // +build 386,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go index 25a0ac825..70a9c52e9 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build amd64 && netbsd // +build amd64,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go b/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go index 21591ecd4..3eb5942f9 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build arm && netbsd // +build arm,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/syscall_netbsd_arm64.go index 804749635..fc6ccfd81 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd_arm64.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+//go:build arm64 && netbsd // +build arm64,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd.go b/vendor/golang.org/x/sys/unix/syscall_openbsd.go index 6a50b50bd..22b550385 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd.go @@ -92,7 +92,7 @@ func Pipe2(p []int, flags int) error { return err } -//sys Getdents(fd int, buf []byte) (n int, err error) +//sys Getdents(fd int, buf []byte) (n int, err error) func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { n, err = Getdents(fd, buf) if err != nil || basep == nil { @@ -154,7 +154,7 @@ func setattrlistTimes(path string, times []Timespec, flags int) error { //sys ioctl(fd int, req uint, arg uintptr) (err error) -//sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL +//sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL //sys ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go index 42b5a0e51..6baabcdcb 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build 386 && openbsd // +build 386,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go index 6ea4b4883..bab25360e 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+//go:build amd64 && openbsd // +build amd64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go index 1c3d26fa2..8eed3c4d4 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build arm && openbsd // +build arm,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_arm64.go index a8c458cb0..483dde99d 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_arm64.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build arm64 && openbsd // +build arm64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris.go b/vendor/golang.org/x/sys/unix/syscall_solaris.go index 184786ed9..77fcde7c1 100644 --- a/vendor/golang.org/x/sys/unix/syscall_solaris.go +++ b/vendor/golang.org/x/sys/unix/syscall_solaris.go @@ -565,7 +565,12 @@ func Minor(dev uint64) uint32 { * Expose the ioctl function */ -//sys ioctl(fd int, req uint, arg uintptr) (err error) +//sys ioctlRet(fd int, req uint, arg uintptr) (ret int, err error) = libc.ioctl + +func ioctl(fd int, req uint, arg uintptr) (err error) { + _, err = ioctlRet(fd, req, arg) + return err +} func IoctlSetTermio(fd int, req uint, value *Termio) error { err := ioctl(fd, req, uintptr(unsafe.Pointer(value))) @@ -579,7 +584,7 @@ func IoctlGetTermio(fd int, req uint) (*Termio, error) { return &value, err } -//sys poll(fds *PollFd, nfds int, timeout int) (n int, err error) +//sys poll(fds *PollFd, nfds int, timeout int) (n int, err error) func Poll(fds []PollFd, timeout int) (n int, err error) { if len(fds) == 0 { @@ -682,6 
+687,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys Statvfs(path string, vfsstat *Statvfs_t) (err error) //sys Symlink(path string, link string) (err error) //sys Sync() (err error) +//sys Sysconf(which int) (n int64, err error) //sysnb Times(tms *Tms) (ticks uintptr, err error) //sys Truncate(path string, length int64) (err error) //sys Fsync(fd int) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go index b22a34d7a..0bd25ef81 100644 --- a/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build amd64 && solaris // +build amd64,solaris package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_unix.go b/vendor/golang.org/x/sys/unix/syscall_unix.go index 400ba9fbc..a7618ceb5 100644 --- a/vendor/golang.org/x/sys/unix/syscall_unix.go +++ b/vendor/golang.org/x/sys/unix/syscall_unix.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris // +build aix darwin dragonfly freebsd linux netbsd openbsd solaris package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_unix_gc.go b/vendor/golang.org/x/sys/unix/syscall_unix_gc.go index 87bd161ce..5898e9a52 100644 --- a/vendor/golang.org/x/sys/unix/syscall_unix_gc.go +++ b/vendor/golang.org/x/sys/unix/syscall_unix_gc.go @@ -2,8 +2,11 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+//go:build (darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris) && gc && !ppc64le && !ppc64 // +build darwin dragonfly freebsd linux netbsd openbsd solaris -// +build gc,!ppc64le,!ppc64 +// +build gc +// +build !ppc64le +// +build !ppc64 package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_unix_gc_ppc64x.go b/vendor/golang.org/x/sys/unix/syscall_unix_gc_ppc64x.go index d36216c3c..f6f707acf 100644 --- a/vendor/golang.org/x/sys/unix/syscall_unix_gc_ppc64x.go +++ b/vendor/golang.org/x/sys/unix/syscall_unix_gc_ppc64x.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build linux && (ppc64le || ppc64) && gc // +build linux // +build ppc64le ppc64 // +build gc diff --git a/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go b/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go new file mode 100644 index 000000000..1ffd8bfcf --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go @@ -0,0 +1,1829 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build zos && s390x +// +build zos,s390x + +package unix + +import ( + "bytes" + "runtime" + "sort" + "sync" + "syscall" + "unsafe" +) + +const ( + O_CLOEXEC = 0 // Dummy value (not supported). 
+ AF_LOCAL = AF_UNIX // AF_LOCAL is an alias for AF_UNIX +) + +func syscall_syscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err Errno) +func syscall_rawsyscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err Errno) +func syscall_syscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno) +func syscall_rawsyscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno) +func syscall_syscall9(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err Errno) +func syscall_rawsyscall9(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err Errno) + +func copyStat(stat *Stat_t, statLE *Stat_LE_t) { + stat.Dev = uint64(statLE.Dev) + stat.Ino = uint64(statLE.Ino) + stat.Nlink = uint64(statLE.Nlink) + stat.Mode = uint32(statLE.Mode) + stat.Uid = uint32(statLE.Uid) + stat.Gid = uint32(statLE.Gid) + stat.Rdev = uint64(statLE.Rdev) + stat.Size = statLE.Size + stat.Atim.Sec = int64(statLE.Atim) + stat.Atim.Nsec = 0 //zos doesn't return nanoseconds + stat.Mtim.Sec = int64(statLE.Mtim) + stat.Mtim.Nsec = 0 //zos doesn't return nanoseconds + stat.Ctim.Sec = int64(statLE.Ctim) + stat.Ctim.Nsec = 0 //zos doesn't return nanoseconds + stat.Blksize = int64(statLE.Blksize) + stat.Blocks = statLE.Blocks +} + +func svcCall(fnptr unsafe.Pointer, argv *unsafe.Pointer, dsa *uint64) +func svcLoad(name *byte) unsafe.Pointer +func svcUnload(name *byte, fnptr unsafe.Pointer) int64 + +func (d *Dirent) NameString() string { + if d == nil { + return "" + } + return string(d.Name[:d.Namlen]) +} + +func (sa *SockaddrInet4) sockaddr() (unsafe.Pointer, _Socklen, error) { + if sa.Port < 0 || sa.Port > 0xFFFF { + return nil, 0, EINVAL + } + sa.raw.Len = SizeofSockaddrInet4 + sa.raw.Family = AF_INET + p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port)) + p[0] = byte(sa.Port >> 8) + p[1] = byte(sa.Port) + for i := 0; i < len(sa.Addr); i++ { + sa.raw.Addr[i] = sa.Addr[i] + } + return unsafe.Pointer(&sa.raw), _Socklen(sa.raw.Len), nil +} + +func (sa 
*SockaddrInet6) sockaddr() (unsafe.Pointer, _Socklen, error) { + if sa.Port < 0 || sa.Port > 0xFFFF { + return nil, 0, EINVAL + } + sa.raw.Len = SizeofSockaddrInet6 + sa.raw.Family = AF_INET6 + p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port)) + p[0] = byte(sa.Port >> 8) + p[1] = byte(sa.Port) + sa.raw.Scope_id = sa.ZoneId + for i := 0; i < len(sa.Addr); i++ { + sa.raw.Addr[i] = sa.Addr[i] + } + return unsafe.Pointer(&sa.raw), _Socklen(sa.raw.Len), nil +} + +func (sa *SockaddrUnix) sockaddr() (unsafe.Pointer, _Socklen, error) { + name := sa.Name + n := len(name) + if n >= len(sa.raw.Path) || n == 0 { + return nil, 0, EINVAL + } + sa.raw.Len = byte(3 + n) // 2 for Family, Len; 1 for NUL + sa.raw.Family = AF_UNIX + for i := 0; i < n; i++ { + sa.raw.Path[i] = int8(name[i]) + } + return unsafe.Pointer(&sa.raw), _Socklen(sa.raw.Len), nil +} + +func anyToSockaddr(_ int, rsa *RawSockaddrAny) (Sockaddr, error) { + // TODO(neeilan): Implement use of first param (fd) + switch rsa.Addr.Family { + case AF_UNIX: + pp := (*RawSockaddrUnix)(unsafe.Pointer(rsa)) + sa := new(SockaddrUnix) + // For z/OS, only replace NUL with @ when the + // length is not zero. + if pp.Len != 0 && pp.Path[0] == 0 { + // "Abstract" Unix domain socket. + // Rewrite leading NUL as @ for textual display. + // (This is the standard convention.) + // Not friendly to overwrite in place, + // but the callers below don't care. + pp.Path[0] = '@' + } + + // Assume path ends at NUL. + // + // For z/OS, the length of the name is a field + // in the structure. To be on the safe side, we + // will still scan the name for a NUL but only + // to the length provided in the structure. + // + // This is not technically the Linux semantics for + // abstract Unix domain sockets--they are supposed + // to be uninterpreted fixed-size binary blobs--but + // everyone uses this convention. 
+ n := 0 + for n < int(pp.Len) && pp.Path[n] != 0 { + n++ + } + bytes := (*[len(pp.Path)]byte)(unsafe.Pointer(&pp.Path[0]))[0:n] + sa.Name = string(bytes) + return sa, nil + + case AF_INET: + pp := (*RawSockaddrInet4)(unsafe.Pointer(rsa)) + sa := new(SockaddrInet4) + p := (*[2]byte)(unsafe.Pointer(&pp.Port)) + sa.Port = int(p[0])<<8 + int(p[1]) + for i := 0; i < len(sa.Addr); i++ { + sa.Addr[i] = pp.Addr[i] + } + return sa, nil + + case AF_INET6: + pp := (*RawSockaddrInet6)(unsafe.Pointer(rsa)) + sa := new(SockaddrInet6) + p := (*[2]byte)(unsafe.Pointer(&pp.Port)) + sa.Port = int(p[0])<<8 + int(p[1]) + sa.ZoneId = pp.Scope_id + for i := 0; i < len(sa.Addr); i++ { + sa.Addr[i] = pp.Addr[i] + } + return sa, nil + } + return nil, EAFNOSUPPORT +} + +func Accept(fd int) (nfd int, sa Sockaddr, err error) { + var rsa RawSockaddrAny + var len _Socklen = SizeofSockaddrAny + nfd, err = accept(fd, &rsa, &len) + if err != nil { + return + } + // TODO(neeilan): Remove 0 in call + sa, err = anyToSockaddr(0, &rsa) + if err != nil { + Close(nfd) + nfd = 0 + } + return +} + +func (iov *Iovec) SetLen(length int) { + iov.Len = uint64(length) +} + +func (msghdr *Msghdr) SetControllen(length int) { + msghdr.Controllen = int32(length) +} + +func (cmsg *Cmsghdr) SetLen(length int) { + cmsg.Len = int32(length) +} + +//sys fcntl(fd int, cmd int, arg int) (val int, err error) +//sys read(fd int, p []byte) (n int, err error) +//sys readlen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_READ +//sys write(fd int, p []byte) (n int, err error) + +//sys accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) = SYS___ACCEPT_A +//sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) = SYS___BIND_A +//sys connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) = SYS___CONNECT_A +//sysnb getgroups(n int, list *_Gid_t) (nn int, err error) +//sysnb setgroups(n int, list *_Gid_t) (err error) +//sys getsockopt(s int, level int, name int, val unsafe.Pointer, 
vallen *_Socklen) (err error) +//sys setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) +//sysnb socket(domain int, typ int, proto int) (fd int, err error) +//sysnb socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) +//sysnb getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) = SYS___GETPEERNAME_A +//sysnb getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) = SYS___GETSOCKNAME_A +//sys recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) = SYS___RECVFROM_A +//sys sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) = SYS___SENDTO_A +//sys recvmsg(s int, msg *Msghdr, flags int) (n int, err error) = SYS___RECVMSG_A +//sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error) = SYS___SENDMSG_A +//sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) = SYS_MMAP +//sys munmap(addr uintptr, length uintptr) (err error) = SYS_MUNMAP +//sys ioctl(fd int, req uint, arg uintptr) (err error) = SYS_IOCTL + +//sys Access(path string, mode uint32) (err error) = SYS___ACCESS_A +//sys Chdir(path string) (err error) = SYS___CHDIR_A +//sys Chown(path string, uid int, gid int) (err error) = SYS___CHOWN_A +//sys Chmod(path string, mode uint32) (err error) = SYS___CHMOD_A +//sys Creat(path string, mode uint32) (fd int, err error) = SYS___CREAT_A +//sys Dup(oldfd int) (fd int, err error) +//sys Dup2(oldfd int, newfd int) (err error) +//sys Errno2() (er2 int) = SYS___ERRNO2 +//sys Err2ad() (eadd *int) = SYS___ERR2AD +//sys Exit(code int) +//sys Fchdir(fd int) (err error) +//sys Fchmod(fd int, mode uint32) (err error) +//sys Fchown(fd int, uid int, gid int) (err error) +//sys FcntlInt(fd uintptr, cmd int, arg int) (retval int, err error) = SYS_FCNTL +//sys fstat(fd int, stat *Stat_LE_t) (err error) + +func Fstat(fd int, stat *Stat_t) (err error) { + var statLE 
Stat_LE_t + err = fstat(fd, &statLE) + copyStat(stat, &statLE) + return +} + +//sys Fstatvfs(fd int, stat *Statvfs_t) (err error) = SYS_FSTATVFS +//sys Fsync(fd int) (err error) +//sys Ftruncate(fd int, length int64) (err error) +//sys Getpagesize() (pgsize int) = SYS_GETPAGESIZE +//sys Mprotect(b []byte, prot int) (err error) = SYS_MPROTECT +//sys Msync(b []byte, flags int) (err error) = SYS_MSYNC +//sys Poll(fds []PollFd, timeout int) (n int, err error) = SYS_POLL +//sys Times(tms *Tms) (ticks uintptr, err error) = SYS_TIMES +//sys W_Getmntent(buff *byte, size int) (lastsys int, err error) = SYS_W_GETMNTENT +//sys W_Getmntent_A(buff *byte, size int) (lastsys int, err error) = SYS___W_GETMNTENT_A + +//sys mount_LE(path string, filesystem string, fstype string, mtm uint32, parmlen int32, parm string) (err error) = SYS___MOUNT_A +//sys unmount(filesystem string, mtm int) (err error) = SYS___UMOUNT_A +//sys Chroot(path string) (err error) = SYS___CHROOT_A +//sys Select(nmsgsfds int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (ret int, err error) = SYS_SELECT +//sysnb Uname(buf *Utsname) (err error) = SYS___UNAME_A + +func Ptsname(fd int) (name string, err error) { + r0, _, e1 := syscall_syscall(SYS___PTSNAME_A, uintptr(fd), 0, 0) + name = u2s(unsafe.Pointer(r0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func u2s(cstr unsafe.Pointer) string { + str := (*[1024]uint8)(cstr) + i := 0 + for str[i] != 0 { + i++ + } + return string(str[:i]) +} + +func Close(fd int) (err error) { + _, _, e1 := syscall_syscall(SYS_CLOSE, uintptr(fd), 0, 0) + for i := 0; e1 == EAGAIN && i < 10; i++ { + _, _, _ = syscall_syscall(SYS_USLEEP, uintptr(10), 0, 0) + _, _, e1 = syscall_syscall(SYS_CLOSE, uintptr(fd), 0, 0) + } + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var mapper = &mmapper{ + active: make(map[*byte][]byte), + mmap: mmap, + munmap: munmap, +} + +// Dummy function: there are no semantics for Madvise on z/OS +func Madvise(b []byte, advice int) (err error) 
{ + return +} + +func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) { + return mapper.Mmap(fd, offset, length, prot, flags) +} + +func Munmap(b []byte) (err error) { + return mapper.Munmap(b) +} + +//sys Gethostname(buf []byte) (err error) = SYS___GETHOSTNAME_A +//sysnb Getegid() (egid int) +//sysnb Geteuid() (uid int) +//sysnb Getgid() (gid int) +//sysnb Getpid() (pid int) +//sysnb Getpgid(pid int) (pgid int, err error) = SYS_GETPGID + +func Getpgrp() (pid int) { + pid, _ = Getpgid(0) + return +} + +//sysnb Getppid() (pid int) +//sys Getpriority(which int, who int) (prio int, err error) +//sysnb Getrlimit(resource int, rlim *Rlimit) (err error) = SYS_GETRLIMIT + +//sysnb getrusage(who int, rusage *rusage_zos) (err error) = SYS_GETRUSAGE + +func Getrusage(who int, rusage *Rusage) (err error) { + var ruz rusage_zos + err = getrusage(who, &ruz) + //Only the first two fields of Rusage are set + rusage.Utime.Sec = ruz.Utime.Sec + rusage.Utime.Usec = int64(ruz.Utime.Usec) + rusage.Stime.Sec = ruz.Stime.Sec + rusage.Stime.Usec = int64(ruz.Stime.Usec) + return +} + +//sysnb Getsid(pid int) (sid int, err error) = SYS_GETSID +//sysnb Getuid() (uid int) +//sysnb Kill(pid int, sig Signal) (err error) +//sys Lchown(path string, uid int, gid int) (err error) = SYS___LCHOWN_A +//sys Link(path string, link string) (err error) = SYS___LINK_A +//sys Listen(s int, n int) (err error) +//sys lstat(path string, stat *Stat_LE_t) (err error) = SYS___LSTAT_A + +func Lstat(path string, stat *Stat_t) (err error) { + var statLE Stat_LE_t + err = lstat(path, &statLE) + copyStat(stat, &statLE) + return +} + +//sys Mkdir(path string, mode uint32) (err error) = SYS___MKDIR_A +//sys Mkfifo(path string, mode uint32) (err error) = SYS___MKFIFO_A +//sys Mknod(path string, mode uint32, dev int) (err error) = SYS___MKNOD_A +//sys Pread(fd int, p []byte, offset int64) (n int, err error) +//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) +//sys 
Readlink(path string, buf []byte) (n int, err error) = SYS___READLINK_A +//sys Rename(from string, to string) (err error) = SYS___RENAME_A +//sys Rmdir(path string) (err error) = SYS___RMDIR_A +//sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK +//sys Setpriority(which int, who int, prio int) (err error) +//sysnb Setpgid(pid int, pgid int) (err error) = SYS_SETPGID +//sysnb Setrlimit(resource int, lim *Rlimit) (err error) +//sysnb Setregid(rgid int, egid int) (err error) = SYS_SETREGID +//sysnb Setreuid(ruid int, euid int) (err error) = SYS_SETREUID +//sysnb Setsid() (pid int, err error) = SYS_SETSID +//sys Setuid(uid int) (err error) = SYS_SETUID +//sys Setgid(uid int) (err error) = SYS_SETGID +//sys Shutdown(fd int, how int) (err error) +//sys stat(path string, statLE *Stat_LE_t) (err error) = SYS___STAT_A + +func Stat(path string, sta *Stat_t) (err error) { + var statLE Stat_LE_t + err = stat(path, &statLE) + copyStat(sta, &statLE) + return +} + +//sys Symlink(path string, link string) (err error) = SYS___SYMLINK_A +//sys Sync() = SYS_SYNC +//sys Truncate(path string, length int64) (err error) = SYS___TRUNCATE_A +//sys Tcgetattr(fildes int, termptr *Termios) (err error) = SYS_TCGETATTR +//sys Tcsetattr(fildes int, when int, termptr *Termios) (err error) = SYS_TCSETATTR +//sys Umask(mask int) (oldmask int) +//sys Unlink(path string) (err error) = SYS___UNLINK_A +//sys Utime(path string, utim *Utimbuf) (err error) = SYS___UTIME_A + +//sys open(path string, mode int, perm uint32) (fd int, err error) = SYS___OPEN_A + +func Open(path string, mode int, perm uint32) (fd int, err error) { + return open(path, mode, perm) +} + +func Mkfifoat(dirfd int, path string, mode uint32) (err error) { + wd, err := Getwd() + if err != nil { + return err + } + + if err := Fchdir(dirfd); err != nil { + return err + } + defer Chdir(wd) + + return Mkfifo(path, mode) +} + +//sys remove(path string) (err error) + +func Remove(path string) error { + return 
remove(path) +} + +const ImplementsGetwd = true + +func Getcwd(buf []byte) (n int, err error) { + var p unsafe.Pointer + if len(buf) > 0 { + p = unsafe.Pointer(&buf[0]) + } else { + p = unsafe.Pointer(&_zero) + } + _, _, e := syscall_syscall(SYS___GETCWD_A, uintptr(p), uintptr(len(buf)), 0) + n = clen(buf) + 1 + if e != 0 { + err = errnoErr(e) + } + return +} + +func Getwd() (wd string, err error) { + var buf [PathMax]byte + n, err := Getcwd(buf[0:]) + if err != nil { + return "", err + } + // Getcwd returns the number of bytes written to buf, including the NUL. + if n < 1 || n > len(buf) || buf[n-1] != 0 { + return "", EINVAL + } + return string(buf[0 : n-1]), nil +} + +func Getgroups() (gids []int, err error) { + n, err := getgroups(0, nil) + if err != nil { + return nil, err + } + if n == 0 { + return nil, nil + } + + // Sanity check group count. Max is 1<<16 on Linux. + if n < 0 || n > 1<<20 { + return nil, EINVAL + } + + a := make([]_Gid_t, n) + n, err = getgroups(n, &a[0]) + if err != nil { + return nil, err + } + gids = make([]int, n) + for i, v := range a[0:n] { + gids[i] = int(v) + } + return +} + +func Setgroups(gids []int) (err error) { + if len(gids) == 0 { + return setgroups(0, nil) + } + + a := make([]_Gid_t, len(gids)) + for i, v := range gids { + a[i] = _Gid_t(v) + } + return setgroups(len(a), &a[0]) +} + +func gettid() uint64 + +func Gettid() (tid int) { + return int(gettid()) +} + +type WaitStatus uint32 + +// Wait status is 7 bits at bottom, either 0 (exited), +// 0x7F (stopped), or a signal number that caused an exit. +// The 0x80 bit is whether there was a core dump. +// An extra number (exit code, signal causing a stop) +// is in the high bits. At least that's the idea. +// There are various irregularities. For example, the +// "continued" status is 0xFFFF, distinguishing itself +// from stopped via the core dump bit. 
+ +const ( + mask = 0x7F + core = 0x80 + exited = 0x00 + stopped = 0x7F + shift = 8 +) + +func (w WaitStatus) Exited() bool { return w&mask == exited } + +func (w WaitStatus) Signaled() bool { return w&mask != stopped && w&mask != exited } + +func (w WaitStatus) Stopped() bool { return w&0xFF == stopped } + +func (w WaitStatus) Continued() bool { return w == 0xFFFF } + +func (w WaitStatus) CoreDump() bool { return w.Signaled() && w&core != 0 } + +func (w WaitStatus) ExitStatus() int { + if !w.Exited() { + return -1 + } + return int(w>>shift) & 0xFF +} + +func (w WaitStatus) Signal() Signal { + if !w.Signaled() { + return -1 + } + return Signal(w & mask) +} + +func (w WaitStatus) StopSignal() Signal { + if !w.Stopped() { + return -1 + } + return Signal(w>>shift) & 0xFF +} + +func (w WaitStatus) TrapCause() int { return -1 } + +//sys waitpid(pid int, wstatus *_C_int, options int) (wpid int, err error) + +func Wait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, err error) { + // TODO(mundaym): z/OS doesn't have wait4. I don't think getrusage does what we want. + // At the moment rusage will not be touched. 
+ var status _C_int + wpid, err = waitpid(pid, &status, options) + if wstatus != nil { + *wstatus = WaitStatus(status) + } + return +} + +//sysnb gettimeofday(tv *timeval_zos) (err error) + +func Gettimeofday(tv *Timeval) (err error) { + var tvz timeval_zos + err = gettimeofday(&tvz) + tv.Sec = tvz.Sec + tv.Usec = int64(tvz.Usec) + return +} + +func Time(t *Time_t) (tt Time_t, err error) { + var tv Timeval + err = Gettimeofday(&tv) + if err != nil { + return 0, err + } + if t != nil { + *t = Time_t(tv.Sec) + } + return Time_t(tv.Sec), nil +} + +func setTimespec(sec, nsec int64) Timespec { + return Timespec{Sec: sec, Nsec: nsec} +} + +func setTimeval(sec, usec int64) Timeval { //fix + return Timeval{Sec: sec, Usec: usec} +} + +//sysnb pipe(p *[2]_C_int) (err error) + +func Pipe(p []int) (err error) { + if len(p) != 2 { + return EINVAL + } + var pp [2]_C_int + err = pipe(&pp) + p[0] = int(pp[0]) + p[1] = int(pp[1]) + return +} + +//sys utimes(path string, timeval *[2]Timeval) (err error) = SYS___UTIMES_A + +func Utimes(path string, tv []Timeval) (err error) { + if len(tv) != 2 { + return EINVAL + } + return utimes(path, (*[2]Timeval)(unsafe.Pointer(&tv[0]))) +} + +func UtimesNano(path string, ts []Timespec) error { + if len(ts) != 2 { + return EINVAL + } + // Not as efficient as it could be because Timespec and + // Timeval have different types in the different OSes + tv := [2]Timeval{ + NsecToTimeval(TimespecToNsec(ts[0])), + NsecToTimeval(TimespecToNsec(ts[1])), + } + return utimes(path, (*[2]Timeval)(unsafe.Pointer(&tv[0]))) +} + +func Getsockname(fd int) (sa Sockaddr, err error) { + var rsa RawSockaddrAny + var len _Socklen = SizeofSockaddrAny + if err = getsockname(fd, &rsa, &len); err != nil { + return + } + // TODO(neeilan) : Remove this 0 ( added to get sys/unix compiling on z/OS ) + return anyToSockaddr(0, &rsa) +} + +const ( + // identifier constants + nwmHeaderIdentifier = 0xd5e6d4c8 + nwmFilterIdentifier = 0xd5e6d4c6 + nwmTCPConnIdentifier = 0xd5e6d4c3 + 
nwmRecHeaderIdentifier = 0xd5e6d4d9 + nwmIPStatsIdentifier = 0xd5e6d4c9d7e2e340 + nwmIPGStatsIdentifier = 0xd5e6d4c9d7c7e2e3 + nwmTCPStatsIdentifier = 0xd5e6d4e3c3d7e2e3 + nwmUDPStatsIdentifier = 0xd5e6d4e4c4d7e2e3 + nwmICMPGStatsEntry = 0xd5e6d4c9c3d4d7c7 + nwmICMPTStatsEntry = 0xd5e6d4c9c3d4d7e3 + + // nwmHeader constants + nwmVersion1 = 1 + nwmVersion2 = 2 + nwmCurrentVer = 2 + + nwmTCPConnType = 1 + nwmGlobalStatsType = 14 + + // nwmFilter constants + nwmFilterLclAddrMask = 0x20000000 // Local address + nwmFilterSrcAddrMask = 0x20000000 // Source address + nwmFilterLclPortMask = 0x10000000 // Local port + nwmFilterSrcPortMask = 0x10000000 // Source port + + // nwmConnEntry constants + nwmTCPStateClosed = 1 + nwmTCPStateListen = 2 + nwmTCPStateSynSent = 3 + nwmTCPStateSynRcvd = 4 + nwmTCPStateEstab = 5 + nwmTCPStateFinWait1 = 6 + nwmTCPStateFinWait2 = 7 + nwmTCPStateClosWait = 8 + nwmTCPStateLastAck = 9 + nwmTCPStateClosing = 10 + nwmTCPStateTimeWait = 11 + nwmTCPStateDeletTCB = 12 + + // Existing constants on linux + BPF_TCP_CLOSE = 1 + BPF_TCP_LISTEN = 2 + BPF_TCP_SYN_SENT = 3 + BPF_TCP_SYN_RECV = 4 + BPF_TCP_ESTABLISHED = 5 + BPF_TCP_FIN_WAIT1 = 6 + BPF_TCP_FIN_WAIT2 = 7 + BPF_TCP_CLOSE_WAIT = 8 + BPF_TCP_LAST_ACK = 9 + BPF_TCP_CLOSING = 10 + BPF_TCP_TIME_WAIT = 11 + BPF_TCP_NEW_SYN_RECV = -1 + BPF_TCP_MAX_STATES = -2 +) + +type nwmTriplet struct { + offset uint32 + length uint32 + number uint32 +} + +type nwmQuadruplet struct { + offset uint32 + length uint32 + number uint32 + match uint32 +} + +type nwmHeader struct { + ident uint32 + length uint32 + version uint16 + nwmType uint16 + bytesNeeded uint32 + options uint32 + _ [16]byte + inputDesc nwmTriplet + outputDesc nwmQuadruplet +} + +type nwmFilter struct { + ident uint32 + flags uint32 + resourceName [8]byte + resourceId uint32 + listenerId uint32 + local [28]byte // union of sockaddr4 and sockaddr6 + remote [28]byte // union of sockaddr4 and sockaddr6 + _ uint16 + _ uint16 + asid uint16 + _ [2]byte + 
tnLuName [8]byte + tnMonGrp uint32 + tnAppl [8]byte + applData [40]byte + nInterface [16]byte + dVipa [16]byte + dVipaPfx uint16 + dVipaPort uint16 + dVipaFamily byte + _ [3]byte + destXCF [16]byte + destXCFPfx uint16 + destXCFFamily byte + _ [1]byte + targIP [16]byte + targIPPfx uint16 + targIPFamily byte + _ [1]byte + _ [20]byte +} + +type nwmRecHeader struct { + ident uint32 + length uint32 + number byte + _ [3]byte +} + +type nwmTCPStatsEntry struct { + ident uint64 + currEstab uint32 + activeOpened uint32 + passiveOpened uint32 + connClosed uint32 + estabResets uint32 + attemptFails uint32 + passiveDrops uint32 + timeWaitReused uint32 + inSegs uint64 + predictAck uint32 + predictData uint32 + inDupAck uint32 + inBadSum uint32 + inBadLen uint32 + inShort uint32 + inDiscOldTime uint32 + inAllBeforeWin uint32 + inSomeBeforeWin uint32 + inAllAfterWin uint32 + inSomeAfterWin uint32 + inOutOfOrder uint32 + inAfterClose uint32 + inWinProbes uint32 + inWinUpdates uint32 + outWinUpdates uint32 + outSegs uint64 + outDelayAcks uint32 + outRsts uint32 + retransSegs uint32 + retransTimeouts uint32 + retransDrops uint32 + pmtuRetrans uint32 + pmtuErrors uint32 + outWinProbes uint32 + probeDrops uint32 + keepAliveProbes uint32 + keepAliveDrops uint32 + finwait2Drops uint32 + acceptCount uint64 + inBulkQSegs uint64 + inDiscards uint64 + connFloods uint32 + connStalls uint32 + cfgEphemDef uint16 + ephemInUse uint16 + ephemHiWater uint16 + flags byte + _ [1]byte + ephemExhaust uint32 + smcRCurrEstabLnks uint32 + smcRLnkActTimeOut uint32 + smcRActLnkOpened uint32 + smcRPasLnkOpened uint32 + smcRLnksClosed uint32 + smcRCurrEstab uint32 + smcRActiveOpened uint32 + smcRPassiveOpened uint32 + smcRConnClosed uint32 + smcRInSegs uint64 + smcROutSegs uint64 + smcRInRsts uint32 + smcROutRsts uint32 + smcDCurrEstabLnks uint32 + smcDActLnkOpened uint32 + smcDPasLnkOpened uint32 + smcDLnksClosed uint32 + smcDCurrEstab uint32 + smcDActiveOpened uint32 + smcDPassiveOpened uint32 + 
smcDConnClosed uint32 + smcDInSegs uint64 + smcDOutSegs uint64 + smcDInRsts uint32 + smcDOutRsts uint32 +} + +type nwmConnEntry struct { + ident uint32 + local [28]byte // union of sockaddr4 and sockaddr6 + remote [28]byte // union of sockaddr4 and sockaddr6 + startTime [8]byte // uint64, changed to prevent padding from being inserted + lastActivity [8]byte // uint64 + bytesIn [8]byte // uint64 + bytesOut [8]byte // uint64 + inSegs [8]byte // uint64 + outSegs [8]byte // uint64 + state uint16 + activeOpen byte + flag01 byte + outBuffered uint32 + inBuffered uint32 + maxSndWnd uint32 + reXmtCount uint32 + congestionWnd uint32 + ssThresh uint32 + roundTripTime uint32 + roundTripVar uint32 + sendMSS uint32 + sndWnd uint32 + rcvBufSize uint32 + sndBufSize uint32 + outOfOrderCount uint32 + lcl0WindowCount uint32 + rmt0WindowCount uint32 + dupacks uint32 + flag02 byte + sockOpt6Cont byte + asid uint16 + resourceName [8]byte + resourceId uint32 + subtask uint32 + sockOpt byte + sockOpt6 byte + clusterConnFlag byte + proto byte + targetAppl [8]byte + luName [8]byte + clientUserId [8]byte + logMode [8]byte + timeStamp uint32 + timeStampAge uint32 + serverResourceId uint32 + intfName [16]byte + ttlsStatPol byte + ttlsStatConn byte + ttlsSSLProt uint16 + ttlsNegCiph [2]byte + ttlsSecType byte + ttlsFIPS140Mode byte + ttlsUserID [8]byte + applData [40]byte + inOldestTime [8]byte // uint64 + outOldestTime [8]byte // uint64 + tcpTrustedPartner byte + _ [3]byte + bulkDataIntfName [16]byte + ttlsNegCiph4 [4]byte + smcReason uint32 + lclSMCLinkId uint32 + rmtSMCLinkId uint32 + smcStatus byte + smcFlags byte + _ [2]byte + rcvWnd uint32 + lclSMCBufSz uint32 + rmtSMCBufSz uint32 + ttlsSessID [32]byte + ttlsSessIDLen int16 + _ [1]byte + smcDStatus byte + smcDReason uint32 +} + +var svcNameTable [][]byte = [][]byte{ + []byte("\xc5\xe9\xc2\xd5\xd4\xc9\xc6\xf4"), // svc_EZBNMIF4 +} + +const ( + svc_EZBNMIF4 = 0 +) + +func GetsockoptTCPInfo(fd, level, opt int) (*TCPInfo, error) { + jobname 
:= []byte("\x5c\x40\x40\x40\x40\x40\x40\x40") // "*" + responseBuffer := [4096]byte{0} + var bufferAlet, reasonCode uint32 = 0, 0 + var bufferLen, returnValue, returnCode int32 = 4096, 0, 0 + + dsa := [18]uint64{0} + var argv [7]unsafe.Pointer + argv[0] = unsafe.Pointer(&jobname[0]) + argv[1] = unsafe.Pointer(&responseBuffer[0]) + argv[2] = unsafe.Pointer(&bufferAlet) + argv[3] = unsafe.Pointer(&bufferLen) + argv[4] = unsafe.Pointer(&returnValue) + argv[5] = unsafe.Pointer(&returnCode) + argv[6] = unsafe.Pointer(&reasonCode) + + request := (*struct { + header nwmHeader + filter nwmFilter + })(unsafe.Pointer(&responseBuffer[0])) + + EZBNMIF4 := svcLoad(&svcNameTable[svc_EZBNMIF4][0]) + if EZBNMIF4 == nil { + return nil, errnoErr(EINVAL) + } + + // GetGlobalStats EZBNMIF4 call + request.header.ident = nwmHeaderIdentifier + request.header.length = uint32(unsafe.Sizeof(request.header)) + request.header.version = nwmCurrentVer + request.header.nwmType = nwmGlobalStatsType + request.header.options = 0x80000000 + + svcCall(EZBNMIF4, &argv[0], &dsa[0]) + + // outputDesc field is filled by EZBNMIF4 on success + if returnCode != 0 || request.header.outputDesc.offset == 0 { + return nil, errnoErr(EINVAL) + } + + // Check that EZBNMIF4 returned a nwmRecHeader + recHeader := (*nwmRecHeader)(unsafe.Pointer(&responseBuffer[request.header.outputDesc.offset])) + if recHeader.ident != nwmRecHeaderIdentifier { + return nil, errnoErr(EINVAL) + } + + // Parse nwmTriplets to get offsets of returned entries + var sections []*uint64 + var sectionDesc *nwmTriplet = (*nwmTriplet)(unsafe.Pointer(&responseBuffer[0])) + for i := uint32(0); i < uint32(recHeader.number); i++ { + offset := request.header.outputDesc.offset + uint32(unsafe.Sizeof(*recHeader)) + i*uint32(unsafe.Sizeof(*sectionDesc)) + sectionDesc = (*nwmTriplet)(unsafe.Pointer(&responseBuffer[offset])) + for j := uint32(0); j < sectionDesc.number; j++ { + offset = request.header.outputDesc.offset + sectionDesc.offset + 
j*sectionDesc.length + sections = append(sections, (*uint64)(unsafe.Pointer(&responseBuffer[offset]))) + } + } + + // Find nwmTCPStatsEntry in returned entries + var tcpStats *nwmTCPStatsEntry = nil + for _, ptr := range sections { + switch *ptr { + case nwmTCPStatsIdentifier: + if tcpStats != nil { + return nil, errnoErr(EINVAL) + } + tcpStats = (*nwmTCPStatsEntry)(unsafe.Pointer(ptr)) + case nwmIPStatsIdentifier: + case nwmIPGStatsIdentifier: + case nwmUDPStatsIdentifier: + case nwmICMPGStatsEntry: + case nwmICMPTStatsEntry: + default: + return nil, errnoErr(EINVAL) + } + } + if tcpStats == nil { + return nil, errnoErr(EINVAL) + } + + // GetConnectionDetail EZBNMIF4 call + responseBuffer = [4096]byte{0} + dsa = [18]uint64{0} + bufferAlet, reasonCode = 0, 0 + bufferLen, returnValue, returnCode = 4096, 0, 0 + nameptr := (*uint32)(unsafe.Pointer(uintptr(0x21c))) // Get jobname of current process + nameptr = (*uint32)(unsafe.Pointer(uintptr(*nameptr + 12))) + argv[0] = unsafe.Pointer(uintptr(*nameptr)) + + request.header.ident = nwmHeaderIdentifier + request.header.length = uint32(unsafe.Sizeof(request.header)) + request.header.version = nwmCurrentVer + request.header.nwmType = nwmTCPConnType + request.header.options = 0x80000000 + + request.filter.ident = nwmFilterIdentifier + + var localSockaddr RawSockaddrAny + socklen := _Socklen(SizeofSockaddrAny) + err := getsockname(fd, &localSockaddr, &socklen) + if err != nil { + return nil, errnoErr(EINVAL) + } + if localSockaddr.Addr.Family == AF_INET { + localSockaddr := (*RawSockaddrInet4)(unsafe.Pointer(&localSockaddr.Addr)) + localSockFilter := (*RawSockaddrInet4)(unsafe.Pointer(&request.filter.local[0])) + localSockFilter.Family = AF_INET + var i int + for i = 0; i < 4; i++ { + if localSockaddr.Addr[i] != 0 { + break + } + } + if i != 4 { + request.filter.flags |= nwmFilterLclAddrMask + for i = 0; i < 4; i++ { + localSockFilter.Addr[i] = localSockaddr.Addr[i] + } + } + if localSockaddr.Port != 0 { + 
request.filter.flags |= nwmFilterLclPortMask + localSockFilter.Port = localSockaddr.Port + } + } else if localSockaddr.Addr.Family == AF_INET6 { + localSockaddr := (*RawSockaddrInet6)(unsafe.Pointer(&localSockaddr.Addr)) + localSockFilter := (*RawSockaddrInet6)(unsafe.Pointer(&request.filter.local[0])) + localSockFilter.Family = AF_INET6 + var i int + for i = 0; i < 16; i++ { + if localSockaddr.Addr[i] != 0 { + break + } + } + if i != 16 { + request.filter.flags |= nwmFilterLclAddrMask + for i = 0; i < 16; i++ { + localSockFilter.Addr[i] = localSockaddr.Addr[i] + } + } + if localSockaddr.Port != 0 { + request.filter.flags |= nwmFilterLclPortMask + localSockFilter.Port = localSockaddr.Port + } + } + + svcCall(EZBNMIF4, &argv[0], &dsa[0]) + + // outputDesc field is filled by EZBNMIF4 on success + if returnCode != 0 || request.header.outputDesc.offset == 0 { + return nil, errnoErr(EINVAL) + } + + // Check that EZBNMIF4 returned a nwmConnEntry + conn := (*nwmConnEntry)(unsafe.Pointer(&responseBuffer[request.header.outputDesc.offset])) + if conn.ident != nwmTCPConnIdentifier { + return nil, errnoErr(EINVAL) + } + + // Copy data from the returned data structures into tcpInfo + // Stats from nwmConnEntry are specific to that connection. + // Stats from nwmTCPStatsEntry are global (to the interface?) + // Fields may not be an exact match. Some fields have no equivalent. 
+ var tcpinfo TCPInfo + tcpinfo.State = uint8(conn.state) + tcpinfo.Ca_state = 0 // dummy + tcpinfo.Retransmits = uint8(tcpStats.retransSegs) + tcpinfo.Probes = uint8(tcpStats.outWinProbes) + tcpinfo.Backoff = 0 // dummy + tcpinfo.Options = 0 // dummy + tcpinfo.Rto = tcpStats.retransTimeouts + tcpinfo.Ato = tcpStats.outDelayAcks + tcpinfo.Snd_mss = conn.sendMSS + tcpinfo.Rcv_mss = conn.sendMSS // dummy + tcpinfo.Unacked = 0 // dummy + tcpinfo.Sacked = 0 // dummy + tcpinfo.Lost = 0 // dummy + tcpinfo.Retrans = conn.reXmtCount + tcpinfo.Fackets = 0 // dummy + tcpinfo.Last_data_sent = uint32(*(*uint64)(unsafe.Pointer(&conn.lastActivity[0]))) + tcpinfo.Last_ack_sent = uint32(*(*uint64)(unsafe.Pointer(&conn.outOldestTime[0]))) + tcpinfo.Last_data_recv = uint32(*(*uint64)(unsafe.Pointer(&conn.inOldestTime[0]))) + tcpinfo.Last_ack_recv = uint32(*(*uint64)(unsafe.Pointer(&conn.inOldestTime[0]))) + tcpinfo.Pmtu = conn.sendMSS // dummy, NWMIfRouteMtu is a candidate + tcpinfo.Rcv_ssthresh = conn.ssThresh + tcpinfo.Rtt = conn.roundTripTime + tcpinfo.Rttvar = conn.roundTripVar + tcpinfo.Snd_ssthresh = conn.ssThresh // dummy + tcpinfo.Snd_cwnd = conn.congestionWnd + tcpinfo.Advmss = conn.sendMSS // dummy + tcpinfo.Reordering = 0 // dummy + tcpinfo.Rcv_rtt = conn.roundTripTime // dummy + tcpinfo.Rcv_space = conn.sendMSS // dummy + tcpinfo.Total_retrans = conn.reXmtCount + + svcUnload(&svcNameTable[svc_EZBNMIF4][0], EZBNMIF4) + + return &tcpinfo, nil +} + +// GetsockoptString returns the string value of the socket option opt for the +// socket associated with fd at the given socket level. 
+func GetsockoptString(fd, level, opt int) (string, error) { + buf := make([]byte, 256) + vallen := _Socklen(len(buf)) + err := getsockopt(fd, level, opt, unsafe.Pointer(&buf[0]), &vallen) + if err != nil { + return "", err + } + + return string(buf[:vallen-1]), nil +} + +func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from Sockaddr, err error) { + var msg Msghdr + var rsa RawSockaddrAny + msg.Name = (*byte)(unsafe.Pointer(&rsa)) + msg.Namelen = SizeofSockaddrAny + var iov Iovec + if len(p) > 0 { + iov.Base = (*byte)(unsafe.Pointer(&p[0])) + iov.SetLen(len(p)) + } + var dummy byte + if len(oob) > 0 { + // receive at least one normal byte + if len(p) == 0 { + iov.Base = &dummy + iov.SetLen(1) + } + msg.Control = (*byte)(unsafe.Pointer(&oob[0])) + msg.SetControllen(len(oob)) + } + msg.Iov = &iov + msg.Iovlen = 1 + if n, err = recvmsg(fd, &msg, flags); err != nil { + return + } + oobn = int(msg.Controllen) + recvflags = int(msg.Flags) + // source address is only specified if the socket is unconnected + if rsa.Addr.Family != AF_UNSPEC { + // TODO(neeilan): Remove 0 arg added to get this compiling on z/OS + from, err = anyToSockaddr(0, &rsa) + } + return +} + +func Sendmsg(fd int, p, oob []byte, to Sockaddr, flags int) (err error) { + _, err = SendmsgN(fd, p, oob, to, flags) + return +} + +func SendmsgN(fd int, p, oob []byte, to Sockaddr, flags int) (n int, err error) { + var ptr unsafe.Pointer + var salen _Socklen + if to != nil { + var err error + ptr, salen, err = to.sockaddr() + if err != nil { + return 0, err + } + } + var msg Msghdr + msg.Name = (*byte)(unsafe.Pointer(ptr)) + msg.Namelen = int32(salen) + var iov Iovec + if len(p) > 0 { + iov.Base = (*byte)(unsafe.Pointer(&p[0])) + iov.SetLen(len(p)) + } + var dummy byte + if len(oob) > 0 { + // send at least one normal byte + if len(p) == 0 { + iov.Base = &dummy + iov.SetLen(1) + } + msg.Control = (*byte)(unsafe.Pointer(&oob[0])) + msg.SetControllen(len(oob)) + } + msg.Iov = &iov + 
msg.Iovlen = 1 + if n, err = sendmsg(fd, &msg, flags); err != nil { + return 0, err + } + if len(oob) > 0 && len(p) == 0 { + n = 0 + } + return n, nil +} + +func Opendir(name string) (uintptr, error) { + p, err := BytePtrFromString(name) + if err != nil { + return 0, err + } + dir, _, e := syscall_syscall(SYS___OPENDIR_A, uintptr(unsafe.Pointer(p)), 0, 0) + runtime.KeepAlive(unsafe.Pointer(p)) + if e != 0 { + err = errnoErr(e) + } + return dir, err +} + +// clearsyscall.Errno resets the errno value to 0. +func clearErrno() + +func Readdir(dir uintptr) (*Dirent, error) { + var ent Dirent + var res uintptr + // __readdir_r_a returns errno at the end of the directory stream, rather than 0. + // Therefore to avoid false positives we clear errno before calling it. + + // TODO(neeilan): Commented this out to get sys/unix compiling on z/OS. Uncomment and fix. Error: "undefined: clearsyscall" + //clearsyscall.Errno() // TODO(mundaym): check pre-emption rules. + + e, _, _ := syscall_syscall(SYS___READDIR_R_A, dir, uintptr(unsafe.Pointer(&ent)), uintptr(unsafe.Pointer(&res))) + var err error + if e != 0 { + err = errnoErr(Errno(e)) + } + if res == 0 { + return nil, err + } + return &ent, err +} + +func Closedir(dir uintptr) error { + _, _, e := syscall_syscall(SYS_CLOSEDIR, dir, 0, 0) + if e != 0 { + return errnoErr(e) + } + return nil +} + +func Seekdir(dir uintptr, pos int) { + _, _, _ = syscall_syscall(SYS_SEEKDIR, dir, uintptr(pos), 0) +} + +func Telldir(dir uintptr) (int, error) { + p, _, e := syscall_syscall(SYS_TELLDIR, dir, 0, 0) + pos := int(p) + if pos == -1 { + return pos, errnoErr(e) + } + return pos, nil +} + +// FcntlFlock performs a fcntl syscall for the F_GETLK, F_SETLK or F_SETLKW command. +func FcntlFlock(fd uintptr, cmd int, lk *Flock_t) error { + // struct flock is packed on z/OS. We can't emulate that in Go so + // instead we pack it here. 
+ var flock [24]byte + *(*int16)(unsafe.Pointer(&flock[0])) = lk.Type + *(*int16)(unsafe.Pointer(&flock[2])) = lk.Whence + *(*int64)(unsafe.Pointer(&flock[4])) = lk.Start + *(*int64)(unsafe.Pointer(&flock[12])) = lk.Len + *(*int32)(unsafe.Pointer(&flock[20])) = lk.Pid + _, _, errno := syscall_syscall(SYS_FCNTL, fd, uintptr(cmd), uintptr(unsafe.Pointer(&flock))) + lk.Type = *(*int16)(unsafe.Pointer(&flock[0])) + lk.Whence = *(*int16)(unsafe.Pointer(&flock[2])) + lk.Start = *(*int64)(unsafe.Pointer(&flock[4])) + lk.Len = *(*int64)(unsafe.Pointer(&flock[12])) + lk.Pid = *(*int32)(unsafe.Pointer(&flock[20])) + if errno == 0 { + return nil + } + return errno +} + +func Flock(fd int, how int) error { + + var flock_type int16 + var fcntl_cmd int + + switch how { + case LOCK_SH | LOCK_NB: + flock_type = F_RDLCK + fcntl_cmd = F_SETLK + case LOCK_EX | LOCK_NB: + flock_type = F_WRLCK + fcntl_cmd = F_SETLK + case LOCK_EX: + flock_type = F_WRLCK + fcntl_cmd = F_SETLKW + case LOCK_UN: + flock_type = F_UNLCK + fcntl_cmd = F_SETLKW + default: + } + + flock := Flock_t{ + Type: int16(flock_type), + Whence: int16(0), + Start: int64(0), + Len: int64(0), + Pid: int32(Getppid()), + } + + err := FcntlFlock(uintptr(fd), fcntl_cmd, &flock) + return err +} + +func Mlock(b []byte) (err error) { + _, _, e1 := syscall_syscall(SYS___MLOCKALL, _BPX_NONSWAP, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func Mlock2(b []byte, flags int) (err error) { + _, _, e1 := syscall_syscall(SYS___MLOCKALL, _BPX_NONSWAP, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func Mlockall(flags int) (err error) { + _, _, e1 := syscall_syscall(SYS___MLOCKALL, _BPX_NONSWAP, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func Munlock(b []byte) (err error) { + _, _, e1 := syscall_syscall(SYS___MLOCKALL, _BPX_SWAP, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func Munlockall() (err error) { + _, _, e1 := syscall_syscall(SYS___MLOCKALL, _BPX_SWAP, 0, 0) + if e1 != 
0 { + err = errnoErr(e1) + } + return +} + +func ClockGettime(clockid int32, ts *Timespec) error { + + var ticks_per_sec uint32 = 100 //TODO(kenan): value is currently hardcoded; need sysconf() call otherwise + var nsec_per_sec int64 = 1000000000 + + if ts == nil { + return EFAULT + } + if clockid == CLOCK_REALTIME || clockid == CLOCK_MONOTONIC { + var nanotime int64 = runtime.Nanotime1() + ts.Sec = nanotime / nsec_per_sec + ts.Nsec = nanotime % nsec_per_sec + } else if clockid == CLOCK_PROCESS_CPUTIME_ID || clockid == CLOCK_THREAD_CPUTIME_ID { + var tm Tms + _, err := Times(&tm) + if err != nil { + return EFAULT + } + ts.Sec = int64(tm.Utime / ticks_per_sec) + ts.Nsec = int64(tm.Utime) * nsec_per_sec / int64(ticks_per_sec) + } else { + return EINVAL + } + return nil +} + +func Statfs(path string, stat *Statfs_t) (err error) { + fd, err := open(path, O_RDONLY, 0) + defer Close(fd) + if err != nil { + return err + } + return Fstatfs(fd, stat) +} + +var ( + Stdin = 0 + Stdout = 1 + Stderr = 2 +) + +// Do the interface allocations only once for common +// Errno values. +var ( + errEAGAIN error = syscall.EAGAIN + errEINVAL error = syscall.EINVAL + errENOENT error = syscall.ENOENT +) + +var ( + signalNameMapOnce sync.Once + signalNameMap map[string]syscall.Signal +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e Errno) error { + switch e { + case 0: + return nil + case EAGAIN: + return errEAGAIN + case EINVAL: + return errEINVAL + case ENOENT: + return errENOENT + } + return e +} + +// ErrnoName returns the error name for error number e. +func ErrnoName(e Errno) string { + i := sort.Search(len(errorList), func(i int) bool { + return errorList[i].num >= e + }) + if i < len(errorList) && errorList[i].num == e { + return errorList[i].name + } + return "" +} + +// SignalName returns the signal name for signal number s. 
+func SignalName(s syscall.Signal) string { + i := sort.Search(len(signalList), func(i int) bool { + return signalList[i].num >= s + }) + if i < len(signalList) && signalList[i].num == s { + return signalList[i].name + } + return "" +} + +// SignalNum returns the syscall.Signal for signal named s, +// or 0 if a signal with such name is not found. +// The signal name should start with "SIG". +func SignalNum(s string) syscall.Signal { + signalNameMapOnce.Do(func() { + signalNameMap = make(map[string]syscall.Signal, len(signalList)) + for _, signal := range signalList { + signalNameMap[signal.name] = signal.num + } + }) + return signalNameMap[s] +} + +// clen returns the index of the first NULL byte in n or len(n) if n contains no NULL byte. +func clen(n []byte) int { + i := bytes.IndexByte(n, 0) + if i == -1 { + i = len(n) + } + return i +} + +// Mmap manager, for use by operating system-specific implementations. + +type mmapper struct { + sync.Mutex + active map[*byte][]byte // active mappings; key is last byte in mapping + mmap func(addr, length uintptr, prot, flags, fd int, offset int64) (uintptr, error) + munmap func(addr uintptr, length uintptr) error +} + +func (m *mmapper) Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) { + if length <= 0 { + return nil, EINVAL + } + + // Map the requested memory. + addr, errno := m.mmap(0, uintptr(length), prot, flags, fd, offset) + if errno != nil { + return nil, errno + } + + // Slice memory layout + var sl = struct { + addr uintptr + len int + cap int + }{addr, length, length} + + // Use unsafe to turn sl into a []byte. + b := *(*[]byte)(unsafe.Pointer(&sl)) + + // Register mapping in m and return it. + p := &b[cap(b)-1] + m.Lock() + defer m.Unlock() + m.active[p] = b + return b, nil +} + +func (m *mmapper) Munmap(data []byte) (err error) { + if len(data) == 0 || len(data) != cap(data) { + return EINVAL + } + + // Find the base of the mapping. 
+ p := &data[cap(data)-1] + m.Lock() + defer m.Unlock() + b := m.active[p] + if b == nil || &b[0] != &data[0] { + return EINVAL + } + + // Unmap the memory and update m. + if errno := m.munmap(uintptr(unsafe.Pointer(&b[0])), uintptr(len(b))); errno != nil { + return errno + } + delete(m.active, p) + return nil +} + +func Read(fd int, p []byte) (n int, err error) { + n, err = read(fd, p) + if raceenabled { + if n > 0 { + raceWriteRange(unsafe.Pointer(&p[0]), n) + } + if err == nil { + raceAcquire(unsafe.Pointer(&ioSync)) + } + } + return +} + +func Write(fd int, p []byte) (n int, err error) { + if raceenabled { + raceReleaseMerge(unsafe.Pointer(&ioSync)) + } + n, err = write(fd, p) + if raceenabled && n > 0 { + raceReadRange(unsafe.Pointer(&p[0]), n) + } + return +} + +// For testing: clients can set this flag to force +// creation of IPv6 sockets to return EAFNOSUPPORT. +var SocketDisableIPv6 bool + +// Sockaddr represents a socket address. +type Sockaddr interface { + sockaddr() (ptr unsafe.Pointer, len _Socklen, err error) // lowercase; only we can define Sockaddrs +} + +// SockaddrInet4 implements the Sockaddr interface for AF_INET type sockets. +type SockaddrInet4 struct { + Port int + Addr [4]byte + raw RawSockaddrInet4 +} + +// SockaddrInet6 implements the Sockaddr interface for AF_INET6 type sockets. +type SockaddrInet6 struct { + Port int + ZoneId uint32 + Addr [16]byte + raw RawSockaddrInet6 +} + +// SockaddrUnix implements the Sockaddr interface for AF_UNIX type sockets. 
+type SockaddrUnix struct { + Name string + raw RawSockaddrUnix +} + +func Bind(fd int, sa Sockaddr) (err error) { + ptr, n, err := sa.sockaddr() + if err != nil { + return err + } + return bind(fd, ptr, n) +} + +func Connect(fd int, sa Sockaddr) (err error) { + ptr, n, err := sa.sockaddr() + if err != nil { + return err + } + return connect(fd, ptr, n) +} + +func Getpeername(fd int) (sa Sockaddr, err error) { + var rsa RawSockaddrAny + var len _Socklen = SizeofSockaddrAny + if err = getpeername(fd, &rsa, &len); err != nil { + return + } + return anyToSockaddr(fd, &rsa) +} + +func GetsockoptByte(fd, level, opt int) (value byte, err error) { + var n byte + vallen := _Socklen(1) + err = getsockopt(fd, level, opt, unsafe.Pointer(&n), &vallen) + return n, err +} + +func GetsockoptInt(fd, level, opt int) (value int, err error) { + var n int32 + vallen := _Socklen(4) + err = getsockopt(fd, level, opt, unsafe.Pointer(&n), &vallen) + return int(n), err +} + +func GetsockoptInet4Addr(fd, level, opt int) (value [4]byte, err error) { + vallen := _Socklen(4) + err = getsockopt(fd, level, opt, unsafe.Pointer(&value[0]), &vallen) + return value, err +} + +func GetsockoptIPMreq(fd, level, opt int) (*IPMreq, error) { + var value IPMreq + vallen := _Socklen(SizeofIPMreq) + err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen) + return &value, err +} + +func GetsockoptIPv6Mreq(fd, level, opt int) (*IPv6Mreq, error) { + var value IPv6Mreq + vallen := _Socklen(SizeofIPv6Mreq) + err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen) + return &value, err +} + +func GetsockoptIPv6MTUInfo(fd, level, opt int) (*IPv6MTUInfo, error) { + var value IPv6MTUInfo + vallen := _Socklen(SizeofIPv6MTUInfo) + err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen) + return &value, err +} + +func GetsockoptICMPv6Filter(fd, level, opt int) (*ICMPv6Filter, error) { + var value ICMPv6Filter + vallen := _Socklen(SizeofICMPv6Filter) + err := getsockopt(fd, level, opt, 
unsafe.Pointer(&value), &vallen) + return &value, err +} + +func GetsockoptLinger(fd, level, opt int) (*Linger, error) { + var linger Linger + vallen := _Socklen(SizeofLinger) + err := getsockopt(fd, level, opt, unsafe.Pointer(&linger), &vallen) + return &linger, err +} + +func GetsockoptTimeval(fd, level, opt int) (*Timeval, error) { + var tv Timeval + vallen := _Socklen(unsafe.Sizeof(tv)) + err := getsockopt(fd, level, opt, unsafe.Pointer(&tv), &vallen) + return &tv, err +} + +func GetsockoptUint64(fd, level, opt int) (value uint64, err error) { + var n uint64 + vallen := _Socklen(8) + err = getsockopt(fd, level, opt, unsafe.Pointer(&n), &vallen) + return n, err +} + +func Recvfrom(fd int, p []byte, flags int) (n int, from Sockaddr, err error) { + var rsa RawSockaddrAny + var len _Socklen = SizeofSockaddrAny + if n, err = recvfrom(fd, p, flags, &rsa, &len); err != nil { + return + } + if rsa.Addr.Family != AF_UNSPEC { + from, err = anyToSockaddr(fd, &rsa) + } + return +} + +func Sendto(fd int, p []byte, flags int, to Sockaddr) (err error) { + ptr, n, err := to.sockaddr() + if err != nil { + return err + } + return sendto(fd, p, flags, ptr, n) +} + +func SetsockoptByte(fd, level, opt int, value byte) (err error) { + return setsockopt(fd, level, opt, unsafe.Pointer(&value), 1) +} + +func SetsockoptInt(fd, level, opt int, value int) (err error) { + var n = int32(value) + return setsockopt(fd, level, opt, unsafe.Pointer(&n), 4) +} + +func SetsockoptInet4Addr(fd, level, opt int, value [4]byte) (err error) { + return setsockopt(fd, level, opt, unsafe.Pointer(&value[0]), 4) +} + +func SetsockoptIPMreq(fd, level, opt int, mreq *IPMreq) (err error) { + return setsockopt(fd, level, opt, unsafe.Pointer(mreq), SizeofIPMreq) +} + +func SetsockoptIPv6Mreq(fd, level, opt int, mreq *IPv6Mreq) (err error) { + return setsockopt(fd, level, opt, unsafe.Pointer(mreq), SizeofIPv6Mreq) +} + +func SetsockoptICMPv6Filter(fd, level, opt int, filter *ICMPv6Filter) error { + return 
setsockopt(fd, level, opt, unsafe.Pointer(filter), SizeofICMPv6Filter) +} + +func SetsockoptLinger(fd, level, opt int, l *Linger) (err error) { + return setsockopt(fd, level, opt, unsafe.Pointer(l), SizeofLinger) +} + +func SetsockoptString(fd, level, opt int, s string) (err error) { + var p unsafe.Pointer + if len(s) > 0 { + p = unsafe.Pointer(&[]byte(s)[0]) + } + return setsockopt(fd, level, opt, p, uintptr(len(s))) +} + +func SetsockoptTimeval(fd, level, opt int, tv *Timeval) (err error) { + return setsockopt(fd, level, opt, unsafe.Pointer(tv), unsafe.Sizeof(*tv)) +} + +func SetsockoptUint64(fd, level, opt int, value uint64) (err error) { + return setsockopt(fd, level, opt, unsafe.Pointer(&value), 8) +} + +func Socket(domain, typ, proto int) (fd int, err error) { + if domain == AF_INET6 && SocketDisableIPv6 { + return -1, EAFNOSUPPORT + } + fd, err = socket(domain, typ, proto) + return +} + +func Socketpair(domain, typ, proto int) (fd [2]int, err error) { + var fdx [2]int32 + err = socketpair(domain, typ, proto, &fdx) + if err == nil { + fd[0] = int(fdx[0]) + fd[1] = int(fdx[1]) + } + return +} + +var ioSync int64 + +func CloseOnExec(fd int) { fcntl(fd, F_SETFD, FD_CLOEXEC) } + +func SetNonblock(fd int, nonblocking bool) (err error) { + flag, err := fcntl(fd, F_GETFL, 0) + if err != nil { + return err + } + if nonblocking { + flag |= O_NONBLOCK + } else { + flag &= ^O_NONBLOCK + } + _, err = fcntl(fd, F_SETFL, flag) + return err +} + +// Exec calls execve(2), which replaces the calling executable in the process +// tree. argv0 should be the full path to an executable ("/bin/ls") and the +// executable name should also be the first argument in argv (["ls", "-l"]). +// envv are the environment variables that should be passed to the new +// process (["USER=go", "PWD=/tmp"]). 
+func Exec(argv0 string, argv []string, envv []string) error { + return syscall.Exec(argv0, argv, envv) +} + +func Mount(source string, target string, fstype string, flags uintptr, data string) (err error) { + if needspace := 8 - len(fstype); needspace <= 0 { + fstype = fstype[:8] + } else { + fstype += " "[:needspace] + } + return mount_LE(target, source, fstype, uint32(flags), int32(len(data)), data) +} + +func Unmount(name string, mtm int) (err error) { + // mountpoint is always a full path and starts with a '/' + // check if input string is not a mountpoint but a filesystem name + if name[0] != '/' { + return unmount(name, mtm) + } + // treat name as mountpoint + b2s := func(arr []byte) string { + nulli := bytes.IndexByte(arr, 0) + if nulli == -1 { + return string(arr) + } else { + return string(arr[:nulli]) + } + } + var buffer struct { + header W_Mnth + fsinfo [64]W_Mntent + } + fsCount, err := W_Getmntent_A((*byte)(unsafe.Pointer(&buffer)), int(unsafe.Sizeof(buffer))) + if err != nil { + return err + } + if fsCount == 0 { + return EINVAL + } + for i := 0; i < fsCount; i++ { + if b2s(buffer.fsinfo[i].Mountpoint[:]) == name { + err = unmount(b2s(buffer.fsinfo[i].Fsname[:]), mtm) + break + } + } + return err +} diff --git a/vendor/golang.org/x/sys/unix/timestruct.go b/vendor/golang.org/x/sys/unix/timestruct.go index 103604299..3d8930405 100644 --- a/vendor/golang.org/x/sys/unix/timestruct.go +++ b/vendor/golang.org/x/sys/unix/timestruct.go @@ -2,7 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris +//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos package unix diff --git a/vendor/golang.org/x/sys/unix/xattr_bsd.go b/vendor/golang.org/x/sys/unix/xattr_bsd.go index 30c1d71f4..25df1e378 100644 --- a/vendor/golang.org/x/sys/unix/xattr_bsd.go +++ b/vendor/golang.org/x/sys/unix/xattr_bsd.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build freebsd || netbsd // +build freebsd netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zerrors_aix_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_aix_ppc.go index 104994bc6..ca9799b79 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_aix_ppc.go +++ b/vendor/golang.org/x/sys/unix/zerrors_aix_ppc.go @@ -1,6 +1,7 @@ // mkerrors.sh -maix32 // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build ppc && aix // +build ppc,aix // Created by cgo -godefs - DO NOT EDIT diff --git a/vendor/golang.org/x/sys/unix/zerrors_aix_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_aix_ppc64.go index 4fc8d3064..200c8c26f 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_aix_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_aix_ppc64.go @@ -1,6 +1,7 @@ // mkerrors.sh -maix64 // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build ppc64 && aix // +build ppc64,aix // Code generated by cmd/cgo -godefs; DO NOT EDIT. diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_386.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_386.go deleted file mode 100644 index ec376f51b..000000000 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_386.go +++ /dev/null @@ -1,1788 +0,0 @@ -// mkerrors.sh -m32 -// Code generated by the command above; see README.md. DO NOT EDIT. 
- -// +build 386,darwin - -// Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs -- -m32 _const.go - -package unix - -import "syscall" - -const ( - AF_APPLETALK = 0x10 - AF_CCITT = 0xa - AF_CHAOS = 0x5 - AF_CNT = 0x15 - AF_COIP = 0x14 - AF_DATAKIT = 0x9 - AF_DECnet = 0xc - AF_DLI = 0xd - AF_E164 = 0x1c - AF_ECMA = 0x8 - AF_HYLINK = 0xf - AF_IEEE80211 = 0x25 - AF_IMPLINK = 0x3 - AF_INET = 0x2 - AF_INET6 = 0x1e - AF_IPX = 0x17 - AF_ISDN = 0x1c - AF_ISO = 0x7 - AF_LAT = 0xe - AF_LINK = 0x12 - AF_LOCAL = 0x1 - AF_MAX = 0x28 - AF_NATM = 0x1f - AF_NDRV = 0x1b - AF_NETBIOS = 0x21 - AF_NS = 0x6 - AF_OSI = 0x7 - AF_PPP = 0x22 - AF_PUP = 0x4 - AF_RESERVED_36 = 0x24 - AF_ROUTE = 0x11 - AF_SIP = 0x18 - AF_SNA = 0xb - AF_SYSTEM = 0x20 - AF_SYS_CONTROL = 0x2 - AF_UNIX = 0x1 - AF_UNSPEC = 0x0 - AF_UTUN = 0x26 - ALTWERASE = 0x200 - ATTR_BIT_MAP_COUNT = 0x5 - ATTR_CMN_ACCESSMASK = 0x20000 - ATTR_CMN_ACCTIME = 0x1000 - ATTR_CMN_ADDEDTIME = 0x10000000 - ATTR_CMN_BKUPTIME = 0x2000 - ATTR_CMN_CHGTIME = 0x800 - ATTR_CMN_CRTIME = 0x200 - ATTR_CMN_DATA_PROTECT_FLAGS = 0x40000000 - ATTR_CMN_DEVID = 0x2 - ATTR_CMN_DOCUMENT_ID = 0x100000 - ATTR_CMN_ERROR = 0x20000000 - ATTR_CMN_EXTENDED_SECURITY = 0x400000 - ATTR_CMN_FILEID = 0x2000000 - ATTR_CMN_FLAGS = 0x40000 - ATTR_CMN_FNDRINFO = 0x4000 - ATTR_CMN_FSID = 0x4 - ATTR_CMN_FULLPATH = 0x8000000 - ATTR_CMN_GEN_COUNT = 0x80000 - ATTR_CMN_GRPID = 0x10000 - ATTR_CMN_GRPUUID = 0x1000000 - ATTR_CMN_MODTIME = 0x400 - ATTR_CMN_NAME = 0x1 - ATTR_CMN_NAMEDATTRCOUNT = 0x80000 - ATTR_CMN_NAMEDATTRLIST = 0x100000 - ATTR_CMN_OBJID = 0x20 - ATTR_CMN_OBJPERMANENTID = 0x40 - ATTR_CMN_OBJTAG = 0x10 - ATTR_CMN_OBJTYPE = 0x8 - ATTR_CMN_OWNERID = 0x8000 - ATTR_CMN_PARENTID = 0x4000000 - ATTR_CMN_PAROBJID = 0x80 - ATTR_CMN_RETURNED_ATTRS = 0x80000000 - ATTR_CMN_SCRIPT = 0x100 - ATTR_CMN_SETMASK = 0x41c7ff00 - ATTR_CMN_USERACCESS = 0x200000 - ATTR_CMN_UUID = 0x800000 - ATTR_CMN_VALIDMASK = 0xffffffff - ATTR_CMN_VOLSETMASK = 0x6700 - ATTR_FILE_ALLOCSIZE 
= 0x4 - ATTR_FILE_CLUMPSIZE = 0x10 - ATTR_FILE_DATAALLOCSIZE = 0x400 - ATTR_FILE_DATAEXTENTS = 0x800 - ATTR_FILE_DATALENGTH = 0x200 - ATTR_FILE_DEVTYPE = 0x20 - ATTR_FILE_FILETYPE = 0x40 - ATTR_FILE_FORKCOUNT = 0x80 - ATTR_FILE_FORKLIST = 0x100 - ATTR_FILE_IOBLOCKSIZE = 0x8 - ATTR_FILE_LINKCOUNT = 0x1 - ATTR_FILE_RSRCALLOCSIZE = 0x2000 - ATTR_FILE_RSRCEXTENTS = 0x4000 - ATTR_FILE_RSRCLENGTH = 0x1000 - ATTR_FILE_SETMASK = 0x20 - ATTR_FILE_TOTALSIZE = 0x2 - ATTR_FILE_VALIDMASK = 0x37ff - ATTR_VOL_ALLOCATIONCLUMP = 0x40 - ATTR_VOL_ATTRIBUTES = 0x40000000 - ATTR_VOL_CAPABILITIES = 0x20000 - ATTR_VOL_DIRCOUNT = 0x400 - ATTR_VOL_ENCODINGSUSED = 0x10000 - ATTR_VOL_FILECOUNT = 0x200 - ATTR_VOL_FSTYPE = 0x1 - ATTR_VOL_INFO = 0x80000000 - ATTR_VOL_IOBLOCKSIZE = 0x80 - ATTR_VOL_MAXOBJCOUNT = 0x800 - ATTR_VOL_MINALLOCATION = 0x20 - ATTR_VOL_MOUNTEDDEVICE = 0x8000 - ATTR_VOL_MOUNTFLAGS = 0x4000 - ATTR_VOL_MOUNTPOINT = 0x1000 - ATTR_VOL_NAME = 0x2000 - ATTR_VOL_OBJCOUNT = 0x100 - ATTR_VOL_QUOTA_SIZE = 0x10000000 - ATTR_VOL_RESERVED_SIZE = 0x20000000 - ATTR_VOL_SETMASK = 0x80002000 - ATTR_VOL_SIGNATURE = 0x2 - ATTR_VOL_SIZE = 0x4 - ATTR_VOL_SPACEAVAIL = 0x10 - ATTR_VOL_SPACEFREE = 0x8 - ATTR_VOL_UUID = 0x40000 - ATTR_VOL_VALIDMASK = 0xf007ffff - B0 = 0x0 - B110 = 0x6e - B115200 = 0x1c200 - B1200 = 0x4b0 - B134 = 0x86 - B14400 = 0x3840 - B150 = 0x96 - B1800 = 0x708 - B19200 = 0x4b00 - B200 = 0xc8 - B230400 = 0x38400 - B2400 = 0x960 - B28800 = 0x7080 - B300 = 0x12c - B38400 = 0x9600 - B4800 = 0x12c0 - B50 = 0x32 - B57600 = 0xe100 - B600 = 0x258 - B7200 = 0x1c20 - B75 = 0x4b - B76800 = 0x12c00 - B9600 = 0x2580 - BIOCFLUSH = 0x20004268 - BIOCGBLEN = 0x40044266 - BIOCGDLT = 0x4004426a - BIOCGDLTLIST = 0xc00c4279 - BIOCGETIF = 0x4020426b - BIOCGHDRCMPLT = 0x40044274 - BIOCGRSIG = 0x40044272 - BIOCGRTIMEOUT = 0x4008426e - BIOCGSEESENT = 0x40044276 - BIOCGSTATS = 0x4008426f - BIOCIMMEDIATE = 0x80044270 - BIOCPROMISC = 0x20004269 - BIOCSBLEN = 0xc0044266 - BIOCSDLT = 0x80044278 - BIOCSETF 
= 0x80084267 - BIOCSETFNR = 0x8008427e - BIOCSETIF = 0x8020426c - BIOCSHDRCMPLT = 0x80044275 - BIOCSRSIG = 0x80044273 - BIOCSRTIMEOUT = 0x8008426d - BIOCSSEESENT = 0x80044277 - BIOCVERSION = 0x40044271 - BPF_A = 0x10 - BPF_ABS = 0x20 - BPF_ADD = 0x0 - BPF_ALIGNMENT = 0x4 - BPF_ALU = 0x4 - BPF_AND = 0x50 - BPF_B = 0x10 - BPF_DIV = 0x30 - BPF_H = 0x8 - BPF_IMM = 0x0 - BPF_IND = 0x40 - BPF_JA = 0x0 - BPF_JEQ = 0x10 - BPF_JGE = 0x30 - BPF_JGT = 0x20 - BPF_JMP = 0x5 - BPF_JSET = 0x40 - BPF_K = 0x0 - BPF_LD = 0x0 - BPF_LDX = 0x1 - BPF_LEN = 0x80 - BPF_LSH = 0x60 - BPF_MAJOR_VERSION = 0x1 - BPF_MAXBUFSIZE = 0x80000 - BPF_MAXINSNS = 0x200 - BPF_MEM = 0x60 - BPF_MEMWORDS = 0x10 - BPF_MINBUFSIZE = 0x20 - BPF_MINOR_VERSION = 0x1 - BPF_MISC = 0x7 - BPF_MSH = 0xa0 - BPF_MUL = 0x20 - BPF_NEG = 0x80 - BPF_OR = 0x40 - BPF_RELEASE = 0x30bb6 - BPF_RET = 0x6 - BPF_RSH = 0x70 - BPF_ST = 0x2 - BPF_STX = 0x3 - BPF_SUB = 0x10 - BPF_TAX = 0x0 - BPF_TXA = 0x80 - BPF_W = 0x0 - BPF_X = 0x8 - BRKINT = 0x2 - BS0 = 0x0 - BS1 = 0x8000 - BSDLY = 0x8000 - CFLUSH = 0xf - CLOCAL = 0x8000 - CLOCK_MONOTONIC = 0x6 - CLOCK_MONOTONIC_RAW = 0x4 - CLOCK_MONOTONIC_RAW_APPROX = 0x5 - CLOCK_PROCESS_CPUTIME_ID = 0xc - CLOCK_REALTIME = 0x0 - CLOCK_THREAD_CPUTIME_ID = 0x10 - CLOCK_UPTIME_RAW = 0x8 - CLOCK_UPTIME_RAW_APPROX = 0x9 - CLONE_NOFOLLOW = 0x1 - CLONE_NOOWNERCOPY = 0x2 - CR0 = 0x0 - CR1 = 0x1000 - CR2 = 0x2000 - CR3 = 0x3000 - CRDLY = 0x3000 - CREAD = 0x800 - CRTSCTS = 0x30000 - CS5 = 0x0 - CS6 = 0x100 - CS7 = 0x200 - CS8 = 0x300 - CSIZE = 0x300 - CSTART = 0x11 - CSTATUS = 0x14 - CSTOP = 0x13 - CSTOPB = 0x400 - CSUSP = 0x1a - CTLIOCGINFO = 0xc0644e03 - CTL_HW = 0x6 - CTL_KERN = 0x1 - CTL_MAXNAME = 0xc - CTL_NET = 0x4 - DLT_A429 = 0xb8 - DLT_A653_ICM = 0xb9 - DLT_AIRONET_HEADER = 0x78 - DLT_AOS = 0xde - DLT_APPLE_IP_OVER_IEEE1394 = 0x8a - DLT_ARCNET = 0x7 - DLT_ARCNET_LINUX = 0x81 - DLT_ATM_CLIP = 0x13 - DLT_ATM_RFC1483 = 0xb - DLT_AURORA = 0x7e - DLT_AX25 = 0x3 - DLT_AX25_KISS = 0xca - DLT_BACNET_MS_TP = 
0xa5 - DLT_BLUETOOTH_HCI_H4 = 0xbb - DLT_BLUETOOTH_HCI_H4_WITH_PHDR = 0xc9 - DLT_CAN20B = 0xbe - DLT_CAN_SOCKETCAN = 0xe3 - DLT_CHAOS = 0x5 - DLT_CHDLC = 0x68 - DLT_CISCO_IOS = 0x76 - DLT_C_HDLC = 0x68 - DLT_C_HDLC_WITH_DIR = 0xcd - DLT_DBUS = 0xe7 - DLT_DECT = 0xdd - DLT_DOCSIS = 0x8f - DLT_DVB_CI = 0xeb - DLT_ECONET = 0x73 - DLT_EN10MB = 0x1 - DLT_EN3MB = 0x2 - DLT_ENC = 0x6d - DLT_ERF = 0xc5 - DLT_ERF_ETH = 0xaf - DLT_ERF_POS = 0xb0 - DLT_FC_2 = 0xe0 - DLT_FC_2_WITH_FRAME_DELIMS = 0xe1 - DLT_FDDI = 0xa - DLT_FLEXRAY = 0xd2 - DLT_FRELAY = 0x6b - DLT_FRELAY_WITH_DIR = 0xce - DLT_GCOM_SERIAL = 0xad - DLT_GCOM_T1E1 = 0xac - DLT_GPF_F = 0xab - DLT_GPF_T = 0xaa - DLT_GPRS_LLC = 0xa9 - DLT_GSMTAP_ABIS = 0xda - DLT_GSMTAP_UM = 0xd9 - DLT_HHDLC = 0x79 - DLT_IBM_SN = 0x92 - DLT_IBM_SP = 0x91 - DLT_IEEE802 = 0x6 - DLT_IEEE802_11 = 0x69 - DLT_IEEE802_11_RADIO = 0x7f - DLT_IEEE802_11_RADIO_AVS = 0xa3 - DLT_IEEE802_15_4 = 0xc3 - DLT_IEEE802_15_4_LINUX = 0xbf - DLT_IEEE802_15_4_NOFCS = 0xe6 - DLT_IEEE802_15_4_NONASK_PHY = 0xd7 - DLT_IEEE802_16_MAC_CPS = 0xbc - DLT_IEEE802_16_MAC_CPS_RADIO = 0xc1 - DLT_IPFILTER = 0x74 - DLT_IPMB = 0xc7 - DLT_IPMB_LINUX = 0xd1 - DLT_IPNET = 0xe2 - DLT_IPOIB = 0xf2 - DLT_IPV4 = 0xe4 - DLT_IPV6 = 0xe5 - DLT_IP_OVER_FC = 0x7a - DLT_JUNIPER_ATM1 = 0x89 - DLT_JUNIPER_ATM2 = 0x87 - DLT_JUNIPER_ATM_CEMIC = 0xee - DLT_JUNIPER_CHDLC = 0xb5 - DLT_JUNIPER_ES = 0x84 - DLT_JUNIPER_ETHER = 0xb2 - DLT_JUNIPER_FIBRECHANNEL = 0xea - DLT_JUNIPER_FRELAY = 0xb4 - DLT_JUNIPER_GGSN = 0x85 - DLT_JUNIPER_ISM = 0xc2 - DLT_JUNIPER_MFR = 0x86 - DLT_JUNIPER_MLFR = 0x83 - DLT_JUNIPER_MLPPP = 0x82 - DLT_JUNIPER_MONITOR = 0xa4 - DLT_JUNIPER_PIC_PEER = 0xae - DLT_JUNIPER_PPP = 0xb3 - DLT_JUNIPER_PPPOE = 0xa7 - DLT_JUNIPER_PPPOE_ATM = 0xa8 - DLT_JUNIPER_SERVICES = 0x88 - DLT_JUNIPER_SRX_E2E = 0xe9 - DLT_JUNIPER_ST = 0xc8 - DLT_JUNIPER_VP = 0xb7 - DLT_JUNIPER_VS = 0xe8 - DLT_LAPB_WITH_DIR = 0xcf - DLT_LAPD = 0xcb - DLT_LIN = 0xd4 - DLT_LINUX_EVDEV = 0xd8 - DLT_LINUX_IRDA = 0x90 
- DLT_LINUX_LAPD = 0xb1 - DLT_LINUX_PPP_WITHDIRECTION = 0xa6 - DLT_LINUX_SLL = 0x71 - DLT_LOOP = 0x6c - DLT_LTALK = 0x72 - DLT_MATCHING_MAX = 0xf5 - DLT_MATCHING_MIN = 0x68 - DLT_MFR = 0xb6 - DLT_MOST = 0xd3 - DLT_MPEG_2_TS = 0xf3 - DLT_MPLS = 0xdb - DLT_MTP2 = 0x8c - DLT_MTP2_WITH_PHDR = 0x8b - DLT_MTP3 = 0x8d - DLT_MUX27010 = 0xec - DLT_NETANALYZER = 0xf0 - DLT_NETANALYZER_TRANSPARENT = 0xf1 - DLT_NFC_LLCP = 0xf5 - DLT_NFLOG = 0xef - DLT_NG40 = 0xf4 - DLT_NULL = 0x0 - DLT_PCI_EXP = 0x7d - DLT_PFLOG = 0x75 - DLT_PFSYNC = 0x12 - DLT_PPI = 0xc0 - DLT_PPP = 0x9 - DLT_PPP_BSDOS = 0x10 - DLT_PPP_ETHER = 0x33 - DLT_PPP_PPPD = 0xa6 - DLT_PPP_SERIAL = 0x32 - DLT_PPP_WITH_DIR = 0xcc - DLT_PPP_WITH_DIRECTION = 0xa6 - DLT_PRISM_HEADER = 0x77 - DLT_PRONET = 0x4 - DLT_RAIF1 = 0xc6 - DLT_RAW = 0xc - DLT_RIO = 0x7c - DLT_SCCP = 0x8e - DLT_SITA = 0xc4 - DLT_SLIP = 0x8 - DLT_SLIP_BSDOS = 0xf - DLT_STANAG_5066_D_PDU = 0xed - DLT_SUNATM = 0x7b - DLT_SYMANTEC_FIREWALL = 0x63 - DLT_TZSP = 0x80 - DLT_USB = 0xba - DLT_USB_LINUX = 0xbd - DLT_USB_LINUX_MMAPPED = 0xdc - DLT_USER0 = 0x93 - DLT_USER1 = 0x94 - DLT_USER10 = 0x9d - DLT_USER11 = 0x9e - DLT_USER12 = 0x9f - DLT_USER13 = 0xa0 - DLT_USER14 = 0xa1 - DLT_USER15 = 0xa2 - DLT_USER2 = 0x95 - DLT_USER3 = 0x96 - DLT_USER4 = 0x97 - DLT_USER5 = 0x98 - DLT_USER6 = 0x99 - DLT_USER7 = 0x9a - DLT_USER8 = 0x9b - DLT_USER9 = 0x9c - DLT_WIHART = 0xdf - DLT_X2E_SERIAL = 0xd5 - DLT_X2E_XORAYA = 0xd6 - DT_BLK = 0x6 - DT_CHR = 0x2 - DT_DIR = 0x4 - DT_FIFO = 0x1 - DT_LNK = 0xa - DT_REG = 0x8 - DT_SOCK = 0xc - DT_UNKNOWN = 0x0 - DT_WHT = 0xe - ECHO = 0x8 - ECHOCTL = 0x40 - ECHOE = 0x2 - ECHOK = 0x4 - ECHOKE = 0x1 - ECHONL = 0x10 - ECHOPRT = 0x20 - EVFILT_AIO = -0x3 - EVFILT_EXCEPT = -0xf - EVFILT_FS = -0x9 - EVFILT_MACHPORT = -0x8 - EVFILT_PROC = -0x5 - EVFILT_READ = -0x1 - EVFILT_SIGNAL = -0x6 - EVFILT_SYSCOUNT = 0xf - EVFILT_THREADMARKER = 0xf - EVFILT_TIMER = -0x7 - EVFILT_USER = -0xa - EVFILT_VM = -0xc - EVFILT_VNODE = -0x4 - EVFILT_WRITE = -0x2 - 
EV_ADD = 0x1 - EV_CLEAR = 0x20 - EV_DELETE = 0x2 - EV_DISABLE = 0x8 - EV_DISPATCH = 0x80 - EV_DISPATCH2 = 0x180 - EV_ENABLE = 0x4 - EV_EOF = 0x8000 - EV_ERROR = 0x4000 - EV_FLAG0 = 0x1000 - EV_FLAG1 = 0x2000 - EV_ONESHOT = 0x10 - EV_OOBAND = 0x2000 - EV_POLL = 0x1000 - EV_RECEIPT = 0x40 - EV_SYSFLAGS = 0xf000 - EV_UDATA_SPECIFIC = 0x100 - EV_VANISHED = 0x200 - EXTA = 0x4b00 - EXTB = 0x9600 - EXTPROC = 0x800 - FD_CLOEXEC = 0x1 - FD_SETSIZE = 0x400 - FF0 = 0x0 - FF1 = 0x4000 - FFDLY = 0x4000 - FLUSHO = 0x800000 - FSOPT_ATTR_CMN_EXTENDED = 0x20 - FSOPT_NOFOLLOW = 0x1 - FSOPT_NOINMEMUPDATE = 0x2 - FSOPT_PACK_INVAL_ATTRS = 0x8 - FSOPT_REPORT_FULLSIZE = 0x4 - F_ADDFILESIGS = 0x3d - F_ADDFILESIGS_FOR_DYLD_SIM = 0x53 - F_ADDFILESIGS_RETURN = 0x61 - F_ADDSIGS = 0x3b - F_ALLOCATEALL = 0x4 - F_ALLOCATECONTIG = 0x2 - F_BARRIERFSYNC = 0x55 - F_CHECK_LV = 0x62 - F_CHKCLEAN = 0x29 - F_DUPFD = 0x0 - F_DUPFD_CLOEXEC = 0x43 - F_FINDSIGS = 0x4e - F_FLUSH_DATA = 0x28 - F_FREEZE_FS = 0x35 - F_FULLFSYNC = 0x33 - F_GETCODEDIR = 0x48 - F_GETFD = 0x1 - F_GETFL = 0x3 - F_GETLK = 0x7 - F_GETLKPID = 0x42 - F_GETNOSIGPIPE = 0x4a - F_GETOWN = 0x5 - F_GETPATH = 0x32 - F_GETPATH_MTMINFO = 0x47 - F_GETPROTECTIONCLASS = 0x3f - F_GETPROTECTIONLEVEL = 0x4d - F_GLOBAL_NOCACHE = 0x37 - F_LOG2PHYS = 0x31 - F_LOG2PHYS_EXT = 0x41 - F_NOCACHE = 0x30 - F_NODIRECT = 0x3e - F_OK = 0x0 - F_PATHPKG_CHECK = 0x34 - F_PEOFPOSMODE = 0x3 - F_PREALLOCATE = 0x2a - F_PUNCHHOLE = 0x63 - F_RDADVISE = 0x2c - F_RDAHEAD = 0x2d - F_RDLCK = 0x1 - F_SETBACKINGSTORE = 0x46 - F_SETFD = 0x2 - F_SETFL = 0x4 - F_SETLK = 0x8 - F_SETLKW = 0x9 - F_SETLKWTIMEOUT = 0xa - F_SETNOSIGPIPE = 0x49 - F_SETOWN = 0x6 - F_SETPROTECTIONCLASS = 0x40 - F_SETSIZE = 0x2b - F_SINGLE_WRITER = 0x4c - F_THAW_FS = 0x36 - F_TRANSCODEKEY = 0x4b - F_TRIM_ACTIVE_FILE = 0x64 - F_UNLCK = 0x2 - F_VOLPOSMODE = 0x4 - F_WRLCK = 0x3 - HUPCL = 0x4000 - HW_MACHINE = 0x1 - ICANON = 0x100 - ICMP6_FILTER = 0x12 - ICRNL = 0x100 - IEXTEN = 0x400 - IFF_ALLMULTI = 0x200 - 
IFF_ALTPHYS = 0x4000 - IFF_BROADCAST = 0x2 - IFF_DEBUG = 0x4 - IFF_LINK0 = 0x1000 - IFF_LINK1 = 0x2000 - IFF_LINK2 = 0x4000 - IFF_LOOPBACK = 0x8 - IFF_MULTICAST = 0x8000 - IFF_NOARP = 0x80 - IFF_NOTRAILERS = 0x20 - IFF_OACTIVE = 0x400 - IFF_POINTOPOINT = 0x10 - IFF_PROMISC = 0x100 - IFF_RUNNING = 0x40 - IFF_SIMPLEX = 0x800 - IFF_UP = 0x1 - IFNAMSIZ = 0x10 - IFT_1822 = 0x2 - IFT_AAL5 = 0x31 - IFT_ARCNET = 0x23 - IFT_ARCNETPLUS = 0x24 - IFT_ATM = 0x25 - IFT_BRIDGE = 0xd1 - IFT_CARP = 0xf8 - IFT_CELLULAR = 0xff - IFT_CEPT = 0x13 - IFT_DS3 = 0x1e - IFT_ENC = 0xf4 - IFT_EON = 0x19 - IFT_ETHER = 0x6 - IFT_FAITH = 0x38 - IFT_FDDI = 0xf - IFT_FRELAY = 0x20 - IFT_FRELAYDCE = 0x2c - IFT_GIF = 0x37 - IFT_HDH1822 = 0x3 - IFT_HIPPI = 0x2f - IFT_HSSI = 0x2e - IFT_HY = 0xe - IFT_IEEE1394 = 0x90 - IFT_IEEE8023ADLAG = 0x88 - IFT_ISDNBASIC = 0x14 - IFT_ISDNPRIMARY = 0x15 - IFT_ISO88022LLC = 0x29 - IFT_ISO88023 = 0x7 - IFT_ISO88024 = 0x8 - IFT_ISO88025 = 0x9 - IFT_ISO88026 = 0xa - IFT_L2VLAN = 0x87 - IFT_LAPB = 0x10 - IFT_LOCALTALK = 0x2a - IFT_LOOP = 0x18 - IFT_MIOX25 = 0x26 - IFT_MODEM = 0x30 - IFT_NSIP = 0x1b - IFT_OTHER = 0x1 - IFT_P10 = 0xc - IFT_P80 = 0xd - IFT_PARA = 0x22 - IFT_PDP = 0xff - IFT_PFLOG = 0xf5 - IFT_PFSYNC = 0xf6 - IFT_PKTAP = 0xfe - IFT_PPP = 0x17 - IFT_PROPMUX = 0x36 - IFT_PROPVIRTUAL = 0x35 - IFT_PTPSERIAL = 0x16 - IFT_RS232 = 0x21 - IFT_SDLC = 0x11 - IFT_SIP = 0x1f - IFT_SLIP = 0x1c - IFT_SMDSDXI = 0x2b - IFT_SMDSICIP = 0x34 - IFT_SONET = 0x27 - IFT_SONETPATH = 0x32 - IFT_SONETVT = 0x33 - IFT_STARLAN = 0xb - IFT_STF = 0x39 - IFT_T1 = 0x12 - IFT_ULTRA = 0x1d - IFT_V35 = 0x2d - IFT_X25 = 0x5 - IFT_X25DDN = 0x4 - IFT_X25PLE = 0x28 - IFT_XETHER = 0x1a - IGNBRK = 0x1 - IGNCR = 0x80 - IGNPAR = 0x4 - IMAXBEL = 0x2000 - INLCR = 0x40 - INPCK = 0x10 - IN_CLASSA_HOST = 0xffffff - IN_CLASSA_MAX = 0x80 - IN_CLASSA_NET = 0xff000000 - IN_CLASSA_NSHIFT = 0x18 - IN_CLASSB_HOST = 0xffff - IN_CLASSB_MAX = 0x10000 - IN_CLASSB_NET = 0xffff0000 - IN_CLASSB_NSHIFT = 0x10 - 
IN_CLASSC_HOST = 0xff - IN_CLASSC_NET = 0xffffff00 - IN_CLASSC_NSHIFT = 0x8 - IN_CLASSD_HOST = 0xfffffff - IN_CLASSD_NET = 0xf0000000 - IN_CLASSD_NSHIFT = 0x1c - IN_LINKLOCALNETNUM = 0xa9fe0000 - IN_LOOPBACKNET = 0x7f - IPPROTO_3PC = 0x22 - IPPROTO_ADFS = 0x44 - IPPROTO_AH = 0x33 - IPPROTO_AHIP = 0x3d - IPPROTO_APES = 0x63 - IPPROTO_ARGUS = 0xd - IPPROTO_AX25 = 0x5d - IPPROTO_BHA = 0x31 - IPPROTO_BLT = 0x1e - IPPROTO_BRSATMON = 0x4c - IPPROTO_CFTP = 0x3e - IPPROTO_CHAOS = 0x10 - IPPROTO_CMTP = 0x26 - IPPROTO_CPHB = 0x49 - IPPROTO_CPNX = 0x48 - IPPROTO_DDP = 0x25 - IPPROTO_DGP = 0x56 - IPPROTO_DIVERT = 0xfe - IPPROTO_DONE = 0x101 - IPPROTO_DSTOPTS = 0x3c - IPPROTO_EGP = 0x8 - IPPROTO_EMCON = 0xe - IPPROTO_ENCAP = 0x62 - IPPROTO_EON = 0x50 - IPPROTO_ESP = 0x32 - IPPROTO_ETHERIP = 0x61 - IPPROTO_FRAGMENT = 0x2c - IPPROTO_GGP = 0x3 - IPPROTO_GMTP = 0x64 - IPPROTO_GRE = 0x2f - IPPROTO_HELLO = 0x3f - IPPROTO_HMP = 0x14 - IPPROTO_HOPOPTS = 0x0 - IPPROTO_ICMP = 0x1 - IPPROTO_ICMPV6 = 0x3a - IPPROTO_IDP = 0x16 - IPPROTO_IDPR = 0x23 - IPPROTO_IDRP = 0x2d - IPPROTO_IGMP = 0x2 - IPPROTO_IGP = 0x55 - IPPROTO_IGRP = 0x58 - IPPROTO_IL = 0x28 - IPPROTO_INLSP = 0x34 - IPPROTO_INP = 0x20 - IPPROTO_IP = 0x0 - IPPROTO_IPCOMP = 0x6c - IPPROTO_IPCV = 0x47 - IPPROTO_IPEIP = 0x5e - IPPROTO_IPIP = 0x4 - IPPROTO_IPPC = 0x43 - IPPROTO_IPV4 = 0x4 - IPPROTO_IPV6 = 0x29 - IPPROTO_IRTP = 0x1c - IPPROTO_KRYPTOLAN = 0x41 - IPPROTO_LARP = 0x5b - IPPROTO_LEAF1 = 0x19 - IPPROTO_LEAF2 = 0x1a - IPPROTO_MAX = 0x100 - IPPROTO_MAXID = 0x34 - IPPROTO_MEAS = 0x13 - IPPROTO_MHRP = 0x30 - IPPROTO_MICP = 0x5f - IPPROTO_MTP = 0x5c - IPPROTO_MUX = 0x12 - IPPROTO_ND = 0x4d - IPPROTO_NHRP = 0x36 - IPPROTO_NONE = 0x3b - IPPROTO_NSP = 0x1f - IPPROTO_NVPII = 0xb - IPPROTO_OSPFIGP = 0x59 - IPPROTO_PGM = 0x71 - IPPROTO_PIGP = 0x9 - IPPROTO_PIM = 0x67 - IPPROTO_PRM = 0x15 - IPPROTO_PUP = 0xc - IPPROTO_PVP = 0x4b - IPPROTO_RAW = 0xff - IPPROTO_RCCMON = 0xa - IPPROTO_RDP = 0x1b - IPPROTO_ROUTING = 0x2b - IPPROTO_RSVP = 
0x2e - IPPROTO_RVD = 0x42 - IPPROTO_SATEXPAK = 0x40 - IPPROTO_SATMON = 0x45 - IPPROTO_SCCSP = 0x60 - IPPROTO_SCTP = 0x84 - IPPROTO_SDRP = 0x2a - IPPROTO_SEP = 0x21 - IPPROTO_SRPC = 0x5a - IPPROTO_ST = 0x7 - IPPROTO_SVMTP = 0x52 - IPPROTO_SWIPE = 0x35 - IPPROTO_TCF = 0x57 - IPPROTO_TCP = 0x6 - IPPROTO_TP = 0x1d - IPPROTO_TPXX = 0x27 - IPPROTO_TRUNK1 = 0x17 - IPPROTO_TRUNK2 = 0x18 - IPPROTO_TTP = 0x54 - IPPROTO_UDP = 0x11 - IPPROTO_VINES = 0x53 - IPPROTO_VISA = 0x46 - IPPROTO_VMTP = 0x51 - IPPROTO_WBEXPAK = 0x4f - IPPROTO_WBMON = 0x4e - IPPROTO_WSN = 0x4a - IPPROTO_XNET = 0xf - IPPROTO_XTP = 0x24 - IPV6_2292DSTOPTS = 0x17 - IPV6_2292HOPLIMIT = 0x14 - IPV6_2292HOPOPTS = 0x16 - IPV6_2292NEXTHOP = 0x15 - IPV6_2292PKTINFO = 0x13 - IPV6_2292PKTOPTIONS = 0x19 - IPV6_2292RTHDR = 0x18 - IPV6_BINDV6ONLY = 0x1b - IPV6_BOUND_IF = 0x7d - IPV6_CHECKSUM = 0x1a - IPV6_DEFAULT_MULTICAST_HOPS = 0x1 - IPV6_DEFAULT_MULTICAST_LOOP = 0x1 - IPV6_DEFHLIM = 0x40 - IPV6_FAITH = 0x1d - IPV6_FLOWINFO_MASK = 0xffffff0f - IPV6_FLOWLABEL_MASK = 0xffff0f00 - IPV6_FLOW_ECN_MASK = 0x300 - IPV6_FRAGTTL = 0x3c - IPV6_FW_ADD = 0x1e - IPV6_FW_DEL = 0x1f - IPV6_FW_FLUSH = 0x20 - IPV6_FW_GET = 0x22 - IPV6_FW_ZERO = 0x21 - IPV6_HLIMDEC = 0x1 - IPV6_IPSEC_POLICY = 0x1c - IPV6_JOIN_GROUP = 0xc - IPV6_LEAVE_GROUP = 0xd - IPV6_MAXHLIM = 0xff - IPV6_MAXOPTHDR = 0x800 - IPV6_MAXPACKET = 0xffff - IPV6_MAX_GROUP_SRC_FILTER = 0x200 - IPV6_MAX_MEMBERSHIPS = 0xfff - IPV6_MAX_SOCK_SRC_FILTER = 0x80 - IPV6_MIN_MEMBERSHIPS = 0x1f - IPV6_MMTU = 0x500 - IPV6_MULTICAST_HOPS = 0xa - IPV6_MULTICAST_IF = 0x9 - IPV6_MULTICAST_LOOP = 0xb - IPV6_PORTRANGE = 0xe - IPV6_PORTRANGE_DEFAULT = 0x0 - IPV6_PORTRANGE_HIGH = 0x1 - IPV6_PORTRANGE_LOW = 0x2 - IPV6_RECVTCLASS = 0x23 - IPV6_RTHDR_LOOSE = 0x0 - IPV6_RTHDR_STRICT = 0x1 - IPV6_RTHDR_TYPE_0 = 0x0 - IPV6_SOCKOPT_RESERVED1 = 0x3 - IPV6_TCLASS = 0x24 - IPV6_UNICAST_HOPS = 0x4 - IPV6_V6ONLY = 0x1b - IPV6_VERSION = 0x60 - IPV6_VERSION_MASK = 0xf0 - IP_ADD_MEMBERSHIP = 0xc - 
IP_ADD_SOURCE_MEMBERSHIP = 0x46 - IP_BLOCK_SOURCE = 0x48 - IP_BOUND_IF = 0x19 - IP_DEFAULT_MULTICAST_LOOP = 0x1 - IP_DEFAULT_MULTICAST_TTL = 0x1 - IP_DF = 0x4000 - IP_DROP_MEMBERSHIP = 0xd - IP_DROP_SOURCE_MEMBERSHIP = 0x47 - IP_DUMMYNET_CONFIGURE = 0x3c - IP_DUMMYNET_DEL = 0x3d - IP_DUMMYNET_FLUSH = 0x3e - IP_DUMMYNET_GET = 0x40 - IP_FAITH = 0x16 - IP_FW_ADD = 0x28 - IP_FW_DEL = 0x29 - IP_FW_FLUSH = 0x2a - IP_FW_GET = 0x2c - IP_FW_RESETLOG = 0x2d - IP_FW_ZERO = 0x2b - IP_HDRINCL = 0x2 - IP_IPSEC_POLICY = 0x15 - IP_MAXPACKET = 0xffff - IP_MAX_GROUP_SRC_FILTER = 0x200 - IP_MAX_MEMBERSHIPS = 0xfff - IP_MAX_SOCK_MUTE_FILTER = 0x80 - IP_MAX_SOCK_SRC_FILTER = 0x80 - IP_MF = 0x2000 - IP_MIN_MEMBERSHIPS = 0x1f - IP_MSFILTER = 0x4a - IP_MSS = 0x240 - IP_MULTICAST_IF = 0x9 - IP_MULTICAST_IFINDEX = 0x42 - IP_MULTICAST_LOOP = 0xb - IP_MULTICAST_TTL = 0xa - IP_MULTICAST_VIF = 0xe - IP_NAT__XXX = 0x37 - IP_OFFMASK = 0x1fff - IP_OLD_FW_ADD = 0x32 - IP_OLD_FW_DEL = 0x33 - IP_OLD_FW_FLUSH = 0x34 - IP_OLD_FW_GET = 0x36 - IP_OLD_FW_RESETLOG = 0x38 - IP_OLD_FW_ZERO = 0x35 - IP_OPTIONS = 0x1 - IP_PKTINFO = 0x1a - IP_PORTRANGE = 0x13 - IP_PORTRANGE_DEFAULT = 0x0 - IP_PORTRANGE_HIGH = 0x1 - IP_PORTRANGE_LOW = 0x2 - IP_RECVDSTADDR = 0x7 - IP_RECVIF = 0x14 - IP_RECVOPTS = 0x5 - IP_RECVPKTINFO = 0x1a - IP_RECVRETOPTS = 0x6 - IP_RECVTOS = 0x1b - IP_RECVTTL = 0x18 - IP_RETOPTS = 0x8 - IP_RF = 0x8000 - IP_RSVP_OFF = 0x10 - IP_RSVP_ON = 0xf - IP_RSVP_VIF_OFF = 0x12 - IP_RSVP_VIF_ON = 0x11 - IP_STRIPHDR = 0x17 - IP_TOS = 0x3 - IP_TRAFFIC_MGT_BACKGROUND = 0x41 - IP_TTL = 0x4 - IP_UNBLOCK_SOURCE = 0x49 - ISIG = 0x80 - ISTRIP = 0x20 - IUTF8 = 0x4000 - IXANY = 0x800 - IXOFF = 0x400 - IXON = 0x200 - KERN_HOSTNAME = 0xa - KERN_OSRELEASE = 0x2 - KERN_OSTYPE = 0x1 - KERN_VERSION = 0x4 - LOCK_EX = 0x2 - LOCK_NB = 0x4 - LOCK_SH = 0x1 - LOCK_UN = 0x8 - MADV_CAN_REUSE = 0x9 - MADV_DONTNEED = 0x4 - MADV_FREE = 0x5 - MADV_FREE_REUSABLE = 0x7 - MADV_FREE_REUSE = 0x8 - MADV_NORMAL = 0x0 - MADV_PAGEOUT = 0xa - 
MADV_RANDOM = 0x1 - MADV_SEQUENTIAL = 0x2 - MADV_WILLNEED = 0x3 - MADV_ZERO_WIRED_PAGES = 0x6 - MAP_ANON = 0x1000 - MAP_ANONYMOUS = 0x1000 - MAP_COPY = 0x2 - MAP_FILE = 0x0 - MAP_FIXED = 0x10 - MAP_HASSEMAPHORE = 0x200 - MAP_JIT = 0x800 - MAP_NOCACHE = 0x400 - MAP_NOEXTEND = 0x100 - MAP_NORESERVE = 0x40 - MAP_PRIVATE = 0x2 - MAP_RENAME = 0x20 - MAP_RESERVED0080 = 0x80 - MAP_RESILIENT_CODESIGN = 0x2000 - MAP_RESILIENT_MEDIA = 0x4000 - MAP_SHARED = 0x1 - MCL_CURRENT = 0x1 - MCL_FUTURE = 0x2 - MNT_ASYNC = 0x40 - MNT_AUTOMOUNTED = 0x400000 - MNT_CMDFLAGS = 0xf0000 - MNT_CPROTECT = 0x80 - MNT_DEFWRITE = 0x2000000 - MNT_DONTBROWSE = 0x100000 - MNT_DOVOLFS = 0x8000 - MNT_DWAIT = 0x4 - MNT_EXPORTED = 0x100 - MNT_FORCE = 0x80000 - MNT_IGNORE_OWNERSHIP = 0x200000 - MNT_JOURNALED = 0x800000 - MNT_LOCAL = 0x1000 - MNT_MULTILABEL = 0x4000000 - MNT_NOATIME = 0x10000000 - MNT_NOBLOCK = 0x20000 - MNT_NODEV = 0x10 - MNT_NOEXEC = 0x4 - MNT_NOSUID = 0x8 - MNT_NOUSERXATTR = 0x1000000 - MNT_NOWAIT = 0x2 - MNT_QUARANTINE = 0x400 - MNT_QUOTA = 0x2000 - MNT_RDONLY = 0x1 - MNT_RELOAD = 0x40000 - MNT_ROOTFS = 0x4000 - MNT_SYNCHRONOUS = 0x2 - MNT_UNION = 0x20 - MNT_UNKNOWNPERMISSIONS = 0x200000 - MNT_UPDATE = 0x10000 - MNT_VISFLAGMASK = 0x17f0f5ff - MNT_WAIT = 0x1 - MSG_CTRUNC = 0x20 - MSG_DONTROUTE = 0x4 - MSG_DONTWAIT = 0x80 - MSG_EOF = 0x100 - MSG_EOR = 0x8 - MSG_FLUSH = 0x400 - MSG_HAVEMORE = 0x2000 - MSG_HOLD = 0x800 - MSG_NEEDSA = 0x10000 - MSG_OOB = 0x1 - MSG_PEEK = 0x2 - MSG_RCVMORE = 0x4000 - MSG_SEND = 0x1000 - MSG_TRUNC = 0x10 - MSG_WAITALL = 0x40 - MSG_WAITSTREAM = 0x200 - MS_ASYNC = 0x1 - MS_DEACTIVATE = 0x8 - MS_INVALIDATE = 0x2 - MS_KILLPAGES = 0x4 - MS_SYNC = 0x10 - NAME_MAX = 0xff - NET_RT_DUMP = 0x1 - NET_RT_DUMP2 = 0x7 - NET_RT_FLAGS = 0x2 - NET_RT_IFLIST = 0x3 - NET_RT_IFLIST2 = 0x6 - NET_RT_MAXID = 0xa - NET_RT_STAT = 0x4 - NET_RT_TRASH = 0x5 - NFDBITS = 0x20 - NL0 = 0x0 - NL1 = 0x100 - NL2 = 0x200 - NL3 = 0x300 - NLDLY = 0x300 - NOFLSH = 0x80000000 - NOKERNINFO = 
0x2000000 - NOTE_ABSOLUTE = 0x8 - NOTE_ATTRIB = 0x8 - NOTE_BACKGROUND = 0x40 - NOTE_CHILD = 0x4 - NOTE_CRITICAL = 0x20 - NOTE_DELETE = 0x1 - NOTE_EXEC = 0x20000000 - NOTE_EXIT = 0x80000000 - NOTE_EXITSTATUS = 0x4000000 - NOTE_EXIT_CSERROR = 0x40000 - NOTE_EXIT_DECRYPTFAIL = 0x10000 - NOTE_EXIT_DETAIL = 0x2000000 - NOTE_EXIT_DETAIL_MASK = 0x70000 - NOTE_EXIT_MEMORY = 0x20000 - NOTE_EXIT_REPARENTED = 0x80000 - NOTE_EXTEND = 0x4 - NOTE_FFAND = 0x40000000 - NOTE_FFCOPY = 0xc0000000 - NOTE_FFCTRLMASK = 0xc0000000 - NOTE_FFLAGSMASK = 0xffffff - NOTE_FFNOP = 0x0 - NOTE_FFOR = 0x80000000 - NOTE_FORK = 0x40000000 - NOTE_FUNLOCK = 0x100 - NOTE_LEEWAY = 0x10 - NOTE_LINK = 0x10 - NOTE_LOWAT = 0x1 - NOTE_MACH_CONTINUOUS_TIME = 0x80 - NOTE_NONE = 0x80 - NOTE_NSECONDS = 0x4 - NOTE_OOB = 0x2 - NOTE_PCTRLMASK = -0x100000 - NOTE_PDATAMASK = 0xfffff - NOTE_REAP = 0x10000000 - NOTE_RENAME = 0x20 - NOTE_REVOKE = 0x40 - NOTE_SECONDS = 0x1 - NOTE_SIGNAL = 0x8000000 - NOTE_TRACK = 0x1 - NOTE_TRACKERR = 0x2 - NOTE_TRIGGER = 0x1000000 - NOTE_USECONDS = 0x2 - NOTE_VM_ERROR = 0x10000000 - NOTE_VM_PRESSURE = 0x80000000 - NOTE_VM_PRESSURE_SUDDEN_TERMINATE = 0x20000000 - NOTE_VM_PRESSURE_TERMINATE = 0x40000000 - NOTE_WRITE = 0x2 - OCRNL = 0x10 - OFDEL = 0x20000 - OFILL = 0x80 - ONLCR = 0x2 - ONLRET = 0x40 - ONOCR = 0x20 - ONOEOT = 0x8 - OPOST = 0x1 - OXTABS = 0x4 - O_ACCMODE = 0x3 - O_ALERT = 0x20000000 - O_APPEND = 0x8 - O_ASYNC = 0x40 - O_CLOEXEC = 0x1000000 - O_CREAT = 0x200 - O_DIRECTORY = 0x100000 - O_DP_GETRAWENCRYPTED = 0x1 - O_DP_GETRAWUNENCRYPTED = 0x2 - O_DSYNC = 0x400000 - O_EVTONLY = 0x8000 - O_EXCL = 0x800 - O_EXLOCK = 0x20 - O_FSYNC = 0x80 - O_NDELAY = 0x4 - O_NOCTTY = 0x20000 - O_NOFOLLOW = 0x100 - O_NONBLOCK = 0x4 - O_POPUP = 0x80000000 - O_RDONLY = 0x0 - O_RDWR = 0x2 - O_SHLOCK = 0x10 - O_SYMLINK = 0x200000 - O_SYNC = 0x80 - O_TRUNC = 0x400 - O_WRONLY = 0x1 - PARENB = 0x1000 - PARMRK = 0x8 - PARODD = 0x2000 - PENDIN = 0x20000000 - PRIO_PGRP = 0x1 - PRIO_PROCESS = 0x0 - PRIO_USER 
= 0x2 - PROT_EXEC = 0x4 - PROT_NONE = 0x0 - PROT_READ = 0x1 - PROT_WRITE = 0x2 - PT_ATTACH = 0xa - PT_ATTACHEXC = 0xe - PT_CONTINUE = 0x7 - PT_DENY_ATTACH = 0x1f - PT_DETACH = 0xb - PT_FIRSTMACH = 0x20 - PT_FORCEQUOTA = 0x1e - PT_KILL = 0x8 - PT_READ_D = 0x2 - PT_READ_I = 0x1 - PT_READ_U = 0x3 - PT_SIGEXC = 0xc - PT_STEP = 0x9 - PT_THUPDATE = 0xd - PT_TRACE_ME = 0x0 - PT_WRITE_D = 0x5 - PT_WRITE_I = 0x4 - PT_WRITE_U = 0x6 - RLIMIT_AS = 0x5 - RLIMIT_CORE = 0x4 - RLIMIT_CPU = 0x0 - RLIMIT_CPU_USAGE_MONITOR = 0x2 - RLIMIT_DATA = 0x2 - RLIMIT_FSIZE = 0x1 - RLIMIT_MEMLOCK = 0x6 - RLIMIT_NOFILE = 0x8 - RLIMIT_NPROC = 0x7 - RLIMIT_RSS = 0x5 - RLIMIT_STACK = 0x3 - RLIM_INFINITY = 0x7fffffffffffffff - RTAX_AUTHOR = 0x6 - RTAX_BRD = 0x7 - RTAX_DST = 0x0 - RTAX_GATEWAY = 0x1 - RTAX_GENMASK = 0x3 - RTAX_IFA = 0x5 - RTAX_IFP = 0x4 - RTAX_MAX = 0x8 - RTAX_NETMASK = 0x2 - RTA_AUTHOR = 0x40 - RTA_BRD = 0x80 - RTA_DST = 0x1 - RTA_GATEWAY = 0x2 - RTA_GENMASK = 0x8 - RTA_IFA = 0x20 - RTA_IFP = 0x10 - RTA_NETMASK = 0x4 - RTF_BLACKHOLE = 0x1000 - RTF_BROADCAST = 0x400000 - RTF_CLONING = 0x100 - RTF_CONDEMNED = 0x2000000 - RTF_DELCLONE = 0x80 - RTF_DONE = 0x40 - RTF_DYNAMIC = 0x10 - RTF_GATEWAY = 0x2 - RTF_HOST = 0x4 - RTF_IFREF = 0x4000000 - RTF_IFSCOPE = 0x1000000 - RTF_LLINFO = 0x400 - RTF_LOCAL = 0x200000 - RTF_MODIFIED = 0x20 - RTF_MULTICAST = 0x800000 - RTF_NOIFREF = 0x2000 - RTF_PINNED = 0x100000 - RTF_PRCLONING = 0x10000 - RTF_PROTO1 = 0x8000 - RTF_PROTO2 = 0x4000 - RTF_PROTO3 = 0x40000 - RTF_PROXY = 0x8000000 - RTF_REJECT = 0x8 - RTF_ROUTER = 0x10000000 - RTF_STATIC = 0x800 - RTF_UP = 0x1 - RTF_WASCLONED = 0x20000 - RTF_XRESOLVE = 0x200 - RTM_ADD = 0x1 - RTM_CHANGE = 0x3 - RTM_DELADDR = 0xd - RTM_DELETE = 0x2 - RTM_DELMADDR = 0x10 - RTM_GET = 0x4 - RTM_GET2 = 0x14 - RTM_IFINFO = 0xe - RTM_IFINFO2 = 0x12 - RTM_LOCK = 0x8 - RTM_LOSING = 0x5 - RTM_MISS = 0x7 - RTM_NEWADDR = 0xc - RTM_NEWMADDR = 0xf - RTM_NEWMADDR2 = 0x13 - RTM_OLDADD = 0x9 - RTM_OLDDEL = 0xa - RTM_REDIRECT = 0x6 - 
RTM_RESOLVE = 0xb - RTM_RTTUNIT = 0xf4240 - RTM_VERSION = 0x5 - RTV_EXPIRE = 0x4 - RTV_HOPCOUNT = 0x2 - RTV_MTU = 0x1 - RTV_RPIPE = 0x8 - RTV_RTT = 0x40 - RTV_RTTVAR = 0x80 - RTV_SPIPE = 0x10 - RTV_SSTHRESH = 0x20 - RUSAGE_CHILDREN = -0x1 - RUSAGE_SELF = 0x0 - SCM_CREDS = 0x3 - SCM_RIGHTS = 0x1 - SCM_TIMESTAMP = 0x2 - SCM_TIMESTAMP_MONOTONIC = 0x4 - SHUT_RD = 0x0 - SHUT_RDWR = 0x2 - SHUT_WR = 0x1 - SIOCADDMULTI = 0x80206931 - SIOCAIFADDR = 0x8040691a - SIOCARPIPLL = 0xc0206928 - SIOCATMARK = 0x40047307 - SIOCAUTOADDR = 0xc0206926 - SIOCAUTONETMASK = 0x80206927 - SIOCDELMULTI = 0x80206932 - SIOCDIFADDR = 0x80206919 - SIOCDIFPHYADDR = 0x80206941 - SIOCGDRVSPEC = 0xc01c697b - SIOCGETVLAN = 0xc020697f - SIOCGHIWAT = 0x40047301 - SIOCGIFADDR = 0xc0206921 - SIOCGIFALTMTU = 0xc0206948 - SIOCGIFASYNCMAP = 0xc020697c - SIOCGIFBOND = 0xc0206947 - SIOCGIFBRDADDR = 0xc0206923 - SIOCGIFCAP = 0xc020695b - SIOCGIFCONF = 0xc0086924 - SIOCGIFDEVMTU = 0xc0206944 - SIOCGIFDSTADDR = 0xc0206922 - SIOCGIFFLAGS = 0xc0206911 - SIOCGIFGENERIC = 0xc020693a - SIOCGIFKPI = 0xc0206987 - SIOCGIFMAC = 0xc0206982 - SIOCGIFMEDIA = 0xc0286938 - SIOCGIFMETRIC = 0xc0206917 - SIOCGIFMTU = 0xc0206933 - SIOCGIFNETMASK = 0xc0206925 - SIOCGIFPDSTADDR = 0xc0206940 - SIOCGIFPHYS = 0xc0206935 - SIOCGIFPSRCADDR = 0xc020693f - SIOCGIFSTATUS = 0xc331693d - SIOCGIFVLAN = 0xc020697f - SIOCGIFWAKEFLAGS = 0xc0206988 - SIOCGLOWAT = 0x40047303 - SIOCGPGRP = 0x40047309 - SIOCIFCREATE = 0xc0206978 - SIOCIFCREATE2 = 0xc020697a - SIOCIFDESTROY = 0x80206979 - SIOCIFGCLONERS = 0xc00c6981 - SIOCRSLVMULTI = 0xc008693b - SIOCSDRVSPEC = 0x801c697b - SIOCSETVLAN = 0x8020697e - SIOCSHIWAT = 0x80047300 - SIOCSIFADDR = 0x8020690c - SIOCSIFALTMTU = 0x80206945 - SIOCSIFASYNCMAP = 0x8020697d - SIOCSIFBOND = 0x80206946 - SIOCSIFBRDADDR = 0x80206913 - SIOCSIFCAP = 0x8020695a - SIOCSIFDSTADDR = 0x8020690e - SIOCSIFFLAGS = 0x80206910 - SIOCSIFGENERIC = 0x80206939 - SIOCSIFKPI = 0x80206986 - SIOCSIFLLADDR = 0x8020693c - SIOCSIFMAC = 
0x80206983 - SIOCSIFMEDIA = 0xc0206937 - SIOCSIFMETRIC = 0x80206918 - SIOCSIFMTU = 0x80206934 - SIOCSIFNETMASK = 0x80206916 - SIOCSIFPHYADDR = 0x8040693e - SIOCSIFPHYS = 0x80206936 - SIOCSIFVLAN = 0x8020697e - SIOCSLOWAT = 0x80047302 - SIOCSPGRP = 0x80047308 - SOCK_DGRAM = 0x2 - SOCK_MAXADDRLEN = 0xff - SOCK_RAW = 0x3 - SOCK_RDM = 0x4 - SOCK_SEQPACKET = 0x5 - SOCK_STREAM = 0x1 - SOL_SOCKET = 0xffff - SOMAXCONN = 0x80 - SO_ACCEPTCONN = 0x2 - SO_BROADCAST = 0x20 - SO_DEBUG = 0x1 - SO_DONTROUTE = 0x10 - SO_DONTTRUNC = 0x2000 - SO_ERROR = 0x1007 - SO_KEEPALIVE = 0x8 - SO_LABEL = 0x1010 - SO_LINGER = 0x80 - SO_LINGER_SEC = 0x1080 - SO_NETSVC_MARKING_LEVEL = 0x1119 - SO_NET_SERVICE_TYPE = 0x1116 - SO_NKE = 0x1021 - SO_NOADDRERR = 0x1023 - SO_NOSIGPIPE = 0x1022 - SO_NOTIFYCONFLICT = 0x1026 - SO_NP_EXTENSIONS = 0x1083 - SO_NREAD = 0x1020 - SO_NUMRCVPKT = 0x1112 - SO_NWRITE = 0x1024 - SO_OOBINLINE = 0x100 - SO_PEERLABEL = 0x1011 - SO_RANDOMPORT = 0x1082 - SO_RCVBUF = 0x1002 - SO_RCVLOWAT = 0x1004 - SO_RCVTIMEO = 0x1006 - SO_REUSEADDR = 0x4 - SO_REUSEPORT = 0x200 - SO_REUSESHAREUID = 0x1025 - SO_SNDBUF = 0x1001 - SO_SNDLOWAT = 0x1003 - SO_SNDTIMEO = 0x1005 - SO_TIMESTAMP = 0x400 - SO_TIMESTAMP_MONOTONIC = 0x800 - SO_TYPE = 0x1008 - SO_UPCALLCLOSEWAIT = 0x1027 - SO_USELOOPBACK = 0x40 - SO_WANTMORE = 0x4000 - SO_WANTOOBFLAG = 0x8000 - S_IEXEC = 0x40 - S_IFBLK = 0x6000 - S_IFCHR = 0x2000 - S_IFDIR = 0x4000 - S_IFIFO = 0x1000 - S_IFLNK = 0xa000 - S_IFMT = 0xf000 - S_IFREG = 0x8000 - S_IFSOCK = 0xc000 - S_IFWHT = 0xe000 - S_IREAD = 0x100 - S_IRGRP = 0x20 - S_IROTH = 0x4 - S_IRUSR = 0x100 - S_IRWXG = 0x38 - S_IRWXO = 0x7 - S_IRWXU = 0x1c0 - S_ISGID = 0x400 - S_ISTXT = 0x200 - S_ISUID = 0x800 - S_ISVTX = 0x200 - S_IWGRP = 0x10 - S_IWOTH = 0x2 - S_IWRITE = 0x80 - S_IWUSR = 0x80 - S_IXGRP = 0x8 - S_IXOTH = 0x1 - S_IXUSR = 0x40 - TAB0 = 0x0 - TAB1 = 0x400 - TAB2 = 0x800 - TAB3 = 0x4 - TABDLY = 0xc04 - TCIFLUSH = 0x1 - TCIOFF = 0x3 - TCIOFLUSH = 0x3 - TCION = 0x4 - TCOFLUSH = 0x2 - 
TCOOFF = 0x1 - TCOON = 0x2 - TCP_CONNECTIONTIMEOUT = 0x20 - TCP_CONNECTION_INFO = 0x106 - TCP_ENABLE_ECN = 0x104 - TCP_FASTOPEN = 0x105 - TCP_KEEPALIVE = 0x10 - TCP_KEEPCNT = 0x102 - TCP_KEEPINTVL = 0x101 - TCP_MAXHLEN = 0x3c - TCP_MAXOLEN = 0x28 - TCP_MAXSEG = 0x2 - TCP_MAXWIN = 0xffff - TCP_MAX_SACK = 0x4 - TCP_MAX_WINSHIFT = 0xe - TCP_MINMSS = 0xd8 - TCP_MSS = 0x200 - TCP_NODELAY = 0x1 - TCP_NOOPT = 0x8 - TCP_NOPUSH = 0x4 - TCP_NOTSENT_LOWAT = 0x201 - TCP_RXT_CONNDROPTIME = 0x80 - TCP_RXT_FINDROP = 0x100 - TCP_SENDMOREACKS = 0x103 - TCSAFLUSH = 0x2 - TIOCCBRK = 0x2000747a - TIOCCDTR = 0x20007478 - TIOCCONS = 0x80047462 - TIOCDCDTIMESTAMP = 0x40087458 - TIOCDRAIN = 0x2000745e - TIOCDSIMICROCODE = 0x20007455 - TIOCEXCL = 0x2000740d - TIOCEXT = 0x80047460 - TIOCFLUSH = 0x80047410 - TIOCGDRAINWAIT = 0x40047456 - TIOCGETA = 0x402c7413 - TIOCGETD = 0x4004741a - TIOCGPGRP = 0x40047477 - TIOCGWINSZ = 0x40087468 - TIOCIXOFF = 0x20007480 - TIOCIXON = 0x20007481 - TIOCMBIC = 0x8004746b - TIOCMBIS = 0x8004746c - TIOCMGDTRWAIT = 0x4004745a - TIOCMGET = 0x4004746a - TIOCMODG = 0x40047403 - TIOCMODS = 0x80047404 - TIOCMSDTRWAIT = 0x8004745b - TIOCMSET = 0x8004746d - TIOCM_CAR = 0x40 - TIOCM_CD = 0x40 - TIOCM_CTS = 0x20 - TIOCM_DSR = 0x100 - TIOCM_DTR = 0x2 - TIOCM_LE = 0x1 - TIOCM_RI = 0x80 - TIOCM_RNG = 0x80 - TIOCM_RTS = 0x4 - TIOCM_SR = 0x10 - TIOCM_ST = 0x8 - TIOCNOTTY = 0x20007471 - TIOCNXCL = 0x2000740e - TIOCOUTQ = 0x40047473 - TIOCPKT = 0x80047470 - TIOCPKT_DATA = 0x0 - TIOCPKT_DOSTOP = 0x20 - TIOCPKT_FLUSHREAD = 0x1 - TIOCPKT_FLUSHWRITE = 0x2 - TIOCPKT_IOCTL = 0x40 - TIOCPKT_NOSTOP = 0x10 - TIOCPKT_START = 0x8 - TIOCPKT_STOP = 0x4 - TIOCPTYGNAME = 0x40807453 - TIOCPTYGRANT = 0x20007454 - TIOCPTYUNLK = 0x20007452 - TIOCREMOTE = 0x80047469 - TIOCSBRK = 0x2000747b - TIOCSCONS = 0x20007463 - TIOCSCTTY = 0x20007461 - TIOCSDRAINWAIT = 0x80047457 - TIOCSDTR = 0x20007479 - TIOCSETA = 0x802c7414 - TIOCSETAF = 0x802c7416 - TIOCSETAW = 0x802c7415 - TIOCSETD = 0x8004741b - 
TIOCSIG = 0x2000745f - TIOCSPGRP = 0x80047476 - TIOCSTART = 0x2000746e - TIOCSTAT = 0x20007465 - TIOCSTI = 0x80017472 - TIOCSTOP = 0x2000746f - TIOCSWINSZ = 0x80087467 - TIOCTIMESTAMP = 0x40087459 - TIOCUCNTL = 0x80047466 - TOSTOP = 0x400000 - VDISCARD = 0xf - VDSUSP = 0xb - VEOF = 0x0 - VEOL = 0x1 - VEOL2 = 0x2 - VERASE = 0x3 - VINTR = 0x8 - VKILL = 0x5 - VLNEXT = 0xe - VMIN = 0x10 - VM_LOADAVG = 0x2 - VM_MACHFACTOR = 0x4 - VM_MAXID = 0x6 - VM_METER = 0x1 - VM_SWAPUSAGE = 0x5 - VQUIT = 0x9 - VREPRINT = 0x6 - VSTART = 0xc - VSTATUS = 0x12 - VSTOP = 0xd - VSUSP = 0xa - VT0 = 0x0 - VT1 = 0x10000 - VTDLY = 0x10000 - VTIME = 0x11 - VWERASE = 0x4 - WCONTINUED = 0x10 - WCOREFLAG = 0x80 - WEXITED = 0x4 - WNOHANG = 0x1 - WNOWAIT = 0x20 - WORDSIZE = 0x20 - WSTOPPED = 0x8 - WUNTRACED = 0x2 - XATTR_CREATE = 0x2 - XATTR_NODEFAULT = 0x10 - XATTR_NOFOLLOW = 0x1 - XATTR_NOSECURITY = 0x8 - XATTR_REPLACE = 0x4 - XATTR_SHOWCOMPRESSION = 0x20 -) - -// Errors -const ( - E2BIG = syscall.Errno(0x7) - EACCES = syscall.Errno(0xd) - EADDRINUSE = syscall.Errno(0x30) - EADDRNOTAVAIL = syscall.Errno(0x31) - EAFNOSUPPORT = syscall.Errno(0x2f) - EAGAIN = syscall.Errno(0x23) - EALREADY = syscall.Errno(0x25) - EAUTH = syscall.Errno(0x50) - EBADARCH = syscall.Errno(0x56) - EBADEXEC = syscall.Errno(0x55) - EBADF = syscall.Errno(0x9) - EBADMACHO = syscall.Errno(0x58) - EBADMSG = syscall.Errno(0x5e) - EBADRPC = syscall.Errno(0x48) - EBUSY = syscall.Errno(0x10) - ECANCELED = syscall.Errno(0x59) - ECHILD = syscall.Errno(0xa) - ECONNABORTED = syscall.Errno(0x35) - ECONNREFUSED = syscall.Errno(0x3d) - ECONNRESET = syscall.Errno(0x36) - EDEADLK = syscall.Errno(0xb) - EDESTADDRREQ = syscall.Errno(0x27) - EDEVERR = syscall.Errno(0x53) - EDOM = syscall.Errno(0x21) - EDQUOT = syscall.Errno(0x45) - EEXIST = syscall.Errno(0x11) - EFAULT = syscall.Errno(0xe) - EFBIG = syscall.Errno(0x1b) - EFTYPE = syscall.Errno(0x4f) - EHOSTDOWN = syscall.Errno(0x40) - EHOSTUNREACH = syscall.Errno(0x41) - EIDRM = 
syscall.Errno(0x5a) - EILSEQ = syscall.Errno(0x5c) - EINPROGRESS = syscall.Errno(0x24) - EINTR = syscall.Errno(0x4) - EINVAL = syscall.Errno(0x16) - EIO = syscall.Errno(0x5) - EISCONN = syscall.Errno(0x38) - EISDIR = syscall.Errno(0x15) - ELAST = syscall.Errno(0x6a) - ELOOP = syscall.Errno(0x3e) - EMFILE = syscall.Errno(0x18) - EMLINK = syscall.Errno(0x1f) - EMSGSIZE = syscall.Errno(0x28) - EMULTIHOP = syscall.Errno(0x5f) - ENAMETOOLONG = syscall.Errno(0x3f) - ENEEDAUTH = syscall.Errno(0x51) - ENETDOWN = syscall.Errno(0x32) - ENETRESET = syscall.Errno(0x34) - ENETUNREACH = syscall.Errno(0x33) - ENFILE = syscall.Errno(0x17) - ENOATTR = syscall.Errno(0x5d) - ENOBUFS = syscall.Errno(0x37) - ENODATA = syscall.Errno(0x60) - ENODEV = syscall.Errno(0x13) - ENOENT = syscall.Errno(0x2) - ENOEXEC = syscall.Errno(0x8) - ENOLCK = syscall.Errno(0x4d) - ENOLINK = syscall.Errno(0x61) - ENOMEM = syscall.Errno(0xc) - ENOMSG = syscall.Errno(0x5b) - ENOPOLICY = syscall.Errno(0x67) - ENOPROTOOPT = syscall.Errno(0x2a) - ENOSPC = syscall.Errno(0x1c) - ENOSR = syscall.Errno(0x62) - ENOSTR = syscall.Errno(0x63) - ENOSYS = syscall.Errno(0x4e) - ENOTBLK = syscall.Errno(0xf) - ENOTCONN = syscall.Errno(0x39) - ENOTDIR = syscall.Errno(0x14) - ENOTEMPTY = syscall.Errno(0x42) - ENOTRECOVERABLE = syscall.Errno(0x68) - ENOTSOCK = syscall.Errno(0x26) - ENOTSUP = syscall.Errno(0x2d) - ENOTTY = syscall.Errno(0x19) - ENXIO = syscall.Errno(0x6) - EOPNOTSUPP = syscall.Errno(0x66) - EOVERFLOW = syscall.Errno(0x54) - EOWNERDEAD = syscall.Errno(0x69) - EPERM = syscall.Errno(0x1) - EPFNOSUPPORT = syscall.Errno(0x2e) - EPIPE = syscall.Errno(0x20) - EPROCLIM = syscall.Errno(0x43) - EPROCUNAVAIL = syscall.Errno(0x4c) - EPROGMISMATCH = syscall.Errno(0x4b) - EPROGUNAVAIL = syscall.Errno(0x4a) - EPROTO = syscall.Errno(0x64) - EPROTONOSUPPORT = syscall.Errno(0x2b) - EPROTOTYPE = syscall.Errno(0x29) - EPWROFF = syscall.Errno(0x52) - EQFULL = syscall.Errno(0x6a) - ERANGE = syscall.Errno(0x22) - EREMOTE = 
syscall.Errno(0x47) - EROFS = syscall.Errno(0x1e) - ERPCMISMATCH = syscall.Errno(0x49) - ESHLIBVERS = syscall.Errno(0x57) - ESHUTDOWN = syscall.Errno(0x3a) - ESOCKTNOSUPPORT = syscall.Errno(0x2c) - ESPIPE = syscall.Errno(0x1d) - ESRCH = syscall.Errno(0x3) - ESTALE = syscall.Errno(0x46) - ETIME = syscall.Errno(0x65) - ETIMEDOUT = syscall.Errno(0x3c) - ETOOMANYREFS = syscall.Errno(0x3b) - ETXTBSY = syscall.Errno(0x1a) - EUSERS = syscall.Errno(0x44) - EWOULDBLOCK = syscall.Errno(0x23) - EXDEV = syscall.Errno(0x12) -) - -// Signals -const ( - SIGABRT = syscall.Signal(0x6) - SIGALRM = syscall.Signal(0xe) - SIGBUS = syscall.Signal(0xa) - SIGCHLD = syscall.Signal(0x14) - SIGCONT = syscall.Signal(0x13) - SIGEMT = syscall.Signal(0x7) - SIGFPE = syscall.Signal(0x8) - SIGHUP = syscall.Signal(0x1) - SIGILL = syscall.Signal(0x4) - SIGINFO = syscall.Signal(0x1d) - SIGINT = syscall.Signal(0x2) - SIGIO = syscall.Signal(0x17) - SIGIOT = syscall.Signal(0x6) - SIGKILL = syscall.Signal(0x9) - SIGPIPE = syscall.Signal(0xd) - SIGPROF = syscall.Signal(0x1b) - SIGQUIT = syscall.Signal(0x3) - SIGSEGV = syscall.Signal(0xb) - SIGSTOP = syscall.Signal(0x11) - SIGSYS = syscall.Signal(0xc) - SIGTERM = syscall.Signal(0xf) - SIGTRAP = syscall.Signal(0x5) - SIGTSTP = syscall.Signal(0x12) - SIGTTIN = syscall.Signal(0x15) - SIGTTOU = syscall.Signal(0x16) - SIGURG = syscall.Signal(0x10) - SIGUSR1 = syscall.Signal(0x1e) - SIGUSR2 = syscall.Signal(0x1f) - SIGVTALRM = syscall.Signal(0x1a) - SIGWINCH = syscall.Signal(0x1c) - SIGXCPU = syscall.Signal(0x18) - SIGXFSZ = syscall.Signal(0x19) -) - -// Error table -var errorList = [...]struct { - num syscall.Errno - name string - desc string -}{ - {1, "EPERM", "operation not permitted"}, - {2, "ENOENT", "no such file or directory"}, - {3, "ESRCH", "no such process"}, - {4, "EINTR", "interrupted system call"}, - {5, "EIO", "input/output error"}, - {6, "ENXIO", "device not configured"}, - {7, "E2BIG", "argument list too long"}, - {8, "ENOEXEC", "exec format 
error"}, - {9, "EBADF", "bad file descriptor"}, - {10, "ECHILD", "no child processes"}, - {11, "EDEADLK", "resource deadlock avoided"}, - {12, "ENOMEM", "cannot allocate memory"}, - {13, "EACCES", "permission denied"}, - {14, "EFAULT", "bad address"}, - {15, "ENOTBLK", "block device required"}, - {16, "EBUSY", "resource busy"}, - {17, "EEXIST", "file exists"}, - {18, "EXDEV", "cross-device link"}, - {19, "ENODEV", "operation not supported by device"}, - {20, "ENOTDIR", "not a directory"}, - {21, "EISDIR", "is a directory"}, - {22, "EINVAL", "invalid argument"}, - {23, "ENFILE", "too many open files in system"}, - {24, "EMFILE", "too many open files"}, - {25, "ENOTTY", "inappropriate ioctl for device"}, - {26, "ETXTBSY", "text file busy"}, - {27, "EFBIG", "file too large"}, - {28, "ENOSPC", "no space left on device"}, - {29, "ESPIPE", "illegal seek"}, - {30, "EROFS", "read-only file system"}, - {31, "EMLINK", "too many links"}, - {32, "EPIPE", "broken pipe"}, - {33, "EDOM", "numerical argument out of domain"}, - {34, "ERANGE", "result too large"}, - {35, "EAGAIN", "resource temporarily unavailable"}, - {36, "EINPROGRESS", "operation now in progress"}, - {37, "EALREADY", "operation already in progress"}, - {38, "ENOTSOCK", "socket operation on non-socket"}, - {39, "EDESTADDRREQ", "destination address required"}, - {40, "EMSGSIZE", "message too long"}, - {41, "EPROTOTYPE", "protocol wrong type for socket"}, - {42, "ENOPROTOOPT", "protocol not available"}, - {43, "EPROTONOSUPPORT", "protocol not supported"}, - {44, "ESOCKTNOSUPPORT", "socket type not supported"}, - {45, "ENOTSUP", "operation not supported"}, - {46, "EPFNOSUPPORT", "protocol family not supported"}, - {47, "EAFNOSUPPORT", "address family not supported by protocol family"}, - {48, "EADDRINUSE", "address already in use"}, - {49, "EADDRNOTAVAIL", "can't assign requested address"}, - {50, "ENETDOWN", "network is down"}, - {51, "ENETUNREACH", "network is unreachable"}, - {52, "ENETRESET", "network dropped 
connection on reset"}, - {53, "ECONNABORTED", "software caused connection abort"}, - {54, "ECONNRESET", "connection reset by peer"}, - {55, "ENOBUFS", "no buffer space available"}, - {56, "EISCONN", "socket is already connected"}, - {57, "ENOTCONN", "socket is not connected"}, - {58, "ESHUTDOWN", "can't send after socket shutdown"}, - {59, "ETOOMANYREFS", "too many references: can't splice"}, - {60, "ETIMEDOUT", "operation timed out"}, - {61, "ECONNREFUSED", "connection refused"}, - {62, "ELOOP", "too many levels of symbolic links"}, - {63, "ENAMETOOLONG", "file name too long"}, - {64, "EHOSTDOWN", "host is down"}, - {65, "EHOSTUNREACH", "no route to host"}, - {66, "ENOTEMPTY", "directory not empty"}, - {67, "EPROCLIM", "too many processes"}, - {68, "EUSERS", "too many users"}, - {69, "EDQUOT", "disc quota exceeded"}, - {70, "ESTALE", "stale NFS file handle"}, - {71, "EREMOTE", "too many levels of remote in path"}, - {72, "EBADRPC", "RPC struct is bad"}, - {73, "ERPCMISMATCH", "RPC version wrong"}, - {74, "EPROGUNAVAIL", "RPC prog. 
not avail"}, - {75, "EPROGMISMATCH", "program version wrong"}, - {76, "EPROCUNAVAIL", "bad procedure for program"}, - {77, "ENOLCK", "no locks available"}, - {78, "ENOSYS", "function not implemented"}, - {79, "EFTYPE", "inappropriate file type or format"}, - {80, "EAUTH", "authentication error"}, - {81, "ENEEDAUTH", "need authenticator"}, - {82, "EPWROFF", "device power is off"}, - {83, "EDEVERR", "device error"}, - {84, "EOVERFLOW", "value too large to be stored in data type"}, - {85, "EBADEXEC", "bad executable (or shared library)"}, - {86, "EBADARCH", "bad CPU type in executable"}, - {87, "ESHLIBVERS", "shared library version mismatch"}, - {88, "EBADMACHO", "malformed Mach-o file"}, - {89, "ECANCELED", "operation canceled"}, - {90, "EIDRM", "identifier removed"}, - {91, "ENOMSG", "no message of desired type"}, - {92, "EILSEQ", "illegal byte sequence"}, - {93, "ENOATTR", "attribute not found"}, - {94, "EBADMSG", "bad message"}, - {95, "EMULTIHOP", "EMULTIHOP (Reserved)"}, - {96, "ENODATA", "no message available on STREAM"}, - {97, "ENOLINK", "ENOLINK (Reserved)"}, - {98, "ENOSR", "no STREAM resources"}, - {99, "ENOSTR", "not a STREAM"}, - {100, "EPROTO", "protocol error"}, - {101, "ETIME", "STREAM ioctl timeout"}, - {102, "EOPNOTSUPP", "operation not supported on socket"}, - {103, "ENOPOLICY", "policy not found"}, - {104, "ENOTRECOVERABLE", "state not recoverable"}, - {105, "EOWNERDEAD", "previous owner died"}, - {106, "EQFULL", "interface output queue is full"}, -} - -// Signal table -var signalList = [...]struct { - num syscall.Signal - name string - desc string -}{ - {1, "SIGHUP", "hangup"}, - {2, "SIGINT", "interrupt"}, - {3, "SIGQUIT", "quit"}, - {4, "SIGILL", "illegal instruction"}, - {5, "SIGTRAP", "trace/BPT trap"}, - {6, "SIGABRT", "abort trap"}, - {7, "SIGEMT", "EMT trap"}, - {8, "SIGFPE", "floating point exception"}, - {9, "SIGKILL", "killed"}, - {10, "SIGBUS", "bus error"}, - {11, "SIGSEGV", "segmentation fault"}, - {12, "SIGSYS", "bad system call"}, 
- {13, "SIGPIPE", "broken pipe"}, - {14, "SIGALRM", "alarm clock"}, - {15, "SIGTERM", "terminated"}, - {16, "SIGURG", "urgent I/O condition"}, - {17, "SIGSTOP", "suspended (signal)"}, - {18, "SIGTSTP", "suspended"}, - {19, "SIGCONT", "continued"}, - {20, "SIGCHLD", "child exited"}, - {21, "SIGTTIN", "stopped (tty input)"}, - {22, "SIGTTOU", "stopped (tty output)"}, - {23, "SIGIO", "I/O possible"}, - {24, "SIGXCPU", "cputime limit exceeded"}, - {25, "SIGXFSZ", "filesize limit exceeded"}, - {26, "SIGVTALRM", "virtual timer expired"}, - {27, "SIGPROF", "profiling timer expired"}, - {28, "SIGWINCH", "window size changes"}, - {29, "SIGINFO", "information request"}, - {30, "SIGUSR1", "user defined signal 1"}, - {31, "SIGUSR2", "user defined signal 2"}, -} diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go index fea5dfaad..991996b60 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go @@ -1,6 +1,7 @@ // mkerrors.sh -m64 // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build amd64 && darwin // +build amd64,darwin // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
@@ -32,7 +33,7 @@ const ( AF_LAT = 0xe AF_LINK = 0x12 AF_LOCAL = 0x1 - AF_MAX = 0x28 + AF_MAX = 0x29 AF_NATM = 0x1f AF_NDRV = 0x1b AF_NETBIOS = 0x21 @@ -49,6 +50,7 @@ const ( AF_UNIX = 0x1 AF_UNSPEC = 0x0 AF_UTUN = 0x26 + AF_VSOCK = 0x28 ALTWERASE = 0x200 ATTR_BIT_MAP_COUNT = 0x5 ATTR_CMN_ACCESSMASK = 0x20000 @@ -83,7 +85,7 @@ const ( ATTR_CMN_PAROBJID = 0x80 ATTR_CMN_RETURNED_ATTRS = 0x80000000 ATTR_CMN_SCRIPT = 0x100 - ATTR_CMN_SETMASK = 0x41c7ff00 + ATTR_CMN_SETMASK = 0x51c7ff00 ATTR_CMN_USERACCESS = 0x200000 ATTR_CMN_UUID = 0x800000 ATTR_CMN_VALIDMASK = 0xffffffff @@ -357,7 +359,7 @@ const ( DLT_LINUX_SLL = 0x71 DLT_LOOP = 0x6c DLT_LTALK = 0x72 - DLT_MATCHING_MAX = 0xf5 + DLT_MATCHING_MAX = 0x10a DLT_MATCHING_MIN = 0x68 DLT_MFR = 0xb6 DLT_MOST = 0xd3 @@ -398,6 +400,7 @@ const ( DLT_SYMANTEC_FIREWALL = 0x63 DLT_TZSP = 0x80 DLT_USB = 0xba + DLT_USB_DARWIN = 0x10a DLT_USB_LINUX = 0xbd DLT_USB_LINUX_MMAPPED = 0xdc DLT_USER0 = 0x93 @@ -442,8 +445,8 @@ const ( EVFILT_PROC = -0x5 EVFILT_READ = -0x1 EVFILT_SIGNAL = -0x6 - EVFILT_SYSCOUNT = 0xf - EVFILT_THREADMARKER = 0xf + EVFILT_SYSCOUNT = 0x11 + EVFILT_THREADMARKER = 0x11 EVFILT_TIMER = -0x7 EVFILT_USER = -0xa EVFILT_VM = -0xc @@ -481,9 +484,12 @@ const ( FSOPT_NOINMEMUPDATE = 0x2 FSOPT_PACK_INVAL_ATTRS = 0x8 FSOPT_REPORT_FULLSIZE = 0x4 + FSOPT_RETURN_REALDEV = 0x200 F_ADDFILESIGS = 0x3d F_ADDFILESIGS_FOR_DYLD_SIM = 0x53 + F_ADDFILESIGS_INFO = 0x67 F_ADDFILESIGS_RETURN = 0x61 + F_ADDFILESUPPL = 0x68 F_ADDSIGS = 0x3b F_ALLOCATEALL = 0x4 F_ALLOCATECONTIG = 0x2 @@ -505,8 +511,10 @@ const ( F_GETOWN = 0x5 F_GETPATH = 0x32 F_GETPATH_MTMINFO = 0x47 + F_GETPATH_NOFIRMLINK = 0x66 F_GETPROTECTIONCLASS = 0x3f F_GETPROTECTIONLEVEL = 0x4d + F_GETSIGSINFO = 0x69 F_GLOBAL_NOCACHE = 0x37 F_LOG2PHYS = 0x31 F_LOG2PHYS_EXT = 0x41 @@ -531,6 +539,7 @@ const ( F_SETPROTECTIONCLASS = 0x40 F_SETSIZE = 0x2b F_SINGLE_WRITER = 0x4c + F_SPECULATIVE_READ = 0x65 F_THAW_FS = 0x36 F_TRANSCODEKEY = 0x4b F_TRIM_ACTIVE_FILE = 0x64 @@ -562,6 +571,7 @@ 
const ( IFF_UP = 0x1 IFNAMSIZ = 0x10 IFT_1822 = 0x2 + IFT_6LOWPAN = 0x40 IFT_AAL5 = 0x31 IFT_ARCNET = 0x23 IFT_ARCNETPLUS = 0x24 @@ -766,16 +776,28 @@ const ( IPV6_2292PKTINFO = 0x13 IPV6_2292PKTOPTIONS = 0x19 IPV6_2292RTHDR = 0x18 + IPV6_3542DSTOPTS = 0x32 + IPV6_3542HOPLIMIT = 0x2f + IPV6_3542HOPOPTS = 0x31 + IPV6_3542NEXTHOP = 0x30 + IPV6_3542PKTINFO = 0x2e + IPV6_3542RTHDR = 0x33 + IPV6_ADDR_MC_FLAGS_PREFIX = 0x20 + IPV6_ADDR_MC_FLAGS_TRANSIENT = 0x10 + IPV6_ADDR_MC_FLAGS_UNICAST_BASED = 0x30 + IPV6_AUTOFLOWLABEL = 0x3b IPV6_BINDV6ONLY = 0x1b IPV6_BOUND_IF = 0x7d IPV6_CHECKSUM = 0x1a IPV6_DEFAULT_MULTICAST_HOPS = 0x1 IPV6_DEFAULT_MULTICAST_LOOP = 0x1 IPV6_DEFHLIM = 0x40 + IPV6_DONTFRAG = 0x3e + IPV6_DSTOPTS = 0x32 IPV6_FAITH = 0x1d IPV6_FLOWINFO_MASK = 0xffffff0f IPV6_FLOWLABEL_MASK = 0xffff0f00 - IPV6_FLOW_ECN_MASK = 0x300 + IPV6_FLOW_ECN_MASK = 0x3000 IPV6_FRAGTTL = 0x3c IPV6_FW_ADD = 0x1e IPV6_FW_DEL = 0x1f @@ -783,6 +805,8 @@ const ( IPV6_FW_GET = 0x22 IPV6_FW_ZERO = 0x21 IPV6_HLIMDEC = 0x1 + IPV6_HOPLIMIT = 0x2f + IPV6_HOPOPTS = 0x31 IPV6_IPSEC_POLICY = 0x1c IPV6_JOIN_GROUP = 0xc IPV6_LEAVE_GROUP = 0xd @@ -794,20 +818,34 @@ const ( IPV6_MAX_SOCK_SRC_FILTER = 0x80 IPV6_MIN_MEMBERSHIPS = 0x1f IPV6_MMTU = 0x500 + IPV6_MSFILTER = 0x4a IPV6_MULTICAST_HOPS = 0xa IPV6_MULTICAST_IF = 0x9 IPV6_MULTICAST_LOOP = 0xb + IPV6_NEXTHOP = 0x30 + IPV6_PATHMTU = 0x2c + IPV6_PKTINFO = 0x2e IPV6_PORTRANGE = 0xe IPV6_PORTRANGE_DEFAULT = 0x0 IPV6_PORTRANGE_HIGH = 0x1 IPV6_PORTRANGE_LOW = 0x2 + IPV6_PREFER_TEMPADDR = 0x3f + IPV6_RECVDSTOPTS = 0x28 + IPV6_RECVHOPLIMIT = 0x25 + IPV6_RECVHOPOPTS = 0x27 + IPV6_RECVPATHMTU = 0x2b + IPV6_RECVPKTINFO = 0x3d + IPV6_RECVRTHDR = 0x26 IPV6_RECVTCLASS = 0x23 + IPV6_RTHDR = 0x33 + IPV6_RTHDRDSTOPTS = 0x39 IPV6_RTHDR_LOOSE = 0x0 IPV6_RTHDR_STRICT = 0x1 IPV6_RTHDR_TYPE_0 = 0x0 IPV6_SOCKOPT_RESERVED1 = 0x3 IPV6_TCLASS = 0x24 IPV6_UNICAST_HOPS = 0x4 + IPV6_USE_MIN_MTU = 0x2a IPV6_V6ONLY = 0x1b IPV6_VERSION = 0x60 IPV6_VERSION_MASK = 0xf0 @@ 
-818,6 +856,7 @@ const ( IP_DEFAULT_MULTICAST_LOOP = 0x1 IP_DEFAULT_MULTICAST_TTL = 0x1 IP_DF = 0x4000 + IP_DONTFRAG = 0x1c IP_DROP_MEMBERSHIP = 0xd IP_DROP_SOURCE_MEMBERSHIP = 0x47 IP_DUMMYNET_CONFIGURE = 0x3c @@ -889,6 +928,12 @@ const ( KERN_OSRELEASE = 0x2 KERN_OSTYPE = 0x1 KERN_VERSION = 0x4 + LOCAL_PEERCRED = 0x1 + LOCAL_PEEREPID = 0x3 + LOCAL_PEEREUUID = 0x5 + LOCAL_PEERPID = 0x2 + LOCAL_PEERTOKEN = 0x6 + LOCAL_PEERUUID = 0x4 LOCK_EX = 0x2 LOCK_NB = 0x4 LOCK_SH = 0x1 @@ -904,6 +949,7 @@ const ( MADV_SEQUENTIAL = 0x2 MADV_WILLNEED = 0x3 MADV_ZERO_WIRED_PAGES = 0x6 + MAP_32BIT = 0x8000 MAP_ANON = 0x1000 MAP_ANONYMOUS = 0x1000 MAP_COPY = 0x2 @@ -920,6 +966,17 @@ const ( MAP_RESILIENT_CODESIGN = 0x2000 MAP_RESILIENT_MEDIA = 0x4000 MAP_SHARED = 0x1 + MAP_TRANSLATED_ALLOW_EXECUTE = 0x20000 + MAP_UNIX03 = 0x40000 + MCAST_BLOCK_SOURCE = 0x54 + MCAST_EXCLUDE = 0x2 + MCAST_INCLUDE = 0x1 + MCAST_JOIN_GROUP = 0x50 + MCAST_JOIN_SOURCE_GROUP = 0x52 + MCAST_LEAVE_GROUP = 0x51 + MCAST_LEAVE_SOURCE_GROUP = 0x53 + MCAST_UNBLOCK_SOURCE = 0x55 + MCAST_UNDEFINED = 0x0 MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 MNT_ASYNC = 0x40 @@ -931,6 +988,7 @@ const ( MNT_DOVOLFS = 0x8000 MNT_DWAIT = 0x4 MNT_EXPORTED = 0x100 + MNT_EXT_ROOT_DATA_VOL = 0x1 MNT_FORCE = 0x80000 MNT_IGNORE_OWNERSHIP = 0x200000 MNT_JOURNALED = 0x800000 @@ -947,12 +1005,15 @@ const ( MNT_QUOTA = 0x2000 MNT_RDONLY = 0x1 MNT_RELOAD = 0x40000 + MNT_REMOVABLE = 0x200 MNT_ROOTFS = 0x4000 + MNT_SNAPSHOT = 0x40000000 + MNT_STRICTATIME = 0x80000000 MNT_SYNCHRONOUS = 0x2 MNT_UNION = 0x20 MNT_UNKNOWNPERMISSIONS = 0x200000 MNT_UPDATE = 0x10000 - MNT_VISFLAGMASK = 0x17f0f5ff + MNT_VISFLAGMASK = 0xd7f0f7ff MNT_WAIT = 0x1 MSG_CTRUNC = 0x20 MSG_DONTROUTE = 0x4 @@ -963,6 +1024,7 @@ const ( MSG_HAVEMORE = 0x2000 MSG_HOLD = 0x800 MSG_NEEDSA = 0x10000 + MSG_NOSIGNAL = 0x80000 MSG_OOB = 0x1 MSG_PEEK = 0x2 MSG_RCVMORE = 0x4000 @@ -979,9 +1041,10 @@ const ( NET_RT_DUMP = 0x1 NET_RT_DUMP2 = 0x7 NET_RT_FLAGS = 0x2 + NET_RT_FLAGS_PRIV = 0xa 
NET_RT_IFLIST = 0x3 NET_RT_IFLIST2 = 0x6 - NET_RT_MAXID = 0xa + NET_RT_MAXID = 0xb NET_RT_STAT = 0x4 NET_RT_TRASH = 0x5 NFDBITS = 0x20 @@ -1019,6 +1082,7 @@ const ( NOTE_LEEWAY = 0x10 NOTE_LINK = 0x10 NOTE_LOWAT = 0x1 + NOTE_MACHTIME = 0x100 NOTE_MACH_CONTINUOUS_TIME = 0x80 NOTE_NONE = 0x80 NOTE_NSECONDS = 0x4 @@ -1065,6 +1129,7 @@ const ( O_NDELAY = 0x4 O_NOCTTY = 0x20000 O_NOFOLLOW = 0x100 + O_NOFOLLOW_ANY = 0x20000000 O_NONBLOCK = 0x4 O_POPUP = 0x80000000 O_RDONLY = 0x0 @@ -1136,6 +1201,7 @@ const ( RTF_BROADCAST = 0x400000 RTF_CLONING = 0x100 RTF_CONDEMNED = 0x2000000 + RTF_DEAD = 0x20000000 RTF_DELCLONE = 0x80 RTF_DONE = 0x40 RTF_DYNAMIC = 0x10 @@ -1143,6 +1209,7 @@ const ( RTF_HOST = 0x4 RTF_IFREF = 0x4000000 RTF_IFSCOPE = 0x1000000 + RTF_LLDATA = 0x400 RTF_LLINFO = 0x400 RTF_LOCAL = 0x200000 RTF_MODIFIED = 0x20 @@ -1210,6 +1277,7 @@ const ( SIOCGDRVSPEC = 0xc028697b SIOCGETVLAN = 0xc020697f SIOCGHIWAT = 0x40047301 + SIOCGIF6LOWPAN = 0xc02069c5 SIOCGIFADDR = 0xc0206921 SIOCGIFALTMTU = 0xc0206948 SIOCGIFASYNCMAP = 0xc020697c @@ -1220,6 +1288,7 @@ const ( SIOCGIFDEVMTU = 0xc0206944 SIOCGIFDSTADDR = 0xc0206922 SIOCGIFFLAGS = 0xc0206911 + SIOCGIFFUNCTIONALTYPE = 0xc02069ad SIOCGIFGENERIC = 0xc020693a SIOCGIFKPI = 0xc0206987 SIOCGIFMAC = 0xc0206982 @@ -1233,6 +1302,7 @@ const ( SIOCGIFSTATUS = 0xc331693d SIOCGIFVLAN = 0xc020697f SIOCGIFWAKEFLAGS = 0xc0206988 + SIOCGIFXMEDIA = 0xc02c6948 SIOCGLOWAT = 0x40047303 SIOCGPGRP = 0x40047309 SIOCIFCREATE = 0xc0206978 @@ -1243,6 +1313,7 @@ const ( SIOCSDRVSPEC = 0x8028697b SIOCSETVLAN = 0x8020697e SIOCSHIWAT = 0x80047300 + SIOCSIF6LOWPAN = 0x802069c4 SIOCSIFADDR = 0x8020690c SIOCSIFALTMTU = 0x80206945 SIOCSIFASYNCMAP = 0x8020697d @@ -1270,6 +1341,7 @@ const ( SOCK_RDM = 0x4 SOCK_SEQPACKET = 0x5 SOCK_STREAM = 0x1 + SOL_LOCAL = 0x0 SOL_SOCKET = 0xffff SOMAXCONN = 0x80 SO_ACCEPTCONN = 0x2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm.go deleted file mode 
100644 index 03feefbf8..000000000 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm.go +++ /dev/null @@ -1,1788 +0,0 @@ -// mkerrors.sh -// Code generated by the command above; see README.md. DO NOT EDIT. - -// +build arm,darwin - -// Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs -- _const.go - -package unix - -import "syscall" - -const ( - AF_APPLETALK = 0x10 - AF_CCITT = 0xa - AF_CHAOS = 0x5 - AF_CNT = 0x15 - AF_COIP = 0x14 - AF_DATAKIT = 0x9 - AF_DECnet = 0xc - AF_DLI = 0xd - AF_E164 = 0x1c - AF_ECMA = 0x8 - AF_HYLINK = 0xf - AF_IEEE80211 = 0x25 - AF_IMPLINK = 0x3 - AF_INET = 0x2 - AF_INET6 = 0x1e - AF_IPX = 0x17 - AF_ISDN = 0x1c - AF_ISO = 0x7 - AF_LAT = 0xe - AF_LINK = 0x12 - AF_LOCAL = 0x1 - AF_MAX = 0x28 - AF_NATM = 0x1f - AF_NDRV = 0x1b - AF_NETBIOS = 0x21 - AF_NS = 0x6 - AF_OSI = 0x7 - AF_PPP = 0x22 - AF_PUP = 0x4 - AF_RESERVED_36 = 0x24 - AF_ROUTE = 0x11 - AF_SIP = 0x18 - AF_SNA = 0xb - AF_SYSTEM = 0x20 - AF_SYS_CONTROL = 0x2 - AF_UNIX = 0x1 - AF_UNSPEC = 0x0 - AF_UTUN = 0x26 - ALTWERASE = 0x200 - ATTR_BIT_MAP_COUNT = 0x5 - ATTR_CMN_ACCESSMASK = 0x20000 - ATTR_CMN_ACCTIME = 0x1000 - ATTR_CMN_ADDEDTIME = 0x10000000 - ATTR_CMN_BKUPTIME = 0x2000 - ATTR_CMN_CHGTIME = 0x800 - ATTR_CMN_CRTIME = 0x200 - ATTR_CMN_DATA_PROTECT_FLAGS = 0x40000000 - ATTR_CMN_DEVID = 0x2 - ATTR_CMN_DOCUMENT_ID = 0x100000 - ATTR_CMN_ERROR = 0x20000000 - ATTR_CMN_EXTENDED_SECURITY = 0x400000 - ATTR_CMN_FILEID = 0x2000000 - ATTR_CMN_FLAGS = 0x40000 - ATTR_CMN_FNDRINFO = 0x4000 - ATTR_CMN_FSID = 0x4 - ATTR_CMN_FULLPATH = 0x8000000 - ATTR_CMN_GEN_COUNT = 0x80000 - ATTR_CMN_GRPID = 0x10000 - ATTR_CMN_GRPUUID = 0x1000000 - ATTR_CMN_MODTIME = 0x400 - ATTR_CMN_NAME = 0x1 - ATTR_CMN_NAMEDATTRCOUNT = 0x80000 - ATTR_CMN_NAMEDATTRLIST = 0x100000 - ATTR_CMN_OBJID = 0x20 - ATTR_CMN_OBJPERMANENTID = 0x40 - ATTR_CMN_OBJTAG = 0x10 - ATTR_CMN_OBJTYPE = 0x8 - ATTR_CMN_OWNERID = 0x8000 - ATTR_CMN_PARENTID = 0x4000000 - ATTR_CMN_PAROBJID = 0x80 - ATTR_CMN_RETURNED_ATTRS = 0x80000000 
- ATTR_CMN_SCRIPT = 0x100 - ATTR_CMN_SETMASK = 0x41c7ff00 - ATTR_CMN_USERACCESS = 0x200000 - ATTR_CMN_UUID = 0x800000 - ATTR_CMN_VALIDMASK = 0xffffffff - ATTR_CMN_VOLSETMASK = 0x6700 - ATTR_FILE_ALLOCSIZE = 0x4 - ATTR_FILE_CLUMPSIZE = 0x10 - ATTR_FILE_DATAALLOCSIZE = 0x400 - ATTR_FILE_DATAEXTENTS = 0x800 - ATTR_FILE_DATALENGTH = 0x200 - ATTR_FILE_DEVTYPE = 0x20 - ATTR_FILE_FILETYPE = 0x40 - ATTR_FILE_FORKCOUNT = 0x80 - ATTR_FILE_FORKLIST = 0x100 - ATTR_FILE_IOBLOCKSIZE = 0x8 - ATTR_FILE_LINKCOUNT = 0x1 - ATTR_FILE_RSRCALLOCSIZE = 0x2000 - ATTR_FILE_RSRCEXTENTS = 0x4000 - ATTR_FILE_RSRCLENGTH = 0x1000 - ATTR_FILE_SETMASK = 0x20 - ATTR_FILE_TOTALSIZE = 0x2 - ATTR_FILE_VALIDMASK = 0x37ff - ATTR_VOL_ALLOCATIONCLUMP = 0x40 - ATTR_VOL_ATTRIBUTES = 0x40000000 - ATTR_VOL_CAPABILITIES = 0x20000 - ATTR_VOL_DIRCOUNT = 0x400 - ATTR_VOL_ENCODINGSUSED = 0x10000 - ATTR_VOL_FILECOUNT = 0x200 - ATTR_VOL_FSTYPE = 0x1 - ATTR_VOL_INFO = 0x80000000 - ATTR_VOL_IOBLOCKSIZE = 0x80 - ATTR_VOL_MAXOBJCOUNT = 0x800 - ATTR_VOL_MINALLOCATION = 0x20 - ATTR_VOL_MOUNTEDDEVICE = 0x8000 - ATTR_VOL_MOUNTFLAGS = 0x4000 - ATTR_VOL_MOUNTPOINT = 0x1000 - ATTR_VOL_NAME = 0x2000 - ATTR_VOL_OBJCOUNT = 0x100 - ATTR_VOL_QUOTA_SIZE = 0x10000000 - ATTR_VOL_RESERVED_SIZE = 0x20000000 - ATTR_VOL_SETMASK = 0x80002000 - ATTR_VOL_SIGNATURE = 0x2 - ATTR_VOL_SIZE = 0x4 - ATTR_VOL_SPACEAVAIL = 0x10 - ATTR_VOL_SPACEFREE = 0x8 - ATTR_VOL_UUID = 0x40000 - ATTR_VOL_VALIDMASK = 0xf007ffff - B0 = 0x0 - B110 = 0x6e - B115200 = 0x1c200 - B1200 = 0x4b0 - B134 = 0x86 - B14400 = 0x3840 - B150 = 0x96 - B1800 = 0x708 - B19200 = 0x4b00 - B200 = 0xc8 - B230400 = 0x38400 - B2400 = 0x960 - B28800 = 0x7080 - B300 = 0x12c - B38400 = 0x9600 - B4800 = 0x12c0 - B50 = 0x32 - B57600 = 0xe100 - B600 = 0x258 - B7200 = 0x1c20 - B75 = 0x4b - B76800 = 0x12c00 - B9600 = 0x2580 - BIOCFLUSH = 0x20004268 - BIOCGBLEN = 0x40044266 - BIOCGDLT = 0x4004426a - BIOCGDLTLIST = 0xc00c4279 - BIOCGETIF = 0x4020426b - BIOCGHDRCMPLT = 0x40044274 - BIOCGRSIG = 
0x40044272 - BIOCGRTIMEOUT = 0x4010426e - BIOCGSEESENT = 0x40044276 - BIOCGSTATS = 0x4008426f - BIOCIMMEDIATE = 0x80044270 - BIOCPROMISC = 0x20004269 - BIOCSBLEN = 0xc0044266 - BIOCSDLT = 0x80044278 - BIOCSETF = 0x80104267 - BIOCSETFNR = 0x8010427e - BIOCSETIF = 0x8020426c - BIOCSHDRCMPLT = 0x80044275 - BIOCSRSIG = 0x80044273 - BIOCSRTIMEOUT = 0x8010426d - BIOCSSEESENT = 0x80044277 - BIOCVERSION = 0x40044271 - BPF_A = 0x10 - BPF_ABS = 0x20 - BPF_ADD = 0x0 - BPF_ALIGNMENT = 0x4 - BPF_ALU = 0x4 - BPF_AND = 0x50 - BPF_B = 0x10 - BPF_DIV = 0x30 - BPF_H = 0x8 - BPF_IMM = 0x0 - BPF_IND = 0x40 - BPF_JA = 0x0 - BPF_JEQ = 0x10 - BPF_JGE = 0x30 - BPF_JGT = 0x20 - BPF_JMP = 0x5 - BPF_JSET = 0x40 - BPF_K = 0x0 - BPF_LD = 0x0 - BPF_LDX = 0x1 - BPF_LEN = 0x80 - BPF_LSH = 0x60 - BPF_MAJOR_VERSION = 0x1 - BPF_MAXBUFSIZE = 0x80000 - BPF_MAXINSNS = 0x200 - BPF_MEM = 0x60 - BPF_MEMWORDS = 0x10 - BPF_MINBUFSIZE = 0x20 - BPF_MINOR_VERSION = 0x1 - BPF_MISC = 0x7 - BPF_MSH = 0xa0 - BPF_MUL = 0x20 - BPF_NEG = 0x80 - BPF_OR = 0x40 - BPF_RELEASE = 0x30bb6 - BPF_RET = 0x6 - BPF_RSH = 0x70 - BPF_ST = 0x2 - BPF_STX = 0x3 - BPF_SUB = 0x10 - BPF_TAX = 0x0 - BPF_TXA = 0x80 - BPF_W = 0x0 - BPF_X = 0x8 - BRKINT = 0x2 - BS0 = 0x0 - BS1 = 0x8000 - BSDLY = 0x8000 - CFLUSH = 0xf - CLOCAL = 0x8000 - CLOCK_MONOTONIC = 0x6 - CLOCK_MONOTONIC_RAW = 0x4 - CLOCK_MONOTONIC_RAW_APPROX = 0x5 - CLOCK_PROCESS_CPUTIME_ID = 0xc - CLOCK_REALTIME = 0x0 - CLOCK_THREAD_CPUTIME_ID = 0x10 - CLOCK_UPTIME_RAW = 0x8 - CLOCK_UPTIME_RAW_APPROX = 0x9 - CLONE_NOFOLLOW = 0x1 - CLONE_NOOWNERCOPY = 0x2 - CR0 = 0x0 - CR1 = 0x1000 - CR2 = 0x2000 - CR3 = 0x3000 - CRDLY = 0x3000 - CREAD = 0x800 - CRTSCTS = 0x30000 - CS5 = 0x0 - CS6 = 0x100 - CS7 = 0x200 - CS8 = 0x300 - CSIZE = 0x300 - CSTART = 0x11 - CSTATUS = 0x14 - CSTOP = 0x13 - CSTOPB = 0x400 - CSUSP = 0x1a - CTLIOCGINFO = 0xc0644e03 - CTL_HW = 0x6 - CTL_KERN = 0x1 - CTL_MAXNAME = 0xc - CTL_NET = 0x4 - DLT_A429 = 0xb8 - DLT_A653_ICM = 0xb9 - DLT_AIRONET_HEADER = 0x78 - DLT_AOS = 
0xde - DLT_APPLE_IP_OVER_IEEE1394 = 0x8a - DLT_ARCNET = 0x7 - DLT_ARCNET_LINUX = 0x81 - DLT_ATM_CLIP = 0x13 - DLT_ATM_RFC1483 = 0xb - DLT_AURORA = 0x7e - DLT_AX25 = 0x3 - DLT_AX25_KISS = 0xca - DLT_BACNET_MS_TP = 0xa5 - DLT_BLUETOOTH_HCI_H4 = 0xbb - DLT_BLUETOOTH_HCI_H4_WITH_PHDR = 0xc9 - DLT_CAN20B = 0xbe - DLT_CAN_SOCKETCAN = 0xe3 - DLT_CHAOS = 0x5 - DLT_CHDLC = 0x68 - DLT_CISCO_IOS = 0x76 - DLT_C_HDLC = 0x68 - DLT_C_HDLC_WITH_DIR = 0xcd - DLT_DBUS = 0xe7 - DLT_DECT = 0xdd - DLT_DOCSIS = 0x8f - DLT_DVB_CI = 0xeb - DLT_ECONET = 0x73 - DLT_EN10MB = 0x1 - DLT_EN3MB = 0x2 - DLT_ENC = 0x6d - DLT_ERF = 0xc5 - DLT_ERF_ETH = 0xaf - DLT_ERF_POS = 0xb0 - DLT_FC_2 = 0xe0 - DLT_FC_2_WITH_FRAME_DELIMS = 0xe1 - DLT_FDDI = 0xa - DLT_FLEXRAY = 0xd2 - DLT_FRELAY = 0x6b - DLT_FRELAY_WITH_DIR = 0xce - DLT_GCOM_SERIAL = 0xad - DLT_GCOM_T1E1 = 0xac - DLT_GPF_F = 0xab - DLT_GPF_T = 0xaa - DLT_GPRS_LLC = 0xa9 - DLT_GSMTAP_ABIS = 0xda - DLT_GSMTAP_UM = 0xd9 - DLT_HHDLC = 0x79 - DLT_IBM_SN = 0x92 - DLT_IBM_SP = 0x91 - DLT_IEEE802 = 0x6 - DLT_IEEE802_11 = 0x69 - DLT_IEEE802_11_RADIO = 0x7f - DLT_IEEE802_11_RADIO_AVS = 0xa3 - DLT_IEEE802_15_4 = 0xc3 - DLT_IEEE802_15_4_LINUX = 0xbf - DLT_IEEE802_15_4_NOFCS = 0xe6 - DLT_IEEE802_15_4_NONASK_PHY = 0xd7 - DLT_IEEE802_16_MAC_CPS = 0xbc - DLT_IEEE802_16_MAC_CPS_RADIO = 0xc1 - DLT_IPFILTER = 0x74 - DLT_IPMB = 0xc7 - DLT_IPMB_LINUX = 0xd1 - DLT_IPNET = 0xe2 - DLT_IPOIB = 0xf2 - DLT_IPV4 = 0xe4 - DLT_IPV6 = 0xe5 - DLT_IP_OVER_FC = 0x7a - DLT_JUNIPER_ATM1 = 0x89 - DLT_JUNIPER_ATM2 = 0x87 - DLT_JUNIPER_ATM_CEMIC = 0xee - DLT_JUNIPER_CHDLC = 0xb5 - DLT_JUNIPER_ES = 0x84 - DLT_JUNIPER_ETHER = 0xb2 - DLT_JUNIPER_FIBRECHANNEL = 0xea - DLT_JUNIPER_FRELAY = 0xb4 - DLT_JUNIPER_GGSN = 0x85 - DLT_JUNIPER_ISM = 0xc2 - DLT_JUNIPER_MFR = 0x86 - DLT_JUNIPER_MLFR = 0x83 - DLT_JUNIPER_MLPPP = 0x82 - DLT_JUNIPER_MONITOR = 0xa4 - DLT_JUNIPER_PIC_PEER = 0xae - DLT_JUNIPER_PPP = 0xb3 - DLT_JUNIPER_PPPOE = 0xa7 - DLT_JUNIPER_PPPOE_ATM = 0xa8 - DLT_JUNIPER_SERVICES = 0x88 
- DLT_JUNIPER_SRX_E2E = 0xe9 - DLT_JUNIPER_ST = 0xc8 - DLT_JUNIPER_VP = 0xb7 - DLT_JUNIPER_VS = 0xe8 - DLT_LAPB_WITH_DIR = 0xcf - DLT_LAPD = 0xcb - DLT_LIN = 0xd4 - DLT_LINUX_EVDEV = 0xd8 - DLT_LINUX_IRDA = 0x90 - DLT_LINUX_LAPD = 0xb1 - DLT_LINUX_PPP_WITHDIRECTION = 0xa6 - DLT_LINUX_SLL = 0x71 - DLT_LOOP = 0x6c - DLT_LTALK = 0x72 - DLT_MATCHING_MAX = 0xf5 - DLT_MATCHING_MIN = 0x68 - DLT_MFR = 0xb6 - DLT_MOST = 0xd3 - DLT_MPEG_2_TS = 0xf3 - DLT_MPLS = 0xdb - DLT_MTP2 = 0x8c - DLT_MTP2_WITH_PHDR = 0x8b - DLT_MTP3 = 0x8d - DLT_MUX27010 = 0xec - DLT_NETANALYZER = 0xf0 - DLT_NETANALYZER_TRANSPARENT = 0xf1 - DLT_NFC_LLCP = 0xf5 - DLT_NFLOG = 0xef - DLT_NG40 = 0xf4 - DLT_NULL = 0x0 - DLT_PCI_EXP = 0x7d - DLT_PFLOG = 0x75 - DLT_PFSYNC = 0x12 - DLT_PPI = 0xc0 - DLT_PPP = 0x9 - DLT_PPP_BSDOS = 0x10 - DLT_PPP_ETHER = 0x33 - DLT_PPP_PPPD = 0xa6 - DLT_PPP_SERIAL = 0x32 - DLT_PPP_WITH_DIR = 0xcc - DLT_PPP_WITH_DIRECTION = 0xa6 - DLT_PRISM_HEADER = 0x77 - DLT_PRONET = 0x4 - DLT_RAIF1 = 0xc6 - DLT_RAW = 0xc - DLT_RIO = 0x7c - DLT_SCCP = 0x8e - DLT_SITA = 0xc4 - DLT_SLIP = 0x8 - DLT_SLIP_BSDOS = 0xf - DLT_STANAG_5066_D_PDU = 0xed - DLT_SUNATM = 0x7b - DLT_SYMANTEC_FIREWALL = 0x63 - DLT_TZSP = 0x80 - DLT_USB = 0xba - DLT_USB_LINUX = 0xbd - DLT_USB_LINUX_MMAPPED = 0xdc - DLT_USER0 = 0x93 - DLT_USER1 = 0x94 - DLT_USER10 = 0x9d - DLT_USER11 = 0x9e - DLT_USER12 = 0x9f - DLT_USER13 = 0xa0 - DLT_USER14 = 0xa1 - DLT_USER15 = 0xa2 - DLT_USER2 = 0x95 - DLT_USER3 = 0x96 - DLT_USER4 = 0x97 - DLT_USER5 = 0x98 - DLT_USER6 = 0x99 - DLT_USER7 = 0x9a - DLT_USER8 = 0x9b - DLT_USER9 = 0x9c - DLT_WIHART = 0xdf - DLT_X2E_SERIAL = 0xd5 - DLT_X2E_XORAYA = 0xd6 - DT_BLK = 0x6 - DT_CHR = 0x2 - DT_DIR = 0x4 - DT_FIFO = 0x1 - DT_LNK = 0xa - DT_REG = 0x8 - DT_SOCK = 0xc - DT_UNKNOWN = 0x0 - DT_WHT = 0xe - ECHO = 0x8 - ECHOCTL = 0x40 - ECHOE = 0x2 - ECHOK = 0x4 - ECHOKE = 0x1 - ECHONL = 0x10 - ECHOPRT = 0x20 - EVFILT_AIO = -0x3 - EVFILT_EXCEPT = -0xf - EVFILT_FS = -0x9 - EVFILT_MACHPORT = -0x8 - EVFILT_PROC = 
-0x5 - EVFILT_READ = -0x1 - EVFILT_SIGNAL = -0x6 - EVFILT_SYSCOUNT = 0xf - EVFILT_THREADMARKER = 0xf - EVFILT_TIMER = -0x7 - EVFILT_USER = -0xa - EVFILT_VM = -0xc - EVFILT_VNODE = -0x4 - EVFILT_WRITE = -0x2 - EV_ADD = 0x1 - EV_CLEAR = 0x20 - EV_DELETE = 0x2 - EV_DISABLE = 0x8 - EV_DISPATCH = 0x80 - EV_DISPATCH2 = 0x180 - EV_ENABLE = 0x4 - EV_EOF = 0x8000 - EV_ERROR = 0x4000 - EV_FLAG0 = 0x1000 - EV_FLAG1 = 0x2000 - EV_ONESHOT = 0x10 - EV_OOBAND = 0x2000 - EV_POLL = 0x1000 - EV_RECEIPT = 0x40 - EV_SYSFLAGS = 0xf000 - EV_UDATA_SPECIFIC = 0x100 - EV_VANISHED = 0x200 - EXTA = 0x4b00 - EXTB = 0x9600 - EXTPROC = 0x800 - FD_CLOEXEC = 0x1 - FD_SETSIZE = 0x400 - FF0 = 0x0 - FF1 = 0x4000 - FFDLY = 0x4000 - FLUSHO = 0x800000 - FSOPT_ATTR_CMN_EXTENDED = 0x20 - FSOPT_NOFOLLOW = 0x1 - FSOPT_NOINMEMUPDATE = 0x2 - FSOPT_PACK_INVAL_ATTRS = 0x8 - FSOPT_REPORT_FULLSIZE = 0x4 - F_ADDFILESIGS = 0x3d - F_ADDFILESIGS_FOR_DYLD_SIM = 0x53 - F_ADDFILESIGS_RETURN = 0x61 - F_ADDSIGS = 0x3b - F_ALLOCATEALL = 0x4 - F_ALLOCATECONTIG = 0x2 - F_BARRIERFSYNC = 0x55 - F_CHECK_LV = 0x62 - F_CHKCLEAN = 0x29 - F_DUPFD = 0x0 - F_DUPFD_CLOEXEC = 0x43 - F_FINDSIGS = 0x4e - F_FLUSH_DATA = 0x28 - F_FREEZE_FS = 0x35 - F_FULLFSYNC = 0x33 - F_GETCODEDIR = 0x48 - F_GETFD = 0x1 - F_GETFL = 0x3 - F_GETLK = 0x7 - F_GETLKPID = 0x42 - F_GETNOSIGPIPE = 0x4a - F_GETOWN = 0x5 - F_GETPATH = 0x32 - F_GETPATH_MTMINFO = 0x47 - F_GETPROTECTIONCLASS = 0x3f - F_GETPROTECTIONLEVEL = 0x4d - F_GLOBAL_NOCACHE = 0x37 - F_LOG2PHYS = 0x31 - F_LOG2PHYS_EXT = 0x41 - F_NOCACHE = 0x30 - F_NODIRECT = 0x3e - F_OK = 0x0 - F_PATHPKG_CHECK = 0x34 - F_PEOFPOSMODE = 0x3 - F_PREALLOCATE = 0x2a - F_PUNCHHOLE = 0x63 - F_RDADVISE = 0x2c - F_RDAHEAD = 0x2d - F_RDLCK = 0x1 - F_SETBACKINGSTORE = 0x46 - F_SETFD = 0x2 - F_SETFL = 0x4 - F_SETLK = 0x8 - F_SETLKW = 0x9 - F_SETLKWTIMEOUT = 0xa - F_SETNOSIGPIPE = 0x49 - F_SETOWN = 0x6 - F_SETPROTECTIONCLASS = 0x40 - F_SETSIZE = 0x2b - F_SINGLE_WRITER = 0x4c - F_THAW_FS = 0x36 - F_TRANSCODEKEY = 0x4b - 
F_TRIM_ACTIVE_FILE = 0x64 - F_UNLCK = 0x2 - F_VOLPOSMODE = 0x4 - F_WRLCK = 0x3 - HUPCL = 0x4000 - HW_MACHINE = 0x1 - ICANON = 0x100 - ICMP6_FILTER = 0x12 - ICRNL = 0x100 - IEXTEN = 0x400 - IFF_ALLMULTI = 0x200 - IFF_ALTPHYS = 0x4000 - IFF_BROADCAST = 0x2 - IFF_DEBUG = 0x4 - IFF_LINK0 = 0x1000 - IFF_LINK1 = 0x2000 - IFF_LINK2 = 0x4000 - IFF_LOOPBACK = 0x8 - IFF_MULTICAST = 0x8000 - IFF_NOARP = 0x80 - IFF_NOTRAILERS = 0x20 - IFF_OACTIVE = 0x400 - IFF_POINTOPOINT = 0x10 - IFF_PROMISC = 0x100 - IFF_RUNNING = 0x40 - IFF_SIMPLEX = 0x800 - IFF_UP = 0x1 - IFNAMSIZ = 0x10 - IFT_1822 = 0x2 - IFT_AAL5 = 0x31 - IFT_ARCNET = 0x23 - IFT_ARCNETPLUS = 0x24 - IFT_ATM = 0x25 - IFT_BRIDGE = 0xd1 - IFT_CARP = 0xf8 - IFT_CELLULAR = 0xff - IFT_CEPT = 0x13 - IFT_DS3 = 0x1e - IFT_ENC = 0xf4 - IFT_EON = 0x19 - IFT_ETHER = 0x6 - IFT_FAITH = 0x38 - IFT_FDDI = 0xf - IFT_FRELAY = 0x20 - IFT_FRELAYDCE = 0x2c - IFT_GIF = 0x37 - IFT_HDH1822 = 0x3 - IFT_HIPPI = 0x2f - IFT_HSSI = 0x2e - IFT_HY = 0xe - IFT_IEEE1394 = 0x90 - IFT_IEEE8023ADLAG = 0x88 - IFT_ISDNBASIC = 0x14 - IFT_ISDNPRIMARY = 0x15 - IFT_ISO88022LLC = 0x29 - IFT_ISO88023 = 0x7 - IFT_ISO88024 = 0x8 - IFT_ISO88025 = 0x9 - IFT_ISO88026 = 0xa - IFT_L2VLAN = 0x87 - IFT_LAPB = 0x10 - IFT_LOCALTALK = 0x2a - IFT_LOOP = 0x18 - IFT_MIOX25 = 0x26 - IFT_MODEM = 0x30 - IFT_NSIP = 0x1b - IFT_OTHER = 0x1 - IFT_P10 = 0xc - IFT_P80 = 0xd - IFT_PARA = 0x22 - IFT_PDP = 0xff - IFT_PFLOG = 0xf5 - IFT_PFSYNC = 0xf6 - IFT_PKTAP = 0xfe - IFT_PPP = 0x17 - IFT_PROPMUX = 0x36 - IFT_PROPVIRTUAL = 0x35 - IFT_PTPSERIAL = 0x16 - IFT_RS232 = 0x21 - IFT_SDLC = 0x11 - IFT_SIP = 0x1f - IFT_SLIP = 0x1c - IFT_SMDSDXI = 0x2b - IFT_SMDSICIP = 0x34 - IFT_SONET = 0x27 - IFT_SONETPATH = 0x32 - IFT_SONETVT = 0x33 - IFT_STARLAN = 0xb - IFT_STF = 0x39 - IFT_T1 = 0x12 - IFT_ULTRA = 0x1d - IFT_V35 = 0x2d - IFT_X25 = 0x5 - IFT_X25DDN = 0x4 - IFT_X25PLE = 0x28 - IFT_XETHER = 0x1a - IGNBRK = 0x1 - IGNCR = 0x80 - IGNPAR = 0x4 - IMAXBEL = 0x2000 - INLCR = 0x40 - INPCK = 0x10 - 
IN_CLASSA_HOST = 0xffffff - IN_CLASSA_MAX = 0x80 - IN_CLASSA_NET = 0xff000000 - IN_CLASSA_NSHIFT = 0x18 - IN_CLASSB_HOST = 0xffff - IN_CLASSB_MAX = 0x10000 - IN_CLASSB_NET = 0xffff0000 - IN_CLASSB_NSHIFT = 0x10 - IN_CLASSC_HOST = 0xff - IN_CLASSC_NET = 0xffffff00 - IN_CLASSC_NSHIFT = 0x8 - IN_CLASSD_HOST = 0xfffffff - IN_CLASSD_NET = 0xf0000000 - IN_CLASSD_NSHIFT = 0x1c - IN_LINKLOCALNETNUM = 0xa9fe0000 - IN_LOOPBACKNET = 0x7f - IPPROTO_3PC = 0x22 - IPPROTO_ADFS = 0x44 - IPPROTO_AH = 0x33 - IPPROTO_AHIP = 0x3d - IPPROTO_APES = 0x63 - IPPROTO_ARGUS = 0xd - IPPROTO_AX25 = 0x5d - IPPROTO_BHA = 0x31 - IPPROTO_BLT = 0x1e - IPPROTO_BRSATMON = 0x4c - IPPROTO_CFTP = 0x3e - IPPROTO_CHAOS = 0x10 - IPPROTO_CMTP = 0x26 - IPPROTO_CPHB = 0x49 - IPPROTO_CPNX = 0x48 - IPPROTO_DDP = 0x25 - IPPROTO_DGP = 0x56 - IPPROTO_DIVERT = 0xfe - IPPROTO_DONE = 0x101 - IPPROTO_DSTOPTS = 0x3c - IPPROTO_EGP = 0x8 - IPPROTO_EMCON = 0xe - IPPROTO_ENCAP = 0x62 - IPPROTO_EON = 0x50 - IPPROTO_ESP = 0x32 - IPPROTO_ETHERIP = 0x61 - IPPROTO_FRAGMENT = 0x2c - IPPROTO_GGP = 0x3 - IPPROTO_GMTP = 0x64 - IPPROTO_GRE = 0x2f - IPPROTO_HELLO = 0x3f - IPPROTO_HMP = 0x14 - IPPROTO_HOPOPTS = 0x0 - IPPROTO_ICMP = 0x1 - IPPROTO_ICMPV6 = 0x3a - IPPROTO_IDP = 0x16 - IPPROTO_IDPR = 0x23 - IPPROTO_IDRP = 0x2d - IPPROTO_IGMP = 0x2 - IPPROTO_IGP = 0x55 - IPPROTO_IGRP = 0x58 - IPPROTO_IL = 0x28 - IPPROTO_INLSP = 0x34 - IPPROTO_INP = 0x20 - IPPROTO_IP = 0x0 - IPPROTO_IPCOMP = 0x6c - IPPROTO_IPCV = 0x47 - IPPROTO_IPEIP = 0x5e - IPPROTO_IPIP = 0x4 - IPPROTO_IPPC = 0x43 - IPPROTO_IPV4 = 0x4 - IPPROTO_IPV6 = 0x29 - IPPROTO_IRTP = 0x1c - IPPROTO_KRYPTOLAN = 0x41 - IPPROTO_LARP = 0x5b - IPPROTO_LEAF1 = 0x19 - IPPROTO_LEAF2 = 0x1a - IPPROTO_MAX = 0x100 - IPPROTO_MAXID = 0x34 - IPPROTO_MEAS = 0x13 - IPPROTO_MHRP = 0x30 - IPPROTO_MICP = 0x5f - IPPROTO_MTP = 0x5c - IPPROTO_MUX = 0x12 - IPPROTO_ND = 0x4d - IPPROTO_NHRP = 0x36 - IPPROTO_NONE = 0x3b - IPPROTO_NSP = 0x1f - IPPROTO_NVPII = 0xb - IPPROTO_OSPFIGP = 0x59 - IPPROTO_PGM = 0x71 
- IPPROTO_PIGP = 0x9 - IPPROTO_PIM = 0x67 - IPPROTO_PRM = 0x15 - IPPROTO_PUP = 0xc - IPPROTO_PVP = 0x4b - IPPROTO_RAW = 0xff - IPPROTO_RCCMON = 0xa - IPPROTO_RDP = 0x1b - IPPROTO_ROUTING = 0x2b - IPPROTO_RSVP = 0x2e - IPPROTO_RVD = 0x42 - IPPROTO_SATEXPAK = 0x40 - IPPROTO_SATMON = 0x45 - IPPROTO_SCCSP = 0x60 - IPPROTO_SCTP = 0x84 - IPPROTO_SDRP = 0x2a - IPPROTO_SEP = 0x21 - IPPROTO_SRPC = 0x5a - IPPROTO_ST = 0x7 - IPPROTO_SVMTP = 0x52 - IPPROTO_SWIPE = 0x35 - IPPROTO_TCF = 0x57 - IPPROTO_TCP = 0x6 - IPPROTO_TP = 0x1d - IPPROTO_TPXX = 0x27 - IPPROTO_TRUNK1 = 0x17 - IPPROTO_TRUNK2 = 0x18 - IPPROTO_TTP = 0x54 - IPPROTO_UDP = 0x11 - IPPROTO_VINES = 0x53 - IPPROTO_VISA = 0x46 - IPPROTO_VMTP = 0x51 - IPPROTO_WBEXPAK = 0x4f - IPPROTO_WBMON = 0x4e - IPPROTO_WSN = 0x4a - IPPROTO_XNET = 0xf - IPPROTO_XTP = 0x24 - IPV6_2292DSTOPTS = 0x17 - IPV6_2292HOPLIMIT = 0x14 - IPV6_2292HOPOPTS = 0x16 - IPV6_2292NEXTHOP = 0x15 - IPV6_2292PKTINFO = 0x13 - IPV6_2292PKTOPTIONS = 0x19 - IPV6_2292RTHDR = 0x18 - IPV6_BINDV6ONLY = 0x1b - IPV6_BOUND_IF = 0x7d - IPV6_CHECKSUM = 0x1a - IPV6_DEFAULT_MULTICAST_HOPS = 0x1 - IPV6_DEFAULT_MULTICAST_LOOP = 0x1 - IPV6_DEFHLIM = 0x40 - IPV6_FAITH = 0x1d - IPV6_FLOWINFO_MASK = 0xffffff0f - IPV6_FLOWLABEL_MASK = 0xffff0f00 - IPV6_FLOW_ECN_MASK = 0x300 - IPV6_FRAGTTL = 0x3c - IPV6_FW_ADD = 0x1e - IPV6_FW_DEL = 0x1f - IPV6_FW_FLUSH = 0x20 - IPV6_FW_GET = 0x22 - IPV6_FW_ZERO = 0x21 - IPV6_HLIMDEC = 0x1 - IPV6_IPSEC_POLICY = 0x1c - IPV6_JOIN_GROUP = 0xc - IPV6_LEAVE_GROUP = 0xd - IPV6_MAXHLIM = 0xff - IPV6_MAXOPTHDR = 0x800 - IPV6_MAXPACKET = 0xffff - IPV6_MAX_GROUP_SRC_FILTER = 0x200 - IPV6_MAX_MEMBERSHIPS = 0xfff - IPV6_MAX_SOCK_SRC_FILTER = 0x80 - IPV6_MIN_MEMBERSHIPS = 0x1f - IPV6_MMTU = 0x500 - IPV6_MULTICAST_HOPS = 0xa - IPV6_MULTICAST_IF = 0x9 - IPV6_MULTICAST_LOOP = 0xb - IPV6_PORTRANGE = 0xe - IPV6_PORTRANGE_DEFAULT = 0x0 - IPV6_PORTRANGE_HIGH = 0x1 - IPV6_PORTRANGE_LOW = 0x2 - IPV6_RECVTCLASS = 0x23 - IPV6_RTHDR_LOOSE = 0x0 - IPV6_RTHDR_STRICT = 0x1 - 
IPV6_RTHDR_TYPE_0 = 0x0 - IPV6_SOCKOPT_RESERVED1 = 0x3 - IPV6_TCLASS = 0x24 - IPV6_UNICAST_HOPS = 0x4 - IPV6_V6ONLY = 0x1b - IPV6_VERSION = 0x60 - IPV6_VERSION_MASK = 0xf0 - IP_ADD_MEMBERSHIP = 0xc - IP_ADD_SOURCE_MEMBERSHIP = 0x46 - IP_BLOCK_SOURCE = 0x48 - IP_BOUND_IF = 0x19 - IP_DEFAULT_MULTICAST_LOOP = 0x1 - IP_DEFAULT_MULTICAST_TTL = 0x1 - IP_DF = 0x4000 - IP_DROP_MEMBERSHIP = 0xd - IP_DROP_SOURCE_MEMBERSHIP = 0x47 - IP_DUMMYNET_CONFIGURE = 0x3c - IP_DUMMYNET_DEL = 0x3d - IP_DUMMYNET_FLUSH = 0x3e - IP_DUMMYNET_GET = 0x40 - IP_FAITH = 0x16 - IP_FW_ADD = 0x28 - IP_FW_DEL = 0x29 - IP_FW_FLUSH = 0x2a - IP_FW_GET = 0x2c - IP_FW_RESETLOG = 0x2d - IP_FW_ZERO = 0x2b - IP_HDRINCL = 0x2 - IP_IPSEC_POLICY = 0x15 - IP_MAXPACKET = 0xffff - IP_MAX_GROUP_SRC_FILTER = 0x200 - IP_MAX_MEMBERSHIPS = 0xfff - IP_MAX_SOCK_MUTE_FILTER = 0x80 - IP_MAX_SOCK_SRC_FILTER = 0x80 - IP_MF = 0x2000 - IP_MIN_MEMBERSHIPS = 0x1f - IP_MSFILTER = 0x4a - IP_MSS = 0x240 - IP_MULTICAST_IF = 0x9 - IP_MULTICAST_IFINDEX = 0x42 - IP_MULTICAST_LOOP = 0xb - IP_MULTICAST_TTL = 0xa - IP_MULTICAST_VIF = 0xe - IP_NAT__XXX = 0x37 - IP_OFFMASK = 0x1fff - IP_OLD_FW_ADD = 0x32 - IP_OLD_FW_DEL = 0x33 - IP_OLD_FW_FLUSH = 0x34 - IP_OLD_FW_GET = 0x36 - IP_OLD_FW_RESETLOG = 0x38 - IP_OLD_FW_ZERO = 0x35 - IP_OPTIONS = 0x1 - IP_PKTINFO = 0x1a - IP_PORTRANGE = 0x13 - IP_PORTRANGE_DEFAULT = 0x0 - IP_PORTRANGE_HIGH = 0x1 - IP_PORTRANGE_LOW = 0x2 - IP_RECVDSTADDR = 0x7 - IP_RECVIF = 0x14 - IP_RECVOPTS = 0x5 - IP_RECVPKTINFO = 0x1a - IP_RECVRETOPTS = 0x6 - IP_RECVTOS = 0x1b - IP_RECVTTL = 0x18 - IP_RETOPTS = 0x8 - IP_RF = 0x8000 - IP_RSVP_OFF = 0x10 - IP_RSVP_ON = 0xf - IP_RSVP_VIF_OFF = 0x12 - IP_RSVP_VIF_ON = 0x11 - IP_STRIPHDR = 0x17 - IP_TOS = 0x3 - IP_TRAFFIC_MGT_BACKGROUND = 0x41 - IP_TTL = 0x4 - IP_UNBLOCK_SOURCE = 0x49 - ISIG = 0x80 - ISTRIP = 0x20 - IUTF8 = 0x4000 - IXANY = 0x800 - IXOFF = 0x400 - IXON = 0x200 - KERN_HOSTNAME = 0xa - KERN_OSRELEASE = 0x2 - KERN_OSTYPE = 0x1 - KERN_VERSION = 0x4 - LOCK_EX = 0x2 - 
LOCK_NB = 0x4 - LOCK_SH = 0x1 - LOCK_UN = 0x8 - MADV_CAN_REUSE = 0x9 - MADV_DONTNEED = 0x4 - MADV_FREE = 0x5 - MADV_FREE_REUSABLE = 0x7 - MADV_FREE_REUSE = 0x8 - MADV_NORMAL = 0x0 - MADV_PAGEOUT = 0xa - MADV_RANDOM = 0x1 - MADV_SEQUENTIAL = 0x2 - MADV_WILLNEED = 0x3 - MADV_ZERO_WIRED_PAGES = 0x6 - MAP_ANON = 0x1000 - MAP_ANONYMOUS = 0x1000 - MAP_COPY = 0x2 - MAP_FILE = 0x0 - MAP_FIXED = 0x10 - MAP_HASSEMAPHORE = 0x200 - MAP_JIT = 0x800 - MAP_NOCACHE = 0x400 - MAP_NOEXTEND = 0x100 - MAP_NORESERVE = 0x40 - MAP_PRIVATE = 0x2 - MAP_RENAME = 0x20 - MAP_RESERVED0080 = 0x80 - MAP_RESILIENT_CODESIGN = 0x2000 - MAP_RESILIENT_MEDIA = 0x4000 - MAP_SHARED = 0x1 - MCL_CURRENT = 0x1 - MCL_FUTURE = 0x2 - MNT_ASYNC = 0x40 - MNT_AUTOMOUNTED = 0x400000 - MNT_CMDFLAGS = 0xf0000 - MNT_CPROTECT = 0x80 - MNT_DEFWRITE = 0x2000000 - MNT_DONTBROWSE = 0x100000 - MNT_DOVOLFS = 0x8000 - MNT_DWAIT = 0x4 - MNT_EXPORTED = 0x100 - MNT_FORCE = 0x80000 - MNT_IGNORE_OWNERSHIP = 0x200000 - MNT_JOURNALED = 0x800000 - MNT_LOCAL = 0x1000 - MNT_MULTILABEL = 0x4000000 - MNT_NOATIME = 0x10000000 - MNT_NOBLOCK = 0x20000 - MNT_NODEV = 0x10 - MNT_NOEXEC = 0x4 - MNT_NOSUID = 0x8 - MNT_NOUSERXATTR = 0x1000000 - MNT_NOWAIT = 0x2 - MNT_QUARANTINE = 0x400 - MNT_QUOTA = 0x2000 - MNT_RDONLY = 0x1 - MNT_RELOAD = 0x40000 - MNT_ROOTFS = 0x4000 - MNT_SYNCHRONOUS = 0x2 - MNT_UNION = 0x20 - MNT_UNKNOWNPERMISSIONS = 0x200000 - MNT_UPDATE = 0x10000 - MNT_VISFLAGMASK = 0x17f0f5ff - MNT_WAIT = 0x1 - MSG_CTRUNC = 0x20 - MSG_DONTROUTE = 0x4 - MSG_DONTWAIT = 0x80 - MSG_EOF = 0x100 - MSG_EOR = 0x8 - MSG_FLUSH = 0x400 - MSG_HAVEMORE = 0x2000 - MSG_HOLD = 0x800 - MSG_NEEDSA = 0x10000 - MSG_OOB = 0x1 - MSG_PEEK = 0x2 - MSG_RCVMORE = 0x4000 - MSG_SEND = 0x1000 - MSG_TRUNC = 0x10 - MSG_WAITALL = 0x40 - MSG_WAITSTREAM = 0x200 - MS_ASYNC = 0x1 - MS_DEACTIVATE = 0x8 - MS_INVALIDATE = 0x2 - MS_KILLPAGES = 0x4 - MS_SYNC = 0x10 - NAME_MAX = 0xff - NET_RT_DUMP = 0x1 - NET_RT_DUMP2 = 0x7 - NET_RT_FLAGS = 0x2 - NET_RT_IFLIST = 0x3 - 
NET_RT_IFLIST2 = 0x6 - NET_RT_MAXID = 0xa - NET_RT_STAT = 0x4 - NET_RT_TRASH = 0x5 - NFDBITS = 0x20 - NL0 = 0x0 - NL1 = 0x100 - NL2 = 0x200 - NL3 = 0x300 - NLDLY = 0x300 - NOFLSH = 0x80000000 - NOKERNINFO = 0x2000000 - NOTE_ABSOLUTE = 0x8 - NOTE_ATTRIB = 0x8 - NOTE_BACKGROUND = 0x40 - NOTE_CHILD = 0x4 - NOTE_CRITICAL = 0x20 - NOTE_DELETE = 0x1 - NOTE_EXEC = 0x20000000 - NOTE_EXIT = 0x80000000 - NOTE_EXITSTATUS = 0x4000000 - NOTE_EXIT_CSERROR = 0x40000 - NOTE_EXIT_DECRYPTFAIL = 0x10000 - NOTE_EXIT_DETAIL = 0x2000000 - NOTE_EXIT_DETAIL_MASK = 0x70000 - NOTE_EXIT_MEMORY = 0x20000 - NOTE_EXIT_REPARENTED = 0x80000 - NOTE_EXTEND = 0x4 - NOTE_FFAND = 0x40000000 - NOTE_FFCOPY = 0xc0000000 - NOTE_FFCTRLMASK = 0xc0000000 - NOTE_FFLAGSMASK = 0xffffff - NOTE_FFNOP = 0x0 - NOTE_FFOR = 0x80000000 - NOTE_FORK = 0x40000000 - NOTE_FUNLOCK = 0x100 - NOTE_LEEWAY = 0x10 - NOTE_LINK = 0x10 - NOTE_LOWAT = 0x1 - NOTE_MACH_CONTINUOUS_TIME = 0x80 - NOTE_NONE = 0x80 - NOTE_NSECONDS = 0x4 - NOTE_OOB = 0x2 - NOTE_PCTRLMASK = -0x100000 - NOTE_PDATAMASK = 0xfffff - NOTE_REAP = 0x10000000 - NOTE_RENAME = 0x20 - NOTE_REVOKE = 0x40 - NOTE_SECONDS = 0x1 - NOTE_SIGNAL = 0x8000000 - NOTE_TRACK = 0x1 - NOTE_TRACKERR = 0x2 - NOTE_TRIGGER = 0x1000000 - NOTE_USECONDS = 0x2 - NOTE_VM_ERROR = 0x10000000 - NOTE_VM_PRESSURE = 0x80000000 - NOTE_VM_PRESSURE_SUDDEN_TERMINATE = 0x20000000 - NOTE_VM_PRESSURE_TERMINATE = 0x40000000 - NOTE_WRITE = 0x2 - OCRNL = 0x10 - OFDEL = 0x20000 - OFILL = 0x80 - ONLCR = 0x2 - ONLRET = 0x40 - ONOCR = 0x20 - ONOEOT = 0x8 - OPOST = 0x1 - OXTABS = 0x4 - O_ACCMODE = 0x3 - O_ALERT = 0x20000000 - O_APPEND = 0x8 - O_ASYNC = 0x40 - O_CLOEXEC = 0x1000000 - O_CREAT = 0x200 - O_DIRECTORY = 0x100000 - O_DP_GETRAWENCRYPTED = 0x1 - O_DP_GETRAWUNENCRYPTED = 0x2 - O_DSYNC = 0x400000 - O_EVTONLY = 0x8000 - O_EXCL = 0x800 - O_EXLOCK = 0x20 - O_FSYNC = 0x80 - O_NDELAY = 0x4 - O_NOCTTY = 0x20000 - O_NOFOLLOW = 0x100 - O_NONBLOCK = 0x4 - O_POPUP = 0x80000000 - O_RDONLY = 0x0 - O_RDWR = 0x2 - 
O_SHLOCK = 0x10 - O_SYMLINK = 0x200000 - O_SYNC = 0x80 - O_TRUNC = 0x400 - O_WRONLY = 0x1 - PARENB = 0x1000 - PARMRK = 0x8 - PARODD = 0x2000 - PENDIN = 0x20000000 - PRIO_PGRP = 0x1 - PRIO_PROCESS = 0x0 - PRIO_USER = 0x2 - PROT_EXEC = 0x4 - PROT_NONE = 0x0 - PROT_READ = 0x1 - PROT_WRITE = 0x2 - PT_ATTACH = 0xa - PT_ATTACHEXC = 0xe - PT_CONTINUE = 0x7 - PT_DENY_ATTACH = 0x1f - PT_DETACH = 0xb - PT_FIRSTMACH = 0x20 - PT_FORCEQUOTA = 0x1e - PT_KILL = 0x8 - PT_READ_D = 0x2 - PT_READ_I = 0x1 - PT_READ_U = 0x3 - PT_SIGEXC = 0xc - PT_STEP = 0x9 - PT_THUPDATE = 0xd - PT_TRACE_ME = 0x0 - PT_WRITE_D = 0x5 - PT_WRITE_I = 0x4 - PT_WRITE_U = 0x6 - RLIMIT_AS = 0x5 - RLIMIT_CORE = 0x4 - RLIMIT_CPU = 0x0 - RLIMIT_CPU_USAGE_MONITOR = 0x2 - RLIMIT_DATA = 0x2 - RLIMIT_FSIZE = 0x1 - RLIMIT_MEMLOCK = 0x6 - RLIMIT_NOFILE = 0x8 - RLIMIT_NPROC = 0x7 - RLIMIT_RSS = 0x5 - RLIMIT_STACK = 0x3 - RLIM_INFINITY = 0x7fffffffffffffff - RTAX_AUTHOR = 0x6 - RTAX_BRD = 0x7 - RTAX_DST = 0x0 - RTAX_GATEWAY = 0x1 - RTAX_GENMASK = 0x3 - RTAX_IFA = 0x5 - RTAX_IFP = 0x4 - RTAX_MAX = 0x8 - RTAX_NETMASK = 0x2 - RTA_AUTHOR = 0x40 - RTA_BRD = 0x80 - RTA_DST = 0x1 - RTA_GATEWAY = 0x2 - RTA_GENMASK = 0x8 - RTA_IFA = 0x20 - RTA_IFP = 0x10 - RTA_NETMASK = 0x4 - RTF_BLACKHOLE = 0x1000 - RTF_BROADCAST = 0x400000 - RTF_CLONING = 0x100 - RTF_CONDEMNED = 0x2000000 - RTF_DELCLONE = 0x80 - RTF_DONE = 0x40 - RTF_DYNAMIC = 0x10 - RTF_GATEWAY = 0x2 - RTF_HOST = 0x4 - RTF_IFREF = 0x4000000 - RTF_IFSCOPE = 0x1000000 - RTF_LLINFO = 0x400 - RTF_LOCAL = 0x200000 - RTF_MODIFIED = 0x20 - RTF_MULTICAST = 0x800000 - RTF_NOIFREF = 0x2000 - RTF_PINNED = 0x100000 - RTF_PRCLONING = 0x10000 - RTF_PROTO1 = 0x8000 - RTF_PROTO2 = 0x4000 - RTF_PROTO3 = 0x40000 - RTF_PROXY = 0x8000000 - RTF_REJECT = 0x8 - RTF_ROUTER = 0x10000000 - RTF_STATIC = 0x800 - RTF_UP = 0x1 - RTF_WASCLONED = 0x20000 - RTF_XRESOLVE = 0x200 - RTM_ADD = 0x1 - RTM_CHANGE = 0x3 - RTM_DELADDR = 0xd - RTM_DELETE = 0x2 - RTM_DELMADDR = 0x10 - RTM_GET = 0x4 - RTM_GET2 = 0x14 - 
RTM_IFINFO = 0xe - RTM_IFINFO2 = 0x12 - RTM_LOCK = 0x8 - RTM_LOSING = 0x5 - RTM_MISS = 0x7 - RTM_NEWADDR = 0xc - RTM_NEWMADDR = 0xf - RTM_NEWMADDR2 = 0x13 - RTM_OLDADD = 0x9 - RTM_OLDDEL = 0xa - RTM_REDIRECT = 0x6 - RTM_RESOLVE = 0xb - RTM_RTTUNIT = 0xf4240 - RTM_VERSION = 0x5 - RTV_EXPIRE = 0x4 - RTV_HOPCOUNT = 0x2 - RTV_MTU = 0x1 - RTV_RPIPE = 0x8 - RTV_RTT = 0x40 - RTV_RTTVAR = 0x80 - RTV_SPIPE = 0x10 - RTV_SSTHRESH = 0x20 - RUSAGE_CHILDREN = -0x1 - RUSAGE_SELF = 0x0 - SCM_CREDS = 0x3 - SCM_RIGHTS = 0x1 - SCM_TIMESTAMP = 0x2 - SCM_TIMESTAMP_MONOTONIC = 0x4 - SHUT_RD = 0x0 - SHUT_RDWR = 0x2 - SHUT_WR = 0x1 - SIOCADDMULTI = 0x80206931 - SIOCAIFADDR = 0x8040691a - SIOCARPIPLL = 0xc0206928 - SIOCATMARK = 0x40047307 - SIOCAUTOADDR = 0xc0206926 - SIOCAUTONETMASK = 0x80206927 - SIOCDELMULTI = 0x80206932 - SIOCDIFADDR = 0x80206919 - SIOCDIFPHYADDR = 0x80206941 - SIOCGDRVSPEC = 0xc028697b - SIOCGETVLAN = 0xc020697f - SIOCGHIWAT = 0x40047301 - SIOCGIFADDR = 0xc0206921 - SIOCGIFALTMTU = 0xc0206948 - SIOCGIFASYNCMAP = 0xc020697c - SIOCGIFBOND = 0xc0206947 - SIOCGIFBRDADDR = 0xc0206923 - SIOCGIFCAP = 0xc020695b - SIOCGIFCONF = 0xc00c6924 - SIOCGIFDEVMTU = 0xc0206944 - SIOCGIFDSTADDR = 0xc0206922 - SIOCGIFFLAGS = 0xc0206911 - SIOCGIFGENERIC = 0xc020693a - SIOCGIFKPI = 0xc0206987 - SIOCGIFMAC = 0xc0206982 - SIOCGIFMEDIA = 0xc02c6938 - SIOCGIFMETRIC = 0xc0206917 - SIOCGIFMTU = 0xc0206933 - SIOCGIFNETMASK = 0xc0206925 - SIOCGIFPDSTADDR = 0xc0206940 - SIOCGIFPHYS = 0xc0206935 - SIOCGIFPSRCADDR = 0xc020693f - SIOCGIFSTATUS = 0xc331693d - SIOCGIFVLAN = 0xc020697f - SIOCGIFWAKEFLAGS = 0xc0206988 - SIOCGLOWAT = 0x40047303 - SIOCGPGRP = 0x40047309 - SIOCIFCREATE = 0xc0206978 - SIOCIFCREATE2 = 0xc020697a - SIOCIFDESTROY = 0x80206979 - SIOCIFGCLONERS = 0xc0106981 - SIOCRSLVMULTI = 0xc010693b - SIOCSDRVSPEC = 0x8028697b - SIOCSETVLAN = 0x8020697e - SIOCSHIWAT = 0x80047300 - SIOCSIFADDR = 0x8020690c - SIOCSIFALTMTU = 0x80206945 - SIOCSIFASYNCMAP = 0x8020697d - SIOCSIFBOND = 0x80206946 - 
SIOCSIFBRDADDR = 0x80206913 - SIOCSIFCAP = 0x8020695a - SIOCSIFDSTADDR = 0x8020690e - SIOCSIFFLAGS = 0x80206910 - SIOCSIFGENERIC = 0x80206939 - SIOCSIFKPI = 0x80206986 - SIOCSIFLLADDR = 0x8020693c - SIOCSIFMAC = 0x80206983 - SIOCSIFMEDIA = 0xc0206937 - SIOCSIFMETRIC = 0x80206918 - SIOCSIFMTU = 0x80206934 - SIOCSIFNETMASK = 0x80206916 - SIOCSIFPHYADDR = 0x8040693e - SIOCSIFPHYS = 0x80206936 - SIOCSIFVLAN = 0x8020697e - SIOCSLOWAT = 0x80047302 - SIOCSPGRP = 0x80047308 - SOCK_DGRAM = 0x2 - SOCK_MAXADDRLEN = 0xff - SOCK_RAW = 0x3 - SOCK_RDM = 0x4 - SOCK_SEQPACKET = 0x5 - SOCK_STREAM = 0x1 - SOL_SOCKET = 0xffff - SOMAXCONN = 0x80 - SO_ACCEPTCONN = 0x2 - SO_BROADCAST = 0x20 - SO_DEBUG = 0x1 - SO_DONTROUTE = 0x10 - SO_DONTTRUNC = 0x2000 - SO_ERROR = 0x1007 - SO_KEEPALIVE = 0x8 - SO_LABEL = 0x1010 - SO_LINGER = 0x80 - SO_LINGER_SEC = 0x1080 - SO_NETSVC_MARKING_LEVEL = 0x1119 - SO_NET_SERVICE_TYPE = 0x1116 - SO_NKE = 0x1021 - SO_NOADDRERR = 0x1023 - SO_NOSIGPIPE = 0x1022 - SO_NOTIFYCONFLICT = 0x1026 - SO_NP_EXTENSIONS = 0x1083 - SO_NREAD = 0x1020 - SO_NUMRCVPKT = 0x1112 - SO_NWRITE = 0x1024 - SO_OOBINLINE = 0x100 - SO_PEERLABEL = 0x1011 - SO_RANDOMPORT = 0x1082 - SO_RCVBUF = 0x1002 - SO_RCVLOWAT = 0x1004 - SO_RCVTIMEO = 0x1006 - SO_REUSEADDR = 0x4 - SO_REUSEPORT = 0x200 - SO_REUSESHAREUID = 0x1025 - SO_SNDBUF = 0x1001 - SO_SNDLOWAT = 0x1003 - SO_SNDTIMEO = 0x1005 - SO_TIMESTAMP = 0x400 - SO_TIMESTAMP_MONOTONIC = 0x800 - SO_TYPE = 0x1008 - SO_UPCALLCLOSEWAIT = 0x1027 - SO_USELOOPBACK = 0x40 - SO_WANTMORE = 0x4000 - SO_WANTOOBFLAG = 0x8000 - S_IEXEC = 0x40 - S_IFBLK = 0x6000 - S_IFCHR = 0x2000 - S_IFDIR = 0x4000 - S_IFIFO = 0x1000 - S_IFLNK = 0xa000 - S_IFMT = 0xf000 - S_IFREG = 0x8000 - S_IFSOCK = 0xc000 - S_IFWHT = 0xe000 - S_IREAD = 0x100 - S_IRGRP = 0x20 - S_IROTH = 0x4 - S_IRUSR = 0x100 - S_IRWXG = 0x38 - S_IRWXO = 0x7 - S_IRWXU = 0x1c0 - S_ISGID = 0x400 - S_ISTXT = 0x200 - S_ISUID = 0x800 - S_ISVTX = 0x200 - S_IWGRP = 0x10 - S_IWOTH = 0x2 - S_IWRITE = 0x80 - S_IWUSR = 
0x80 - S_IXGRP = 0x8 - S_IXOTH = 0x1 - S_IXUSR = 0x40 - TAB0 = 0x0 - TAB1 = 0x400 - TAB2 = 0x800 - TAB3 = 0x4 - TABDLY = 0xc04 - TCIFLUSH = 0x1 - TCIOFF = 0x3 - TCIOFLUSH = 0x3 - TCION = 0x4 - TCOFLUSH = 0x2 - TCOOFF = 0x1 - TCOON = 0x2 - TCP_CONNECTIONTIMEOUT = 0x20 - TCP_CONNECTION_INFO = 0x106 - TCP_ENABLE_ECN = 0x104 - TCP_FASTOPEN = 0x105 - TCP_KEEPALIVE = 0x10 - TCP_KEEPCNT = 0x102 - TCP_KEEPINTVL = 0x101 - TCP_MAXHLEN = 0x3c - TCP_MAXOLEN = 0x28 - TCP_MAXSEG = 0x2 - TCP_MAXWIN = 0xffff - TCP_MAX_SACK = 0x4 - TCP_MAX_WINSHIFT = 0xe - TCP_MINMSS = 0xd8 - TCP_MSS = 0x200 - TCP_NODELAY = 0x1 - TCP_NOOPT = 0x8 - TCP_NOPUSH = 0x4 - TCP_NOTSENT_LOWAT = 0x201 - TCP_RXT_CONNDROPTIME = 0x80 - TCP_RXT_FINDROP = 0x100 - TCP_SENDMOREACKS = 0x103 - TCSAFLUSH = 0x2 - TIOCCBRK = 0x2000747a - TIOCCDTR = 0x20007478 - TIOCCONS = 0x80047462 - TIOCDCDTIMESTAMP = 0x40107458 - TIOCDRAIN = 0x2000745e - TIOCDSIMICROCODE = 0x20007455 - TIOCEXCL = 0x2000740d - TIOCEXT = 0x80047460 - TIOCFLUSH = 0x80047410 - TIOCGDRAINWAIT = 0x40047456 - TIOCGETA = 0x40487413 - TIOCGETD = 0x4004741a - TIOCGPGRP = 0x40047477 - TIOCGWINSZ = 0x40087468 - TIOCIXOFF = 0x20007480 - TIOCIXON = 0x20007481 - TIOCMBIC = 0x8004746b - TIOCMBIS = 0x8004746c - TIOCMGDTRWAIT = 0x4004745a - TIOCMGET = 0x4004746a - TIOCMODG = 0x40047403 - TIOCMODS = 0x80047404 - TIOCMSDTRWAIT = 0x8004745b - TIOCMSET = 0x8004746d - TIOCM_CAR = 0x40 - TIOCM_CD = 0x40 - TIOCM_CTS = 0x20 - TIOCM_DSR = 0x100 - TIOCM_DTR = 0x2 - TIOCM_LE = 0x1 - TIOCM_RI = 0x80 - TIOCM_RNG = 0x80 - TIOCM_RTS = 0x4 - TIOCM_SR = 0x10 - TIOCM_ST = 0x8 - TIOCNOTTY = 0x20007471 - TIOCNXCL = 0x2000740e - TIOCOUTQ = 0x40047473 - TIOCPKT = 0x80047470 - TIOCPKT_DATA = 0x0 - TIOCPKT_DOSTOP = 0x20 - TIOCPKT_FLUSHREAD = 0x1 - TIOCPKT_FLUSHWRITE = 0x2 - TIOCPKT_IOCTL = 0x40 - TIOCPKT_NOSTOP = 0x10 - TIOCPKT_START = 0x8 - TIOCPKT_STOP = 0x4 - TIOCPTYGNAME = 0x40807453 - TIOCPTYGRANT = 0x20007454 - TIOCPTYUNLK = 0x20007452 - TIOCREMOTE = 0x80047469 - TIOCSBRK = 0x2000747b 
- TIOCSCONS = 0x20007463 - TIOCSCTTY = 0x20007461 - TIOCSDRAINWAIT = 0x80047457 - TIOCSDTR = 0x20007479 - TIOCSETA = 0x80487414 - TIOCSETAF = 0x80487416 - TIOCSETAW = 0x80487415 - TIOCSETD = 0x8004741b - TIOCSIG = 0x2000745f - TIOCSPGRP = 0x80047476 - TIOCSTART = 0x2000746e - TIOCSTAT = 0x20007465 - TIOCSTI = 0x80017472 - TIOCSTOP = 0x2000746f - TIOCSWINSZ = 0x80087467 - TIOCTIMESTAMP = 0x40107459 - TIOCUCNTL = 0x80047466 - TOSTOP = 0x400000 - VDISCARD = 0xf - VDSUSP = 0xb - VEOF = 0x0 - VEOL = 0x1 - VEOL2 = 0x2 - VERASE = 0x3 - VINTR = 0x8 - VKILL = 0x5 - VLNEXT = 0xe - VMIN = 0x10 - VM_LOADAVG = 0x2 - VM_MACHFACTOR = 0x4 - VM_MAXID = 0x6 - VM_METER = 0x1 - VM_SWAPUSAGE = 0x5 - VQUIT = 0x9 - VREPRINT = 0x6 - VSTART = 0xc - VSTATUS = 0x12 - VSTOP = 0xd - VSUSP = 0xa - VT0 = 0x0 - VT1 = 0x10000 - VTDLY = 0x10000 - VTIME = 0x11 - VWERASE = 0x4 - WCONTINUED = 0x10 - WCOREFLAG = 0x80 - WEXITED = 0x4 - WNOHANG = 0x1 - WNOWAIT = 0x20 - WORDSIZE = 0x40 - WSTOPPED = 0x8 - WUNTRACED = 0x2 - XATTR_CREATE = 0x2 - XATTR_NODEFAULT = 0x10 - XATTR_NOFOLLOW = 0x1 - XATTR_NOSECURITY = 0x8 - XATTR_REPLACE = 0x4 - XATTR_SHOWCOMPRESSION = 0x20 -) - -// Errors -const ( - E2BIG = syscall.Errno(0x7) - EACCES = syscall.Errno(0xd) - EADDRINUSE = syscall.Errno(0x30) - EADDRNOTAVAIL = syscall.Errno(0x31) - EAFNOSUPPORT = syscall.Errno(0x2f) - EAGAIN = syscall.Errno(0x23) - EALREADY = syscall.Errno(0x25) - EAUTH = syscall.Errno(0x50) - EBADARCH = syscall.Errno(0x56) - EBADEXEC = syscall.Errno(0x55) - EBADF = syscall.Errno(0x9) - EBADMACHO = syscall.Errno(0x58) - EBADMSG = syscall.Errno(0x5e) - EBADRPC = syscall.Errno(0x48) - EBUSY = syscall.Errno(0x10) - ECANCELED = syscall.Errno(0x59) - ECHILD = syscall.Errno(0xa) - ECONNABORTED = syscall.Errno(0x35) - ECONNREFUSED = syscall.Errno(0x3d) - ECONNRESET = syscall.Errno(0x36) - EDEADLK = syscall.Errno(0xb) - EDESTADDRREQ = syscall.Errno(0x27) - EDEVERR = syscall.Errno(0x53) - EDOM = syscall.Errno(0x21) - EDQUOT = syscall.Errno(0x45) - EEXIST = 
syscall.Errno(0x11) - EFAULT = syscall.Errno(0xe) - EFBIG = syscall.Errno(0x1b) - EFTYPE = syscall.Errno(0x4f) - EHOSTDOWN = syscall.Errno(0x40) - EHOSTUNREACH = syscall.Errno(0x41) - EIDRM = syscall.Errno(0x5a) - EILSEQ = syscall.Errno(0x5c) - EINPROGRESS = syscall.Errno(0x24) - EINTR = syscall.Errno(0x4) - EINVAL = syscall.Errno(0x16) - EIO = syscall.Errno(0x5) - EISCONN = syscall.Errno(0x38) - EISDIR = syscall.Errno(0x15) - ELAST = syscall.Errno(0x6a) - ELOOP = syscall.Errno(0x3e) - EMFILE = syscall.Errno(0x18) - EMLINK = syscall.Errno(0x1f) - EMSGSIZE = syscall.Errno(0x28) - EMULTIHOP = syscall.Errno(0x5f) - ENAMETOOLONG = syscall.Errno(0x3f) - ENEEDAUTH = syscall.Errno(0x51) - ENETDOWN = syscall.Errno(0x32) - ENETRESET = syscall.Errno(0x34) - ENETUNREACH = syscall.Errno(0x33) - ENFILE = syscall.Errno(0x17) - ENOATTR = syscall.Errno(0x5d) - ENOBUFS = syscall.Errno(0x37) - ENODATA = syscall.Errno(0x60) - ENODEV = syscall.Errno(0x13) - ENOENT = syscall.Errno(0x2) - ENOEXEC = syscall.Errno(0x8) - ENOLCK = syscall.Errno(0x4d) - ENOLINK = syscall.Errno(0x61) - ENOMEM = syscall.Errno(0xc) - ENOMSG = syscall.Errno(0x5b) - ENOPOLICY = syscall.Errno(0x67) - ENOPROTOOPT = syscall.Errno(0x2a) - ENOSPC = syscall.Errno(0x1c) - ENOSR = syscall.Errno(0x62) - ENOSTR = syscall.Errno(0x63) - ENOSYS = syscall.Errno(0x4e) - ENOTBLK = syscall.Errno(0xf) - ENOTCONN = syscall.Errno(0x39) - ENOTDIR = syscall.Errno(0x14) - ENOTEMPTY = syscall.Errno(0x42) - ENOTRECOVERABLE = syscall.Errno(0x68) - ENOTSOCK = syscall.Errno(0x26) - ENOTSUP = syscall.Errno(0x2d) - ENOTTY = syscall.Errno(0x19) - ENXIO = syscall.Errno(0x6) - EOPNOTSUPP = syscall.Errno(0x66) - EOVERFLOW = syscall.Errno(0x54) - EOWNERDEAD = syscall.Errno(0x69) - EPERM = syscall.Errno(0x1) - EPFNOSUPPORT = syscall.Errno(0x2e) - EPIPE = syscall.Errno(0x20) - EPROCLIM = syscall.Errno(0x43) - EPROCUNAVAIL = syscall.Errno(0x4c) - EPROGMISMATCH = syscall.Errno(0x4b) - EPROGUNAVAIL = syscall.Errno(0x4a) - EPROTO = syscall.Errno(0x64) 
- EPROTONOSUPPORT = syscall.Errno(0x2b) - EPROTOTYPE = syscall.Errno(0x29) - EPWROFF = syscall.Errno(0x52) - EQFULL = syscall.Errno(0x6a) - ERANGE = syscall.Errno(0x22) - EREMOTE = syscall.Errno(0x47) - EROFS = syscall.Errno(0x1e) - ERPCMISMATCH = syscall.Errno(0x49) - ESHLIBVERS = syscall.Errno(0x57) - ESHUTDOWN = syscall.Errno(0x3a) - ESOCKTNOSUPPORT = syscall.Errno(0x2c) - ESPIPE = syscall.Errno(0x1d) - ESRCH = syscall.Errno(0x3) - ESTALE = syscall.Errno(0x46) - ETIME = syscall.Errno(0x65) - ETIMEDOUT = syscall.Errno(0x3c) - ETOOMANYREFS = syscall.Errno(0x3b) - ETXTBSY = syscall.Errno(0x1a) - EUSERS = syscall.Errno(0x44) - EWOULDBLOCK = syscall.Errno(0x23) - EXDEV = syscall.Errno(0x12) -) - -// Signals -const ( - SIGABRT = syscall.Signal(0x6) - SIGALRM = syscall.Signal(0xe) - SIGBUS = syscall.Signal(0xa) - SIGCHLD = syscall.Signal(0x14) - SIGCONT = syscall.Signal(0x13) - SIGEMT = syscall.Signal(0x7) - SIGFPE = syscall.Signal(0x8) - SIGHUP = syscall.Signal(0x1) - SIGILL = syscall.Signal(0x4) - SIGINFO = syscall.Signal(0x1d) - SIGINT = syscall.Signal(0x2) - SIGIO = syscall.Signal(0x17) - SIGIOT = syscall.Signal(0x6) - SIGKILL = syscall.Signal(0x9) - SIGPIPE = syscall.Signal(0xd) - SIGPROF = syscall.Signal(0x1b) - SIGQUIT = syscall.Signal(0x3) - SIGSEGV = syscall.Signal(0xb) - SIGSTOP = syscall.Signal(0x11) - SIGSYS = syscall.Signal(0xc) - SIGTERM = syscall.Signal(0xf) - SIGTRAP = syscall.Signal(0x5) - SIGTSTP = syscall.Signal(0x12) - SIGTTIN = syscall.Signal(0x15) - SIGTTOU = syscall.Signal(0x16) - SIGURG = syscall.Signal(0x10) - SIGUSR1 = syscall.Signal(0x1e) - SIGUSR2 = syscall.Signal(0x1f) - SIGVTALRM = syscall.Signal(0x1a) - SIGWINCH = syscall.Signal(0x1c) - SIGXCPU = syscall.Signal(0x18) - SIGXFSZ = syscall.Signal(0x19) -) - -// Error table -var errorList = [...]struct { - num syscall.Errno - name string - desc string -}{ - {1, "EPERM", "operation not permitted"}, - {2, "ENOENT", "no such file or directory"}, - {3, "ESRCH", "no such process"}, - {4, "EINTR", 
"interrupted system call"}, - {5, "EIO", "input/output error"}, - {6, "ENXIO", "device not configured"}, - {7, "E2BIG", "argument list too long"}, - {8, "ENOEXEC", "exec format error"}, - {9, "EBADF", "bad file descriptor"}, - {10, "ECHILD", "no child processes"}, - {11, "EDEADLK", "resource deadlock avoided"}, - {12, "ENOMEM", "cannot allocate memory"}, - {13, "EACCES", "permission denied"}, - {14, "EFAULT", "bad address"}, - {15, "ENOTBLK", "block device required"}, - {16, "EBUSY", "resource busy"}, - {17, "EEXIST", "file exists"}, - {18, "EXDEV", "cross-device link"}, - {19, "ENODEV", "operation not supported by device"}, - {20, "ENOTDIR", "not a directory"}, - {21, "EISDIR", "is a directory"}, - {22, "EINVAL", "invalid argument"}, - {23, "ENFILE", "too many open files in system"}, - {24, "EMFILE", "too many open files"}, - {25, "ENOTTY", "inappropriate ioctl for device"}, - {26, "ETXTBSY", "text file busy"}, - {27, "EFBIG", "file too large"}, - {28, "ENOSPC", "no space left on device"}, - {29, "ESPIPE", "illegal seek"}, - {30, "EROFS", "read-only file system"}, - {31, "EMLINK", "too many links"}, - {32, "EPIPE", "broken pipe"}, - {33, "EDOM", "numerical argument out of domain"}, - {34, "ERANGE", "result too large"}, - {35, "EAGAIN", "resource temporarily unavailable"}, - {36, "EINPROGRESS", "operation now in progress"}, - {37, "EALREADY", "operation already in progress"}, - {38, "ENOTSOCK", "socket operation on non-socket"}, - {39, "EDESTADDRREQ", "destination address required"}, - {40, "EMSGSIZE", "message too long"}, - {41, "EPROTOTYPE", "protocol wrong type for socket"}, - {42, "ENOPROTOOPT", "protocol not available"}, - {43, "EPROTONOSUPPORT", "protocol not supported"}, - {44, "ESOCKTNOSUPPORT", "socket type not supported"}, - {45, "ENOTSUP", "operation not supported"}, - {46, "EPFNOSUPPORT", "protocol family not supported"}, - {47, "EAFNOSUPPORT", "address family not supported by protocol family"}, - {48, "EADDRINUSE", "address already in use"}, - {49, 
"EADDRNOTAVAIL", "can't assign requested address"}, - {50, "ENETDOWN", "network is down"}, - {51, "ENETUNREACH", "network is unreachable"}, - {52, "ENETRESET", "network dropped connection on reset"}, - {53, "ECONNABORTED", "software caused connection abort"}, - {54, "ECONNRESET", "connection reset by peer"}, - {55, "ENOBUFS", "no buffer space available"}, - {56, "EISCONN", "socket is already connected"}, - {57, "ENOTCONN", "socket is not connected"}, - {58, "ESHUTDOWN", "can't send after socket shutdown"}, - {59, "ETOOMANYREFS", "too many references: can't splice"}, - {60, "ETIMEDOUT", "operation timed out"}, - {61, "ECONNREFUSED", "connection refused"}, - {62, "ELOOP", "too many levels of symbolic links"}, - {63, "ENAMETOOLONG", "file name too long"}, - {64, "EHOSTDOWN", "host is down"}, - {65, "EHOSTUNREACH", "no route to host"}, - {66, "ENOTEMPTY", "directory not empty"}, - {67, "EPROCLIM", "too many processes"}, - {68, "EUSERS", "too many users"}, - {69, "EDQUOT", "disc quota exceeded"}, - {70, "ESTALE", "stale NFS file handle"}, - {71, "EREMOTE", "too many levels of remote in path"}, - {72, "EBADRPC", "RPC struct is bad"}, - {73, "ERPCMISMATCH", "RPC version wrong"}, - {74, "EPROGUNAVAIL", "RPC prog. 
not avail"}, - {75, "EPROGMISMATCH", "program version wrong"}, - {76, "EPROCUNAVAIL", "bad procedure for program"}, - {77, "ENOLCK", "no locks available"}, - {78, "ENOSYS", "function not implemented"}, - {79, "EFTYPE", "inappropriate file type or format"}, - {80, "EAUTH", "authentication error"}, - {81, "ENEEDAUTH", "need authenticator"}, - {82, "EPWROFF", "device power is off"}, - {83, "EDEVERR", "device error"}, - {84, "EOVERFLOW", "value too large to be stored in data type"}, - {85, "EBADEXEC", "bad executable (or shared library)"}, - {86, "EBADARCH", "bad CPU type in executable"}, - {87, "ESHLIBVERS", "shared library version mismatch"}, - {88, "EBADMACHO", "malformed Mach-o file"}, - {89, "ECANCELED", "operation canceled"}, - {90, "EIDRM", "identifier removed"}, - {91, "ENOMSG", "no message of desired type"}, - {92, "EILSEQ", "illegal byte sequence"}, - {93, "ENOATTR", "attribute not found"}, - {94, "EBADMSG", "bad message"}, - {95, "EMULTIHOP", "EMULTIHOP (Reserved)"}, - {96, "ENODATA", "no message available on STREAM"}, - {97, "ENOLINK", "ENOLINK (Reserved)"}, - {98, "ENOSR", "no STREAM resources"}, - {99, "ENOSTR", "not a STREAM"}, - {100, "EPROTO", "protocol error"}, - {101, "ETIME", "STREAM ioctl timeout"}, - {102, "EOPNOTSUPP", "operation not supported on socket"}, - {103, "ENOPOLICY", "policy not found"}, - {104, "ENOTRECOVERABLE", "state not recoverable"}, - {105, "EOWNERDEAD", "previous owner died"}, - {106, "EQFULL", "interface output queue is full"}, -} - -// Signal table -var signalList = [...]struct { - num syscall.Signal - name string - desc string -}{ - {1, "SIGHUP", "hangup"}, - {2, "SIGINT", "interrupt"}, - {3, "SIGQUIT", "quit"}, - {4, "SIGILL", "illegal instruction"}, - {5, "SIGTRAP", "trace/BPT trap"}, - {6, "SIGABRT", "abort trap"}, - {7, "SIGEMT", "EMT trap"}, - {8, "SIGFPE", "floating point exception"}, - {9, "SIGKILL", "killed"}, - {10, "SIGBUS", "bus error"}, - {11, "SIGSEGV", "segmentation fault"}, - {12, "SIGSYS", "bad system call"}, 
- {13, "SIGPIPE", "broken pipe"}, - {14, "SIGALRM", "alarm clock"}, - {15, "SIGTERM", "terminated"}, - {16, "SIGURG", "urgent I/O condition"}, - {17, "SIGSTOP", "suspended (signal)"}, - {18, "SIGTSTP", "suspended"}, - {19, "SIGCONT", "continued"}, - {20, "SIGCHLD", "child exited"}, - {21, "SIGTTIN", "stopped (tty input)"}, - {22, "SIGTTOU", "stopped (tty output)"}, - {23, "SIGIO", "I/O possible"}, - {24, "SIGXCPU", "cputime limit exceeded"}, - {25, "SIGXFSZ", "filesize limit exceeded"}, - {26, "SIGVTALRM", "virtual timer expired"}, - {27, "SIGPROF", "profiling timer expired"}, - {28, "SIGWINCH", "window size changes"}, - {29, "SIGINFO", "information request"}, - {30, "SIGUSR1", "user defined signal 1"}, - {31, "SIGUSR2", "user defined signal 2"}, -} diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go index b40fb1f69..e644eaf5e 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go @@ -1,6 +1,7 @@ // mkerrors.sh -m64 // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build arm64 && darwin // +build arm64,darwin // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
@@ -32,7 +33,7 @@ const ( AF_LAT = 0xe AF_LINK = 0x12 AF_LOCAL = 0x1 - AF_MAX = 0x28 + AF_MAX = 0x29 AF_NATM = 0x1f AF_NDRV = 0x1b AF_NETBIOS = 0x21 @@ -49,6 +50,7 @@ const ( AF_UNIX = 0x1 AF_UNSPEC = 0x0 AF_UTUN = 0x26 + AF_VSOCK = 0x28 ALTWERASE = 0x200 ATTR_BIT_MAP_COUNT = 0x5 ATTR_CMN_ACCESSMASK = 0x20000 @@ -83,7 +85,7 @@ const ( ATTR_CMN_PAROBJID = 0x80 ATTR_CMN_RETURNED_ATTRS = 0x80000000 ATTR_CMN_SCRIPT = 0x100 - ATTR_CMN_SETMASK = 0x41c7ff00 + ATTR_CMN_SETMASK = 0x51c7ff00 ATTR_CMN_USERACCESS = 0x200000 ATTR_CMN_UUID = 0x800000 ATTR_CMN_VALIDMASK = 0xffffffff @@ -357,7 +359,7 @@ const ( DLT_LINUX_SLL = 0x71 DLT_LOOP = 0x6c DLT_LTALK = 0x72 - DLT_MATCHING_MAX = 0xf5 + DLT_MATCHING_MAX = 0x10a DLT_MATCHING_MIN = 0x68 DLT_MFR = 0xb6 DLT_MOST = 0xd3 @@ -398,6 +400,7 @@ const ( DLT_SYMANTEC_FIREWALL = 0x63 DLT_TZSP = 0x80 DLT_USB = 0xba + DLT_USB_DARWIN = 0x10a DLT_USB_LINUX = 0xbd DLT_USB_LINUX_MMAPPED = 0xdc DLT_USER0 = 0x93 @@ -442,8 +445,8 @@ const ( EVFILT_PROC = -0x5 EVFILT_READ = -0x1 EVFILT_SIGNAL = -0x6 - EVFILT_SYSCOUNT = 0xf - EVFILT_THREADMARKER = 0xf + EVFILT_SYSCOUNT = 0x11 + EVFILT_THREADMARKER = 0x11 EVFILT_TIMER = -0x7 EVFILT_USER = -0xa EVFILT_VM = -0xc @@ -481,9 +484,12 @@ const ( FSOPT_NOINMEMUPDATE = 0x2 FSOPT_PACK_INVAL_ATTRS = 0x8 FSOPT_REPORT_FULLSIZE = 0x4 + FSOPT_RETURN_REALDEV = 0x200 F_ADDFILESIGS = 0x3d F_ADDFILESIGS_FOR_DYLD_SIM = 0x53 + F_ADDFILESIGS_INFO = 0x67 F_ADDFILESIGS_RETURN = 0x61 + F_ADDFILESUPPL = 0x68 F_ADDSIGS = 0x3b F_ALLOCATEALL = 0x4 F_ALLOCATECONTIG = 0x2 @@ -505,8 +511,10 @@ const ( F_GETOWN = 0x5 F_GETPATH = 0x32 F_GETPATH_MTMINFO = 0x47 + F_GETPATH_NOFIRMLINK = 0x66 F_GETPROTECTIONCLASS = 0x3f F_GETPROTECTIONLEVEL = 0x4d + F_GETSIGSINFO = 0x69 F_GLOBAL_NOCACHE = 0x37 F_LOG2PHYS = 0x31 F_LOG2PHYS_EXT = 0x41 @@ -531,6 +539,7 @@ const ( F_SETPROTECTIONCLASS = 0x40 F_SETSIZE = 0x2b F_SINGLE_WRITER = 0x4c + F_SPECULATIVE_READ = 0x65 F_THAW_FS = 0x36 F_TRANSCODEKEY = 0x4b F_TRIM_ACTIVE_FILE = 0x64 @@ -562,6 +571,7 @@ 
const ( IFF_UP = 0x1 IFNAMSIZ = 0x10 IFT_1822 = 0x2 + IFT_6LOWPAN = 0x40 IFT_AAL5 = 0x31 IFT_ARCNET = 0x23 IFT_ARCNETPLUS = 0x24 @@ -766,16 +776,28 @@ const ( IPV6_2292PKTINFO = 0x13 IPV6_2292PKTOPTIONS = 0x19 IPV6_2292RTHDR = 0x18 + IPV6_3542DSTOPTS = 0x32 + IPV6_3542HOPLIMIT = 0x2f + IPV6_3542HOPOPTS = 0x31 + IPV6_3542NEXTHOP = 0x30 + IPV6_3542PKTINFO = 0x2e + IPV6_3542RTHDR = 0x33 + IPV6_ADDR_MC_FLAGS_PREFIX = 0x20 + IPV6_ADDR_MC_FLAGS_TRANSIENT = 0x10 + IPV6_ADDR_MC_FLAGS_UNICAST_BASED = 0x30 + IPV6_AUTOFLOWLABEL = 0x3b IPV6_BINDV6ONLY = 0x1b IPV6_BOUND_IF = 0x7d IPV6_CHECKSUM = 0x1a IPV6_DEFAULT_MULTICAST_HOPS = 0x1 IPV6_DEFAULT_MULTICAST_LOOP = 0x1 IPV6_DEFHLIM = 0x40 + IPV6_DONTFRAG = 0x3e + IPV6_DSTOPTS = 0x32 IPV6_FAITH = 0x1d IPV6_FLOWINFO_MASK = 0xffffff0f IPV6_FLOWLABEL_MASK = 0xffff0f00 - IPV6_FLOW_ECN_MASK = 0x300 + IPV6_FLOW_ECN_MASK = 0x3000 IPV6_FRAGTTL = 0x3c IPV6_FW_ADD = 0x1e IPV6_FW_DEL = 0x1f @@ -783,6 +805,8 @@ const ( IPV6_FW_GET = 0x22 IPV6_FW_ZERO = 0x21 IPV6_HLIMDEC = 0x1 + IPV6_HOPLIMIT = 0x2f + IPV6_HOPOPTS = 0x31 IPV6_IPSEC_POLICY = 0x1c IPV6_JOIN_GROUP = 0xc IPV6_LEAVE_GROUP = 0xd @@ -794,20 +818,34 @@ const ( IPV6_MAX_SOCK_SRC_FILTER = 0x80 IPV6_MIN_MEMBERSHIPS = 0x1f IPV6_MMTU = 0x500 + IPV6_MSFILTER = 0x4a IPV6_MULTICAST_HOPS = 0xa IPV6_MULTICAST_IF = 0x9 IPV6_MULTICAST_LOOP = 0xb + IPV6_NEXTHOP = 0x30 + IPV6_PATHMTU = 0x2c + IPV6_PKTINFO = 0x2e IPV6_PORTRANGE = 0xe IPV6_PORTRANGE_DEFAULT = 0x0 IPV6_PORTRANGE_HIGH = 0x1 IPV6_PORTRANGE_LOW = 0x2 + IPV6_PREFER_TEMPADDR = 0x3f + IPV6_RECVDSTOPTS = 0x28 + IPV6_RECVHOPLIMIT = 0x25 + IPV6_RECVHOPOPTS = 0x27 + IPV6_RECVPATHMTU = 0x2b + IPV6_RECVPKTINFO = 0x3d + IPV6_RECVRTHDR = 0x26 IPV6_RECVTCLASS = 0x23 + IPV6_RTHDR = 0x33 + IPV6_RTHDRDSTOPTS = 0x39 IPV6_RTHDR_LOOSE = 0x0 IPV6_RTHDR_STRICT = 0x1 IPV6_RTHDR_TYPE_0 = 0x0 IPV6_SOCKOPT_RESERVED1 = 0x3 IPV6_TCLASS = 0x24 IPV6_UNICAST_HOPS = 0x4 + IPV6_USE_MIN_MTU = 0x2a IPV6_V6ONLY = 0x1b IPV6_VERSION = 0x60 IPV6_VERSION_MASK = 0xf0 @@ 
-818,6 +856,7 @@ const ( IP_DEFAULT_MULTICAST_LOOP = 0x1 IP_DEFAULT_MULTICAST_TTL = 0x1 IP_DF = 0x4000 + IP_DONTFRAG = 0x1c IP_DROP_MEMBERSHIP = 0xd IP_DROP_SOURCE_MEMBERSHIP = 0x47 IP_DUMMYNET_CONFIGURE = 0x3c @@ -889,6 +928,12 @@ const ( KERN_OSRELEASE = 0x2 KERN_OSTYPE = 0x1 KERN_VERSION = 0x4 + LOCAL_PEERCRED = 0x1 + LOCAL_PEEREPID = 0x3 + LOCAL_PEEREUUID = 0x5 + LOCAL_PEERPID = 0x2 + LOCAL_PEERTOKEN = 0x6 + LOCAL_PEERUUID = 0x4 LOCK_EX = 0x2 LOCK_NB = 0x4 LOCK_SH = 0x1 @@ -904,6 +949,7 @@ const ( MADV_SEQUENTIAL = 0x2 MADV_WILLNEED = 0x3 MADV_ZERO_WIRED_PAGES = 0x6 + MAP_32BIT = 0x8000 MAP_ANON = 0x1000 MAP_ANONYMOUS = 0x1000 MAP_COPY = 0x2 @@ -920,6 +966,17 @@ const ( MAP_RESILIENT_CODESIGN = 0x2000 MAP_RESILIENT_MEDIA = 0x4000 MAP_SHARED = 0x1 + MAP_TRANSLATED_ALLOW_EXECUTE = 0x20000 + MAP_UNIX03 = 0x40000 + MCAST_BLOCK_SOURCE = 0x54 + MCAST_EXCLUDE = 0x2 + MCAST_INCLUDE = 0x1 + MCAST_JOIN_GROUP = 0x50 + MCAST_JOIN_SOURCE_GROUP = 0x52 + MCAST_LEAVE_GROUP = 0x51 + MCAST_LEAVE_SOURCE_GROUP = 0x53 + MCAST_UNBLOCK_SOURCE = 0x55 + MCAST_UNDEFINED = 0x0 MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 MNT_ASYNC = 0x40 @@ -931,6 +988,7 @@ const ( MNT_DOVOLFS = 0x8000 MNT_DWAIT = 0x4 MNT_EXPORTED = 0x100 + MNT_EXT_ROOT_DATA_VOL = 0x1 MNT_FORCE = 0x80000 MNT_IGNORE_OWNERSHIP = 0x200000 MNT_JOURNALED = 0x800000 @@ -947,12 +1005,15 @@ const ( MNT_QUOTA = 0x2000 MNT_RDONLY = 0x1 MNT_RELOAD = 0x40000 + MNT_REMOVABLE = 0x200 MNT_ROOTFS = 0x4000 + MNT_SNAPSHOT = 0x40000000 + MNT_STRICTATIME = 0x80000000 MNT_SYNCHRONOUS = 0x2 MNT_UNION = 0x20 MNT_UNKNOWNPERMISSIONS = 0x200000 MNT_UPDATE = 0x10000 - MNT_VISFLAGMASK = 0x17f0f5ff + MNT_VISFLAGMASK = 0xd7f0f7ff MNT_WAIT = 0x1 MSG_CTRUNC = 0x20 MSG_DONTROUTE = 0x4 @@ -963,6 +1024,7 @@ const ( MSG_HAVEMORE = 0x2000 MSG_HOLD = 0x800 MSG_NEEDSA = 0x10000 + MSG_NOSIGNAL = 0x80000 MSG_OOB = 0x1 MSG_PEEK = 0x2 MSG_RCVMORE = 0x4000 @@ -979,9 +1041,10 @@ const ( NET_RT_DUMP = 0x1 NET_RT_DUMP2 = 0x7 NET_RT_FLAGS = 0x2 + NET_RT_FLAGS_PRIV = 0xa 
NET_RT_IFLIST = 0x3 NET_RT_IFLIST2 = 0x6 - NET_RT_MAXID = 0xa + NET_RT_MAXID = 0xb NET_RT_STAT = 0x4 NET_RT_TRASH = 0x5 NFDBITS = 0x20 @@ -1019,6 +1082,7 @@ const ( NOTE_LEEWAY = 0x10 NOTE_LINK = 0x10 NOTE_LOWAT = 0x1 + NOTE_MACHTIME = 0x100 NOTE_MACH_CONTINUOUS_TIME = 0x80 NOTE_NONE = 0x80 NOTE_NSECONDS = 0x4 @@ -1065,6 +1129,7 @@ const ( O_NDELAY = 0x4 O_NOCTTY = 0x20000 O_NOFOLLOW = 0x100 + O_NOFOLLOW_ANY = 0x20000000 O_NONBLOCK = 0x4 O_POPUP = 0x80000000 O_RDONLY = 0x0 @@ -1136,6 +1201,7 @@ const ( RTF_BROADCAST = 0x400000 RTF_CLONING = 0x100 RTF_CONDEMNED = 0x2000000 + RTF_DEAD = 0x20000000 RTF_DELCLONE = 0x80 RTF_DONE = 0x40 RTF_DYNAMIC = 0x10 @@ -1143,6 +1209,7 @@ const ( RTF_HOST = 0x4 RTF_IFREF = 0x4000000 RTF_IFSCOPE = 0x1000000 + RTF_LLDATA = 0x400 RTF_LLINFO = 0x400 RTF_LOCAL = 0x200000 RTF_MODIFIED = 0x20 @@ -1210,6 +1277,7 @@ const ( SIOCGDRVSPEC = 0xc028697b SIOCGETVLAN = 0xc020697f SIOCGHIWAT = 0x40047301 + SIOCGIF6LOWPAN = 0xc02069c5 SIOCGIFADDR = 0xc0206921 SIOCGIFALTMTU = 0xc0206948 SIOCGIFASYNCMAP = 0xc020697c @@ -1220,6 +1288,7 @@ const ( SIOCGIFDEVMTU = 0xc0206944 SIOCGIFDSTADDR = 0xc0206922 SIOCGIFFLAGS = 0xc0206911 + SIOCGIFFUNCTIONALTYPE = 0xc02069ad SIOCGIFGENERIC = 0xc020693a SIOCGIFKPI = 0xc0206987 SIOCGIFMAC = 0xc0206982 @@ -1233,6 +1302,7 @@ const ( SIOCGIFSTATUS = 0xc331693d SIOCGIFVLAN = 0xc020697f SIOCGIFWAKEFLAGS = 0xc0206988 + SIOCGIFXMEDIA = 0xc02c6948 SIOCGLOWAT = 0x40047303 SIOCGPGRP = 0x40047309 SIOCIFCREATE = 0xc0206978 @@ -1243,6 +1313,7 @@ const ( SIOCSDRVSPEC = 0x8028697b SIOCSETVLAN = 0x8020697e SIOCSHIWAT = 0x80047300 + SIOCSIF6LOWPAN = 0x802069c4 SIOCSIFADDR = 0x8020690c SIOCSIFALTMTU = 0x80206945 SIOCSIFASYNCMAP = 0x8020697d @@ -1270,6 +1341,7 @@ const ( SOCK_RDM = 0x4 SOCK_SEQPACKET = 0x5 SOCK_STREAM = 0x1 + SOL_LOCAL = 0x0 SOL_SOCKET = 0xffff SOMAXCONN = 0x80 SO_ACCEPTCONN = 0x2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go index 
f5e91b7ab..17bba0e44 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go @@ -1,6 +1,7 @@ // mkerrors.sh -m64 // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build amd64 && dragonfly // +build amd64,dragonfly // Code generated by cmd/cgo -godefs; DO NOT EDIT. diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go index 3689c8084..9c7c5e165 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go @@ -1,6 +1,7 @@ // mkerrors.sh -m32 // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build 386 && freebsd // +build 386,freebsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. @@ -997,6 +998,11 @@ const ( KERN_OSRELEASE = 0x2 KERN_OSTYPE = 0x1 KERN_VERSION = 0x4 + LOCAL_CONNWAIT = 0x4 + LOCAL_CREDS = 0x2 + LOCAL_CREDS_PERSISTENT = 0x3 + LOCAL_PEERCRED = 0x1 + LOCAL_VENDOR = 0x80000000 LOCK_EX = 0x2 LOCK_NB = 0x4 LOCK_SH = 0x1 @@ -1375,6 +1381,7 @@ const ( SOCK_RDM = 0x4 SOCK_SEQPACKET = 0x5 SOCK_STREAM = 0x1 + SOL_LOCAL = 0x0 SOL_SOCKET = 0xffff SOMAXCONN = 0x80 SO_ACCEPTCONN = 0x2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go index b8f7c3c93..b265abb25 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go @@ -1,6 +1,7 @@ // mkerrors.sh -m64 // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build amd64 && freebsd // +build amd64,freebsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
@@ -997,6 +998,11 @@ const ( KERN_OSRELEASE = 0x2 KERN_OSTYPE = 0x1 KERN_VERSION = 0x4 + LOCAL_CONNWAIT = 0x4 + LOCAL_CREDS = 0x2 + LOCAL_CREDS_PERSISTENT = 0x3 + LOCAL_PEERCRED = 0x1 + LOCAL_VENDOR = 0x80000000 LOCK_EX = 0x2 LOCK_NB = 0x4 LOCK_SH = 0x1 @@ -1376,6 +1382,7 @@ const ( SOCK_RDM = 0x4 SOCK_SEQPACKET = 0x5 SOCK_STREAM = 0x1 + SOL_LOCAL = 0x0 SOL_SOCKET = 0xffff SOMAXCONN = 0x80 SO_ACCEPTCONN = 0x2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go index be14bb1a4..3df99f285 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go @@ -1,6 +1,7 @@ // mkerrors.sh // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build arm && freebsd // +build arm,freebsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. @@ -980,6 +981,11 @@ const ( KERN_OSRELEASE = 0x2 KERN_OSTYPE = 0x1 KERN_VERSION = 0x4 + LOCAL_CONNWAIT = 0x4 + LOCAL_CREDS = 0x2 + LOCAL_CREDS_PERSISTENT = 0x3 + LOCAL_PEERCRED = 0x1 + LOCAL_VENDOR = 0x80000000 LOCK_EX = 0x2 LOCK_NB = 0x4 LOCK_SH = 0x1 @@ -1016,6 +1022,15 @@ const ( MAP_RESERVED0100 = 0x100 MAP_SHARED = 0x1 MAP_STACK = 0x400 + MCAST_BLOCK_SOURCE = 0x54 + MCAST_EXCLUDE = 0x2 + MCAST_INCLUDE = 0x1 + MCAST_JOIN_GROUP = 0x50 + MCAST_JOIN_SOURCE_GROUP = 0x52 + MCAST_LEAVE_GROUP = 0x51 + MCAST_LEAVE_SOURCE_GROUP = 0x53 + MCAST_UNBLOCK_SOURCE = 0x55 + MCAST_UNDEFINED = 0x0 MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 MNT_ACLS = 0x8000000 @@ -1341,6 +1356,7 @@ const ( SOCK_RDM = 0x4 SOCK_SEQPACKET = 0x5 SOCK_STREAM = 0x1 + SOL_LOCAL = 0x0 SOL_SOCKET = 0xffff SOMAXCONN = 0x80 SO_ACCEPTCONN = 0x2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go index 7ce9c0081..218d39906 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go @@ -1,6 +1,7 @@ // mkerrors.sh -m64 
// Code generated by the command above; see README.md. DO NOT EDIT. +//go:build arm64 && freebsd // +build arm64,freebsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. @@ -997,6 +998,11 @@ const ( KERN_OSRELEASE = 0x2 KERN_OSTYPE = 0x1 KERN_VERSION = 0x4 + LOCAL_CONNWAIT = 0x4 + LOCAL_CREDS = 0x2 + LOCAL_CREDS_PERSISTENT = 0x3 + LOCAL_PEERCRED = 0x1 + LOCAL_VENDOR = 0x80000000 LOCK_EX = 0x2 LOCK_NB = 0x4 LOCK_SH = 0x1 @@ -1376,6 +1382,7 @@ const ( SOCK_RDM = 0x4 SOCK_SEQPACKET = 0x5 SOCK_STREAM = 0x1 + SOL_LOCAL = 0x0 SOL_SOCKET = 0xffff SOMAXCONN = 0x80 SO_ACCEPTCONN = 0x2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index b3463a8b5..c3fa22486 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -1,5 +1,6 @@ // Code generated by mkmerge.go; DO NOT EDIT. +//go:build linux // +build linux package unix @@ -165,13 +166,16 @@ const ( BPF_ALU64 = 0x7 BPF_AND = 0x50 BPF_ARSH = 0xc0 + BPF_ATOMIC = 0xc0 BPF_B = 0x10 BPF_BUILD_ID_SIZE = 0x14 BPF_CALL = 0x80 + BPF_CMPXCHG = 0xf1 BPF_DIV = 0x30 BPF_DW = 0x18 BPF_END = 0xd0 BPF_EXIT = 0x90 + BPF_FETCH = 0x1 BPF_FROM_BE = 0x8 BPF_FROM_LE = 0x0 BPF_FS_MAGIC = 0xcafe4a11 @@ -239,11 +243,16 @@ const ( BPF_W = 0x0 BPF_X = 0x8 BPF_XADD = 0xc0 + BPF_XCHG = 0xe1 BPF_XOR = 0xa0 BRKINT = 0x2 BS0 = 0x0 BTRFS_SUPER_MAGIC = 0x9123683e BTRFS_TEST_MAGIC = 0x73727279 + BUS_BLUETOOTH = 0x5 + BUS_HIL = 0x4 + BUS_USB = 0x3 + BUS_VIRTUAL = 0x6 CAN_BCM = 0x2 CAN_EFF_FLAG = 0x80000000 CAN_EFF_ID_BITS = 0x1d @@ -313,6 +322,7 @@ const ( CAN_J1939 = 0x7 CAN_MAX_DLC = 0x8 CAN_MAX_DLEN = 0x8 + CAN_MAX_RAW_DLC = 0xf CAN_MCNET = 0x5 CAN_MTU = 0x10 CAN_NPROTO = 0x8 @@ -484,9 +494,9 @@ const ( DM_UUID_FLAG = 0x4000 DM_UUID_LEN = 0x81 DM_VERSION = 0xc138fd00 - DM_VERSION_EXTRA = "-ioctl (2020-10-01)" + DM_VERSION_EXTRA = "-ioctl (2021-02-01)" DM_VERSION_MAJOR = 0x4 - DM_VERSION_MINOR = 0x2b + DM_VERSION_MINOR = 0x2c 
DM_VERSION_PATCHLEVEL = 0x0 DT_BLK = 0x6 DT_CHR = 0x2 @@ -664,6 +674,7 @@ const ( ETH_P_CAIF = 0xf7 ETH_P_CAN = 0xc ETH_P_CANFD = 0xd + ETH_P_CFM = 0x8902 ETH_P_CONTROL = 0x16 ETH_P_CUST = 0x6006 ETH_P_DDCMP = 0x6 @@ -834,7 +845,6 @@ const ( FSCRYPT_POLICY_FLAGS_PAD_4 = 0x0 FSCRYPT_POLICY_FLAGS_PAD_8 = 0x1 FSCRYPT_POLICY_FLAGS_PAD_MASK = 0x3 - FSCRYPT_POLICY_FLAGS_VALID = 0x1f FSCRYPT_POLICY_FLAG_DIRECT_KEY = 0x4 FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32 = 0x10 FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64 = 0x8 @@ -854,6 +864,7 @@ const ( FS_IOC_GET_ENCRYPTION_KEY_STATUS = 0xc080661a FS_IOC_GET_ENCRYPTION_POLICY_EX = 0xc0096616 FS_IOC_MEASURE_VERITY = 0xc0046686 + FS_IOC_READ_VERITY_METADATA = 0xc0286687 FS_IOC_REMOVE_ENCRYPTION_KEY = 0xc0406618 FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS = 0xc0406619 FS_KEY_DESCRIPTOR_SIZE = 0x8 @@ -865,10 +876,13 @@ const ( FS_POLICY_FLAGS_PAD_4 = 0x0 FS_POLICY_FLAGS_PAD_8 = 0x1 FS_POLICY_FLAGS_PAD_MASK = 0x3 - FS_POLICY_FLAGS_VALID = 0x1f + FS_POLICY_FLAGS_VALID = 0x7 FS_VERITY_FL = 0x100000 FS_VERITY_HASH_ALG_SHA256 = 0x1 FS_VERITY_HASH_ALG_SHA512 = 0x2 + FS_VERITY_METADATA_TYPE_DESCRIPTOR = 0x2 + FS_VERITY_METADATA_TYPE_MERKLE_TREE = 0x1 + FS_VERITY_METADATA_TYPE_SIGNATURE = 0x3 FUTEXFS_SUPER_MAGIC = 0xbad1dea F_ADD_SEALS = 0x409 F_DUPFD = 0x0 @@ -962,11 +976,17 @@ const ( HDIO_SET_XFER = 0x306 HDIO_TRISTATE_HWIF = 0x31b HDIO_UNREGISTER_HWIF = 0x32a + HID_MAX_DESCRIPTOR_SIZE = 0x1000 HOSTFS_SUPER_MAGIC = 0xc0ffee HPFS_SUPER_MAGIC = 0xf995e849 HUGETLBFS_MAGIC = 0x958458f6 IBSHIFT = 0x10 ICMPV6_FILTER = 0x1 + ICMPV6_FILTER_BLOCK = 0x1 + ICMPV6_FILTER_BLOCKOTHERS = 0x3 + ICMPV6_FILTER_PASS = 0x2 + ICMPV6_FILTER_PASSONLY = 0x4 + ICMP_FILTER = 0x1 ICRNL = 0x100 IFA_F_DADFAILED = 0x8 IFA_F_DEPRECATED = 0x20 @@ -1138,6 +1158,7 @@ const ( IPV6_PMTUDISC_WANT = 0x1 IPV6_RECVDSTOPTS = 0x3a IPV6_RECVERR = 0x19 + IPV6_RECVERR_RFC4884 = 0x1f IPV6_RECVFRAGSIZE = 0x4d IPV6_RECVHOPLIMIT = 0x33 IPV6_RECVHOPOPTS = 0x35 @@ -1202,6 +1223,7 @@ const ( IP_PMTUDISC_PROBE 
= 0x3 IP_PMTUDISC_WANT = 0x1 IP_RECVERR = 0xb + IP_RECVERR_RFC4884 = 0x1a IP_RECVFRAGSIZE = 0x19 IP_RECVOPTS = 0x6 IP_RECVORIGDSTADDR = 0x14 @@ -1384,6 +1406,10 @@ const ( MCAST_LEAVE_SOURCE_GROUP = 0x2f MCAST_MSFILTER = 0x30 MCAST_UNBLOCK_SOURCE = 0x2c + MEMGETREGIONINFO = 0xc0104d08 + MEMREADOOB64 = 0xc0184d16 + MEMWRITE = 0xc0304d18 + MEMWRITEOOB64 = 0xc0184d15 MFD_ALLOW_SEALING = 0x2 MFD_CLOEXEC = 0x1 MFD_HUGETLB = 0x4 @@ -1472,7 +1498,35 @@ const ( MS_SYNCHRONOUS = 0x10 MS_UNBINDABLE = 0x20000 MS_VERBOSE = 0x8000 + MTD_ABSENT = 0x0 + MTD_BIT_WRITEABLE = 0x800 + MTD_CAP_NANDFLASH = 0x400 + MTD_CAP_NORFLASH = 0xc00 + MTD_CAP_NVRAM = 0x1c00 + MTD_CAP_RAM = 0x1c00 + MTD_CAP_ROM = 0x0 + MTD_DATAFLASH = 0x6 MTD_INODE_FS_MAGIC = 0x11307854 + MTD_MAX_ECCPOS_ENTRIES = 0x40 + MTD_MAX_OOBFREE_ENTRIES = 0x8 + MTD_MLCNANDFLASH = 0x8 + MTD_NANDECC_AUTOPLACE = 0x2 + MTD_NANDECC_AUTOPL_USR = 0x4 + MTD_NANDECC_OFF = 0x0 + MTD_NANDECC_PLACE = 0x1 + MTD_NANDECC_PLACEONLY = 0x3 + MTD_NANDFLASH = 0x4 + MTD_NORFLASH = 0x3 + MTD_NO_ERASE = 0x1000 + MTD_OTP_FACTORY = 0x1 + MTD_OTP_OFF = 0x0 + MTD_OTP_USER = 0x2 + MTD_POWERUP_LOCK = 0x2000 + MTD_RAM = 0x1 + MTD_ROM = 0x2 + MTD_SLC_ON_MLC_EMULATION = 0x4000 + MTD_UBIVOLUME = 0x7 + MTD_WRITEABLE = 0x400 NAME_MAX = 0xff NCP_SUPER_MAGIC = 0x564c NETLINK_ADD_MEMBERSHIP = 0x1 @@ -1512,6 +1566,59 @@ const ( NETLINK_XFRM = 0x6 NETNSA_MAX = 0x5 NETNSA_NSID_NOT_ASSIGNED = -0x1 + NFC_ATR_REQ_GB_MAXSIZE = 0x30 + NFC_ATR_REQ_MAXSIZE = 0x40 + NFC_ATR_RES_GB_MAXSIZE = 0x2f + NFC_ATR_RES_MAXSIZE = 0x40 + NFC_COMM_ACTIVE = 0x0 + NFC_COMM_PASSIVE = 0x1 + NFC_DEVICE_NAME_MAXSIZE = 0x8 + NFC_DIRECTION_RX = 0x0 + NFC_DIRECTION_TX = 0x1 + NFC_FIRMWARE_NAME_MAXSIZE = 0x20 + NFC_GB_MAXSIZE = 0x30 + NFC_GENL_MCAST_EVENT_NAME = "events" + NFC_GENL_NAME = "nfc" + NFC_GENL_VERSION = 0x1 + NFC_HEADER_SIZE = 0x1 + NFC_ISO15693_UID_MAXSIZE = 0x8 + NFC_LLCP_MAX_SERVICE_NAME = 0x3f + NFC_LLCP_MIUX = 0x1 + NFC_LLCP_REMOTE_LTO = 0x3 + NFC_LLCP_REMOTE_MIU = 0x2 + 
NFC_LLCP_REMOTE_RW = 0x4 + NFC_LLCP_RW = 0x0 + NFC_NFCID1_MAXSIZE = 0xa + NFC_NFCID2_MAXSIZE = 0x8 + NFC_NFCID3_MAXSIZE = 0xa + NFC_PROTO_FELICA = 0x3 + NFC_PROTO_FELICA_MASK = 0x8 + NFC_PROTO_ISO14443 = 0x4 + NFC_PROTO_ISO14443_B = 0x6 + NFC_PROTO_ISO14443_B_MASK = 0x40 + NFC_PROTO_ISO14443_MASK = 0x10 + NFC_PROTO_ISO15693 = 0x7 + NFC_PROTO_ISO15693_MASK = 0x80 + NFC_PROTO_JEWEL = 0x1 + NFC_PROTO_JEWEL_MASK = 0x2 + NFC_PROTO_MAX = 0x8 + NFC_PROTO_MIFARE = 0x2 + NFC_PROTO_MIFARE_MASK = 0x4 + NFC_PROTO_NFC_DEP = 0x5 + NFC_PROTO_NFC_DEP_MASK = 0x20 + NFC_RAW_HEADER_SIZE = 0x2 + NFC_RF_INITIATOR = 0x0 + NFC_RF_NONE = 0x2 + NFC_RF_TARGET = 0x1 + NFC_SENSB_RES_MAXSIZE = 0xc + NFC_SENSF_RES_MAXSIZE = 0x12 + NFC_SE_DISABLED = 0x0 + NFC_SE_EMBEDDED = 0x2 + NFC_SE_ENABLED = 0x1 + NFC_SE_UICC = 0x1 + NFC_SOCKPROTO_LLCP = 0x1 + NFC_SOCKPROTO_MAX = 0x2 + NFC_SOCKPROTO_RAW = 0x0 NFNETLINK_V0 = 0x0 NFNLGRP_ACCT_QUOTA = 0x8 NFNLGRP_CONNTRACK_DESTROY = 0x3 @@ -1659,6 +1766,10 @@ const ( PERF_FLAG_PID_CGROUP = 0x4 PERF_MAX_CONTEXTS_PER_STACK = 0x8 PERF_MAX_STACK_DEPTH = 0x7f + PERF_MEM_BLK_ADDR = 0x4 + PERF_MEM_BLK_DATA = 0x2 + PERF_MEM_BLK_NA = 0x1 + PERF_MEM_BLK_SHIFT = 0x28 PERF_MEM_LOCK_LOCKED = 0x2 PERF_MEM_LOCK_NA = 0x1 PERF_MEM_LOCK_SHIFT = 0x18 @@ -1722,12 +1833,14 @@ const ( PERF_RECORD_MISC_GUEST_USER = 0x5 PERF_RECORD_MISC_HYPERVISOR = 0x3 PERF_RECORD_MISC_KERNEL = 0x1 + PERF_RECORD_MISC_MMAP_BUILD_ID = 0x4000 PERF_RECORD_MISC_MMAP_DATA = 0x2000 PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT = 0x1000 PERF_RECORD_MISC_SWITCH_OUT = 0x2000 PERF_RECORD_MISC_SWITCH_OUT_PREEMPT = 0x4000 PERF_RECORD_MISC_USER = 0x2 PERF_SAMPLE_BRANCH_PLM_ALL = 0x7 + PERF_SAMPLE_WEIGHT_TYPE = 0x1004000 PIPEFS_MAGIC = 0x50495045 PPC_CMM_MAGIC = 0xc7571590 PPPIOCGNPMODE = 0xc008744c @@ -1840,6 +1953,7 @@ const ( PR_SET_SECCOMP = 0x16 PR_SET_SECUREBITS = 0x1c PR_SET_SPECULATION_CTRL = 0x35 + PR_SET_SYSCALL_USER_DISPATCH = 0x3b PR_SET_TAGGED_ADDR_CTRL = 0x37 PR_SET_THP_DISABLE = 0x29 PR_SET_TIMERSLACK = 
0x1d @@ -1859,6 +1973,8 @@ const ( PR_SVE_SET_VL_ONEXEC = 0x40000 PR_SVE_VL_INHERIT = 0x20000 PR_SVE_VL_LEN_MASK = 0xffff + PR_SYS_DISPATCH_OFF = 0x0 + PR_SYS_DISPATCH_ON = 0x1 PR_TAGGED_ADDR_ENABLE = 0x1 PR_TASK_PERF_EVENTS_DISABLE = 0x1f PR_TASK_PERF_EVENTS_ENABLE = 0x20 @@ -1928,6 +2044,11 @@ const ( QNX4_SUPER_MAGIC = 0x2f QNX6_SUPER_MAGIC = 0x68191122 RAMFS_MAGIC = 0x858458f6 + RAW_PAYLOAD_DIGITAL = 0x3 + RAW_PAYLOAD_HCI = 0x2 + RAW_PAYLOAD_LLCP = 0x0 + RAW_PAYLOAD_NCI = 0x1 + RAW_PAYLOAD_PROPRIETARY = 0x4 RDTGROUP_SUPER_MAGIC = 0x7655821 REISERFS_SUPER_MAGIC = 0x52654973 RENAME_EXCHANGE = 0x2 @@ -1978,6 +2099,10 @@ const ( RTCF_NAT = 0x800000 RTCF_VALVE = 0x200000 RTC_AF = 0x20 + RTC_FEATURE_ALARM = 0x0 + RTC_FEATURE_ALARM_RES_MINUTE = 0x1 + RTC_FEATURE_CNT = 0x3 + RTC_FEATURE_NEED_WEEK_DAY = 0x2 RTC_IRQF = 0x80 RTC_MAX_FREQ = 0x2000 RTC_PF = 0x40 @@ -2051,6 +2176,7 @@ const ( RTM_F_LOOKUP_TABLE = 0x1000 RTM_F_NOTIFY = 0x100 RTM_F_OFFLOAD = 0x4000 + RTM_F_OFFLOAD_FAILED = 0x20000000 RTM_F_PREFIX = 0x800 RTM_F_TRAP = 0x8000 RTM_GETACTION = 0x32 @@ -2104,12 +2230,13 @@ const ( RTM_SETLINK = 0x13 RTM_SETNEIGHTBL = 0x43 RTNH_ALIGNTO = 0x4 - RTNH_COMPARE_MASK = 0x19 + RTNH_COMPARE_MASK = 0x59 RTNH_F_DEAD = 0x1 RTNH_F_LINKDOWN = 0x10 RTNH_F_OFFLOAD = 0x8 RTNH_F_ONLINK = 0x4 RTNH_F_PERVASIVE = 0x2 + RTNH_F_TRAP = 0x40 RTNH_F_UNRESOLVED = 0x20 RTN_MAX = 0xb RTPROT_BABEL = 0x2a @@ -2580,6 +2707,7 @@ const ( VMADDR_CID_HOST = 0x2 VMADDR_CID_HYPERVISOR = 0x0 VMADDR_CID_LOCAL = 0x1 + VMADDR_FLAG_TO_HOST = 0x1 VMADDR_PORT_ANY = 0xffffffff VM_SOCKETS_INVALID_VERSION = 0xffffffff VQUIT = 0x1 @@ -2727,6 +2855,9 @@ const ( Z3FOLD_MAGIC = 0x33 ZONEFS_MAGIC = 0x5a4f4653 ZSMALLOC_MAGIC = 0x58295829 + _HIDIOCGRAWNAME_LEN = 0x80 + _HIDIOCGRAWPHYS_LEN = 0x40 + _HIDIOCGRAWUNIQ_LEN = 0x40 ) // Errors diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 336e0b326..09fc559ed 100644 --- 
a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -1,6 +1,7 @@ // mkerrors.sh -Wall -Werror -static -I/tmp/include -m32 // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build 386 && linux // +build 386,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. @@ -59,6 +60,8 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + ECCGETLAYOUT = 0x81484d11 + ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 ECHOE = 0x10 ECHOK = 0x20 @@ -93,6 +96,9 @@ const ( F_SETOWN = 0x8 F_UNLCK = 0x2 F_WRLCK = 0x1 + HIDIOCGRAWINFO = 0x80084803 + HIDIOCGRDESC = 0x90044802 + HIDIOCGRDESCSIZE = 0x80044801 HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 @@ -119,6 +125,19 @@ const ( MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 MCL_ONFAULT = 0x4 + MEMERASE = 0x40084d02 + MEMERASE64 = 0x40104d14 + MEMGETBADBLOCK = 0x40084d0b + MEMGETINFO = 0x80204d01 + MEMGETOOBSEL = 0x80c84d0a + MEMGETREGIONCOUNT = 0x80044d07 + MEMISLOCKED = 0x80084d17 + MEMLOCK = 0x40084d05 + MEMREADOOB = 0xc00c4d04 + MEMSETBADBLOCK = 0x40084d0c + MEMUNLOCK = 0x40084d06 + MEMWRITEOOB = 0xc00c4d03 + MTDFILEMODE = 0x4d13 NFDBITS = 0x20 NLDLY = 0x100 NOFLSH = 0x80 @@ -128,6 +147,10 @@ const ( NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 + OTPGETREGIONCOUNT = 0x40044d0e + OTPGETREGIONINFO = 0x400c4d0f + OTPLOCK = 0x800c4d10 + OTPSELECT = 0x80044d0d O_APPEND = 0x400 O_ASYNC = 0x2000 O_CLOEXEC = 0x80000 @@ -165,6 +188,7 @@ const ( PERF_EVENT_IOC_SET_OUTPUT = 0x2405 PPPIOCATTACH = 0x4004743d PPPIOCATTCHAN = 0x40047438 + PPPIOCBRIDGECHAN = 0x40047435 PPPIOCCONNECT = 0x4004743a PPPIOCDETACH = 0x4004743c PPPIOCDISCONN = 0x7439 @@ -192,6 +216,7 @@ const ( PPPIOCSPASS = 0x40087447 PPPIOCSRASYNCMAP = 0x40047454 PPPIOCSXASYNCMAP = 0x4020744f + PPPIOCUNBRIDGECHAN = 0x7434 PPPIOCXFERUNIT = 0x744e PR_SET_PTRACER_ANY = 0xffffffff PTRACE_GETFPREGS = 0xe @@ -268,6 +293,7 @@ const ( SO_BROADCAST = 0x6 SO_BSDCOMPAT = 0xe SO_BUSY_POLL = 0x2e + SO_BUSY_POLL_BUDGET = 0x46 SO_CNX_ADVICE 
= 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 @@ -290,6 +316,7 @@ const ( SO_PEERCRED = 0x11 SO_PEERGROUPS = 0x3b SO_PEERSEC = 0x1f + SO_PREFER_BUSY_POLL = 0x45 SO_PROTOCOL = 0x26 SO_RCVBUF = 0x8 SO_RCVBUFFORCE = 0x21 @@ -481,6 +508,9 @@ const ( X86_FXSR_MAGIC = 0x0 XCASE = 0x4 XTABS = 0x1800 + _HIDIOCGRAWNAME = 0x80804804 + _HIDIOCGRAWPHYS = 0x80404805 + _HIDIOCGRAWUNIQ = 0x80404808 ) // Errors diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index 961507e93..75730cc22 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -1,6 +1,7 @@ // mkerrors.sh -Wall -Werror -static -I/tmp/include -m64 // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build amd64 && linux // +build amd64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. @@ -59,6 +60,8 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + ECCGETLAYOUT = 0x81484d11 + ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 ECHOE = 0x10 ECHOK = 0x20 @@ -93,6 +96,9 @@ const ( F_SETOWN = 0x8 F_UNLCK = 0x2 F_WRLCK = 0x1 + HIDIOCGRAWINFO = 0x80084803 + HIDIOCGRDESC = 0x90044802 + HIDIOCGRDESCSIZE = 0x80044801 HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 @@ -119,6 +125,19 @@ const ( MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 MCL_ONFAULT = 0x4 + MEMERASE = 0x40084d02 + MEMERASE64 = 0x40104d14 + MEMGETBADBLOCK = 0x40084d0b + MEMGETINFO = 0x80204d01 + MEMGETOOBSEL = 0x80c84d0a + MEMGETREGIONCOUNT = 0x80044d07 + MEMISLOCKED = 0x80084d17 + MEMLOCK = 0x40084d05 + MEMREADOOB = 0xc0104d04 + MEMSETBADBLOCK = 0x40084d0c + MEMUNLOCK = 0x40084d06 + MEMWRITEOOB = 0xc0104d03 + MTDFILEMODE = 0x4d13 NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 @@ -128,6 +147,10 @@ const ( NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 + OTPGETREGIONCOUNT = 0x40044d0e + OTPGETREGIONINFO = 0x400c4d0f + OTPLOCK = 0x800c4d10 + OTPSELECT = 0x80044d0d O_APPEND = 0x400 O_ASYNC = 0x2000 O_CLOEXEC = 0x80000 @@ 
-165,6 +188,7 @@ const ( PERF_EVENT_IOC_SET_OUTPUT = 0x2405 PPPIOCATTACH = 0x4004743d PPPIOCATTCHAN = 0x40047438 + PPPIOCBRIDGECHAN = 0x40047435 PPPIOCCONNECT = 0x4004743a PPPIOCDETACH = 0x4004743c PPPIOCDISCONN = 0x7439 @@ -192,6 +216,7 @@ const ( PPPIOCSPASS = 0x40107447 PPPIOCSRASYNCMAP = 0x40047454 PPPIOCSXASYNCMAP = 0x4020744f + PPPIOCUNBRIDGECHAN = 0x7434 PPPIOCXFERUNIT = 0x744e PR_SET_PTRACER_ANY = 0xffffffffffffffff PTRACE_ARCH_PRCTL = 0x1e @@ -269,6 +294,7 @@ const ( SO_BROADCAST = 0x6 SO_BSDCOMPAT = 0xe SO_BUSY_POLL = 0x2e + SO_BUSY_POLL_BUDGET = 0x46 SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 @@ -291,6 +317,7 @@ const ( SO_PEERCRED = 0x11 SO_PEERGROUPS = 0x3b SO_PEERSEC = 0x1f + SO_PREFER_BUSY_POLL = 0x45 SO_PROTOCOL = 0x26 SO_RCVBUF = 0x8 SO_RCVBUFFORCE = 0x21 @@ -481,6 +508,9 @@ const ( WORDSIZE = 0x40 XCASE = 0x4 XTABS = 0x1800 + _HIDIOCGRAWNAME = 0x80804804 + _HIDIOCGRAWPHYS = 0x80404805 + _HIDIOCGRAWUNIQ = 0x80404808 ) // Errors diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index a65576db7..127cf17ad 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -1,6 +1,7 @@ // mkerrors.sh -Wall -Werror -static -I/tmp/include // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build arm && linux // +build arm,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
@@ -59,6 +60,8 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + ECCGETLAYOUT = 0x81484d11 + ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 ECHOE = 0x10 ECHOK = 0x20 @@ -92,6 +95,9 @@ const ( F_SETOWN = 0x8 F_UNLCK = 0x2 F_WRLCK = 0x1 + HIDIOCGRAWINFO = 0x80084803 + HIDIOCGRDESC = 0x90044802 + HIDIOCGRDESCSIZE = 0x80044801 HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 @@ -117,6 +123,19 @@ const ( MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 MCL_ONFAULT = 0x4 + MEMERASE = 0x40084d02 + MEMERASE64 = 0x40104d14 + MEMGETBADBLOCK = 0x40084d0b + MEMGETINFO = 0x80204d01 + MEMGETOOBSEL = 0x80c84d0a + MEMGETREGIONCOUNT = 0x80044d07 + MEMISLOCKED = 0x80084d17 + MEMLOCK = 0x40084d05 + MEMREADOOB = 0xc00c4d04 + MEMSETBADBLOCK = 0x40084d0c + MEMUNLOCK = 0x40084d06 + MEMWRITEOOB = 0xc00c4d03 + MTDFILEMODE = 0x4d13 NFDBITS = 0x20 NLDLY = 0x100 NOFLSH = 0x80 @@ -126,6 +145,10 @@ const ( NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 + OTPGETREGIONCOUNT = 0x40044d0e + OTPGETREGIONINFO = 0x400c4d0f + OTPLOCK = 0x800c4d10 + OTPSELECT = 0x80044d0d O_APPEND = 0x400 O_ASYNC = 0x2000 O_CLOEXEC = 0x80000 @@ -163,6 +186,7 @@ const ( PERF_EVENT_IOC_SET_OUTPUT = 0x2405 PPPIOCATTACH = 0x4004743d PPPIOCATTCHAN = 0x40047438 + PPPIOCBRIDGECHAN = 0x40047435 PPPIOCCONNECT = 0x4004743a PPPIOCDETACH = 0x4004743c PPPIOCDISCONN = 0x7439 @@ -190,6 +214,7 @@ const ( PPPIOCSPASS = 0x40087447 PPPIOCSRASYNCMAP = 0x40047454 PPPIOCSXASYNCMAP = 0x4020744f + PPPIOCUNBRIDGECHAN = 0x7434 PPPIOCXFERUNIT = 0x744e PR_SET_PTRACER_ANY = 0xffffffff PTRACE_GETCRUNCHREGS = 0x19 @@ -275,6 +300,7 @@ const ( SO_BROADCAST = 0x6 SO_BSDCOMPAT = 0xe SO_BUSY_POLL = 0x2e + SO_BUSY_POLL_BUDGET = 0x46 SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 @@ -297,6 +323,7 @@ const ( SO_PEERCRED = 0x11 SO_PEERGROUPS = 0x3b SO_PEERSEC = 0x1f + SO_PREFER_BUSY_POLL = 0x45 SO_PROTOCOL = 0x26 SO_RCVBUF = 0x8 SO_RCVBUFFORCE = 0x21 @@ -487,6 +514,9 @@ const ( WORDSIZE = 0x20 XCASE = 0x4 XTABS = 0x1800 + _HIDIOCGRAWNAME = 0x80804804 + 
_HIDIOCGRAWPHYS = 0x80404805 + _HIDIOCGRAWUNIQ = 0x80404808 ) // Errors diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index cf075caa8..957ca1ff1 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -1,6 +1,7 @@ // mkerrors.sh -Wall -Werror -static -I/tmp/include -fsigned-char // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build arm64 && linux // +build arm64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. @@ -59,6 +60,8 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + ECCGETLAYOUT = 0x81484d11 + ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 ECHOE = 0x10 ECHOK = 0x20 @@ -95,6 +98,9 @@ const ( F_SETOWN = 0x8 F_UNLCK = 0x2 F_WRLCK = 0x1 + HIDIOCGRAWINFO = 0x80084803 + HIDIOCGRDESC = 0x90044802 + HIDIOCGRDESCSIZE = 0x80044801 HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 @@ -120,6 +126,19 @@ const ( MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 MCL_ONFAULT = 0x4 + MEMERASE = 0x40084d02 + MEMERASE64 = 0x40104d14 + MEMGETBADBLOCK = 0x40084d0b + MEMGETINFO = 0x80204d01 + MEMGETOOBSEL = 0x80c84d0a + MEMGETREGIONCOUNT = 0x80044d07 + MEMISLOCKED = 0x80084d17 + MEMLOCK = 0x40084d05 + MEMREADOOB = 0xc0104d04 + MEMSETBADBLOCK = 0x40084d0c + MEMUNLOCK = 0x40084d06 + MEMWRITEOOB = 0xc0104d03 + MTDFILEMODE = 0x4d13 NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 @@ -129,6 +148,10 @@ const ( NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 + OTPGETREGIONCOUNT = 0x40044d0e + OTPGETREGIONINFO = 0x400c4d0f + OTPLOCK = 0x800c4d10 + OTPSELECT = 0x80044d0d O_APPEND = 0x400 O_ASYNC = 0x2000 O_CLOEXEC = 0x80000 @@ -166,6 +189,7 @@ const ( PERF_EVENT_IOC_SET_OUTPUT = 0x2405 PPPIOCATTACH = 0x4004743d PPPIOCATTCHAN = 0x40047438 + PPPIOCBRIDGECHAN = 0x40047435 PPPIOCCONNECT = 0x4004743a PPPIOCDETACH = 0x4004743c PPPIOCDISCONN = 0x7439 @@ -193,8 +217,10 @@ const ( PPPIOCSPASS = 0x40107447 PPPIOCSRASYNCMAP = 0x40047454 PPPIOCSXASYNCMAP = 
0x4020744f + PPPIOCUNBRIDGECHAN = 0x7434 PPPIOCXFERUNIT = 0x744e PROT_BTI = 0x10 + PROT_MTE = 0x20 PR_SET_PTRACER_ANY = 0xffffffffffffffff PTRACE_PEEKMTETAGS = 0x21 PTRACE_POKEMTETAGS = 0x22 @@ -264,6 +290,7 @@ const ( SO_BROADCAST = 0x6 SO_BSDCOMPAT = 0xe SO_BUSY_POLL = 0x2e + SO_BUSY_POLL_BUDGET = 0x46 SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 @@ -286,6 +313,7 @@ const ( SO_PEERCRED = 0x11 SO_PEERGROUPS = 0x3b SO_PEERSEC = 0x1f + SO_PREFER_BUSY_POLL = 0x45 SO_PROTOCOL = 0x26 SO_RCVBUF = 0x8 SO_RCVBUFFORCE = 0x21 @@ -477,6 +505,9 @@ const ( WORDSIZE = 0x40 XCASE = 0x4 XTABS = 0x1800 + _HIDIOCGRAWNAME = 0x80804804 + _HIDIOCGRAWPHYS = 0x80404805 + _HIDIOCGRAWUNIQ = 0x80404808 ) // Errors diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index efe90deea..314a2054f 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -1,6 +1,7 @@ // mkerrors.sh -Wall -Werror -static -I/tmp/include // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build mips && linux // +build mips,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
@@ -59,6 +60,8 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + ECCGETLAYOUT = 0x41484d11 + ECCGETSTATS = 0x40104d12 ECHOCTL = 0x200 ECHOE = 0x10 ECHOK = 0x20 @@ -92,6 +95,9 @@ const ( F_SETOWN = 0x18 F_UNLCK = 0x2 F_WRLCK = 0x1 + HIDIOCGRAWINFO = 0x40084803 + HIDIOCGRDESC = 0x50044802 + HIDIOCGRDESCSIZE = 0x40044801 HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x100 @@ -117,6 +123,19 @@ const ( MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 MCL_ONFAULT = 0x4 + MEMERASE = 0x80084d02 + MEMERASE64 = 0x80104d14 + MEMGETBADBLOCK = 0x80084d0b + MEMGETINFO = 0x40204d01 + MEMGETOOBSEL = 0x40c84d0a + MEMGETREGIONCOUNT = 0x40044d07 + MEMISLOCKED = 0x40084d17 + MEMLOCK = 0x80084d05 + MEMREADOOB = 0xc00c4d04 + MEMSETBADBLOCK = 0x80084d0c + MEMUNLOCK = 0x80084d06 + MEMWRITEOOB = 0xc00c4d03 + MTDFILEMODE = 0x20004d13 NFDBITS = 0x20 NLDLY = 0x100 NOFLSH = 0x80 @@ -126,6 +145,10 @@ const ( NS_GET_USERNS = 0x2000b701 OLCUC = 0x2 ONLCR = 0x4 + OTPGETREGIONCOUNT = 0x80044d0e + OTPGETREGIONINFO = 0x800c4d0f + OTPLOCK = 0x400c4d10 + OTPSELECT = 0x40044d0d O_APPEND = 0x8 O_ASYNC = 0x1000 O_CLOEXEC = 0x80000 @@ -163,6 +186,7 @@ const ( PERF_EVENT_IOC_SET_OUTPUT = 0x20002405 PPPIOCATTACH = 0x8004743d PPPIOCATTCHAN = 0x80047438 + PPPIOCBRIDGECHAN = 0x80047435 PPPIOCCONNECT = 0x8004743a PPPIOCDETACH = 0x8004743c PPPIOCDISCONN = 0x20007439 @@ -190,6 +214,7 @@ const ( PPPIOCSPASS = 0x80087447 PPPIOCSRASYNCMAP = 0x80047454 PPPIOCSXASYNCMAP = 0x8020744f + PPPIOCUNBRIDGECHAN = 0x20007434 PPPIOCXFERUNIT = 0x2000744e PR_SET_PTRACER_ANY = 0xffffffff PTRACE_GETFPREGS = 0xe @@ -268,6 +293,7 @@ const ( SO_BROADCAST = 0x20 SO_BSDCOMPAT = 0xe SO_BUSY_POLL = 0x2e + SO_BUSY_POLL_BUDGET = 0x46 SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 @@ -290,6 +316,7 @@ const ( SO_PEERCRED = 0x12 SO_PEERGROUPS = 0x3b SO_PEERSEC = 0x1e + SO_PREFER_BUSY_POLL = 0x45 SO_PROTOCOL = 0x1028 SO_RCVBUF = 0x1002 SO_RCVBUFFORCE = 0x21 @@ -483,6 +510,9 @@ const ( WORDSIZE = 0x20 XCASE = 0x4 XTABS = 0x1800 + _HIDIOCGRAWNAME = 
0x40804804 + _HIDIOCGRAWPHYS = 0x40404805 + _HIDIOCGRAWUNIQ = 0x40404808 ) // Errors diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index 8b0e8911d..457e8de97 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -1,6 +1,7 @@ // mkerrors.sh -Wall -Werror -static -I/tmp/include // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build mips64 && linux // +build mips64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. @@ -59,6 +60,8 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + ECCGETLAYOUT = 0x41484d11 + ECCGETSTATS = 0x40104d12 ECHOCTL = 0x200 ECHOE = 0x10 ECHOK = 0x20 @@ -92,6 +95,9 @@ const ( F_SETOWN = 0x18 F_UNLCK = 0x2 F_WRLCK = 0x1 + HIDIOCGRAWINFO = 0x40084803 + HIDIOCGRDESC = 0x50044802 + HIDIOCGRDESCSIZE = 0x40044801 HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x100 @@ -117,6 +123,19 @@ const ( MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 MCL_ONFAULT = 0x4 + MEMERASE = 0x80084d02 + MEMERASE64 = 0x80104d14 + MEMGETBADBLOCK = 0x80084d0b + MEMGETINFO = 0x40204d01 + MEMGETOOBSEL = 0x40c84d0a + MEMGETREGIONCOUNT = 0x40044d07 + MEMISLOCKED = 0x40084d17 + MEMLOCK = 0x80084d05 + MEMREADOOB = 0xc0104d04 + MEMSETBADBLOCK = 0x80084d0c + MEMUNLOCK = 0x80084d06 + MEMWRITEOOB = 0xc0104d03 + MTDFILEMODE = 0x20004d13 NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 @@ -126,6 +145,10 @@ const ( NS_GET_USERNS = 0x2000b701 OLCUC = 0x2 ONLCR = 0x4 + OTPGETREGIONCOUNT = 0x80044d0e + OTPGETREGIONINFO = 0x800c4d0f + OTPLOCK = 0x400c4d10 + OTPSELECT = 0x40044d0d O_APPEND = 0x8 O_ASYNC = 0x1000 O_CLOEXEC = 0x80000 @@ -163,6 +186,7 @@ const ( PERF_EVENT_IOC_SET_OUTPUT = 0x20002405 PPPIOCATTACH = 0x8004743d PPPIOCATTCHAN = 0x80047438 + PPPIOCBRIDGECHAN = 0x80047435 PPPIOCCONNECT = 0x8004743a PPPIOCDETACH = 0x8004743c PPPIOCDISCONN = 0x20007439 @@ -190,6 +214,7 @@ const ( PPPIOCSPASS = 0x80107447 PPPIOCSRASYNCMAP = 0x80047454 
PPPIOCSXASYNCMAP = 0x8020744f + PPPIOCUNBRIDGECHAN = 0x20007434 PPPIOCXFERUNIT = 0x2000744e PR_SET_PTRACER_ANY = 0xffffffffffffffff PTRACE_GETFPREGS = 0xe @@ -268,6 +293,7 @@ const ( SO_BROADCAST = 0x20 SO_BSDCOMPAT = 0xe SO_BUSY_POLL = 0x2e + SO_BUSY_POLL_BUDGET = 0x46 SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 @@ -290,6 +316,7 @@ const ( SO_PEERCRED = 0x12 SO_PEERGROUPS = 0x3b SO_PEERSEC = 0x1e + SO_PREFER_BUSY_POLL = 0x45 SO_PROTOCOL = 0x1028 SO_RCVBUF = 0x1002 SO_RCVBUFFORCE = 0x21 @@ -483,6 +510,9 @@ const ( WORDSIZE = 0x40 XCASE = 0x4 XTABS = 0x1800 + _HIDIOCGRAWNAME = 0x40804804 + _HIDIOCGRAWPHYS = 0x40404805 + _HIDIOCGRAWUNIQ = 0x40404808 ) // Errors diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index e9430cd1a..33cd28f6b 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -1,6 +1,7 @@ // mkerrors.sh -Wall -Werror -static -I/tmp/include // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build mips64le && linux // +build mips64le,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
@@ -59,6 +60,8 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + ECCGETLAYOUT = 0x41484d11 + ECCGETSTATS = 0x40104d12 ECHOCTL = 0x200 ECHOE = 0x10 ECHOK = 0x20 @@ -92,6 +95,9 @@ const ( F_SETOWN = 0x18 F_UNLCK = 0x2 F_WRLCK = 0x1 + HIDIOCGRAWINFO = 0x40084803 + HIDIOCGRDESC = 0x50044802 + HIDIOCGRDESCSIZE = 0x40044801 HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x100 @@ -117,6 +123,19 @@ const ( MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 MCL_ONFAULT = 0x4 + MEMERASE = 0x80084d02 + MEMERASE64 = 0x80104d14 + MEMGETBADBLOCK = 0x80084d0b + MEMGETINFO = 0x40204d01 + MEMGETOOBSEL = 0x40c84d0a + MEMGETREGIONCOUNT = 0x40044d07 + MEMISLOCKED = 0x40084d17 + MEMLOCK = 0x80084d05 + MEMREADOOB = 0xc0104d04 + MEMSETBADBLOCK = 0x80084d0c + MEMUNLOCK = 0x80084d06 + MEMWRITEOOB = 0xc0104d03 + MTDFILEMODE = 0x20004d13 NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 @@ -126,6 +145,10 @@ const ( NS_GET_USERNS = 0x2000b701 OLCUC = 0x2 ONLCR = 0x4 + OTPGETREGIONCOUNT = 0x80044d0e + OTPGETREGIONINFO = 0x800c4d0f + OTPLOCK = 0x400c4d10 + OTPSELECT = 0x40044d0d O_APPEND = 0x8 O_ASYNC = 0x1000 O_CLOEXEC = 0x80000 @@ -163,6 +186,7 @@ const ( PERF_EVENT_IOC_SET_OUTPUT = 0x20002405 PPPIOCATTACH = 0x8004743d PPPIOCATTCHAN = 0x80047438 + PPPIOCBRIDGECHAN = 0x80047435 PPPIOCCONNECT = 0x8004743a PPPIOCDETACH = 0x8004743c PPPIOCDISCONN = 0x20007439 @@ -190,6 +214,7 @@ const ( PPPIOCSPASS = 0x80107447 PPPIOCSRASYNCMAP = 0x80047454 PPPIOCSXASYNCMAP = 0x8020744f + PPPIOCUNBRIDGECHAN = 0x20007434 PPPIOCXFERUNIT = 0x2000744e PR_SET_PTRACER_ANY = 0xffffffffffffffff PTRACE_GETFPREGS = 0xe @@ -268,6 +293,7 @@ const ( SO_BROADCAST = 0x20 SO_BSDCOMPAT = 0xe SO_BUSY_POLL = 0x2e + SO_BUSY_POLL_BUDGET = 0x46 SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 @@ -290,6 +316,7 @@ const ( SO_PEERCRED = 0x12 SO_PEERGROUPS = 0x3b SO_PEERSEC = 0x1e + SO_PREFER_BUSY_POLL = 0x45 SO_PROTOCOL = 0x1028 SO_RCVBUF = 0x1002 SO_RCVBUFFORCE = 0x21 @@ -483,6 +510,9 @@ const ( WORDSIZE = 0x40 XCASE = 0x4 XTABS = 0x1800 + 
_HIDIOCGRAWNAME = 0x40804804 + _HIDIOCGRAWPHYS = 0x40404805 + _HIDIOCGRAWUNIQ = 0x40404808 ) // Errors diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index 61e4f5db6..0e085ba14 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -1,6 +1,7 @@ // mkerrors.sh -Wall -Werror -static -I/tmp/include // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build mipsle && linux // +build mipsle,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. @@ -59,6 +60,8 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + ECCGETLAYOUT = 0x41484d11 + ECCGETSTATS = 0x40104d12 ECHOCTL = 0x200 ECHOE = 0x10 ECHOK = 0x20 @@ -92,6 +95,9 @@ const ( F_SETOWN = 0x18 F_UNLCK = 0x2 F_WRLCK = 0x1 + HIDIOCGRAWINFO = 0x40084803 + HIDIOCGRDESC = 0x50044802 + HIDIOCGRDESCSIZE = 0x40044801 HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x100 @@ -117,6 +123,19 @@ const ( MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 MCL_ONFAULT = 0x4 + MEMERASE = 0x80084d02 + MEMERASE64 = 0x80104d14 + MEMGETBADBLOCK = 0x80084d0b + MEMGETINFO = 0x40204d01 + MEMGETOOBSEL = 0x40c84d0a + MEMGETREGIONCOUNT = 0x40044d07 + MEMISLOCKED = 0x40084d17 + MEMLOCK = 0x80084d05 + MEMREADOOB = 0xc00c4d04 + MEMSETBADBLOCK = 0x80084d0c + MEMUNLOCK = 0x80084d06 + MEMWRITEOOB = 0xc00c4d03 + MTDFILEMODE = 0x20004d13 NFDBITS = 0x20 NLDLY = 0x100 NOFLSH = 0x80 @@ -126,6 +145,10 @@ const ( NS_GET_USERNS = 0x2000b701 OLCUC = 0x2 ONLCR = 0x4 + OTPGETREGIONCOUNT = 0x80044d0e + OTPGETREGIONINFO = 0x800c4d0f + OTPLOCK = 0x400c4d10 + OTPSELECT = 0x40044d0d O_APPEND = 0x8 O_ASYNC = 0x1000 O_CLOEXEC = 0x80000 @@ -163,6 +186,7 @@ const ( PERF_EVENT_IOC_SET_OUTPUT = 0x20002405 PPPIOCATTACH = 0x8004743d PPPIOCATTCHAN = 0x80047438 + PPPIOCBRIDGECHAN = 0x80047435 PPPIOCCONNECT = 0x8004743a PPPIOCDETACH = 0x8004743c PPPIOCDISCONN = 0x20007439 @@ -190,6 +214,7 @@ const ( PPPIOCSPASS = 0x80087447 
PPPIOCSRASYNCMAP = 0x80047454 PPPIOCSXASYNCMAP = 0x8020744f + PPPIOCUNBRIDGECHAN = 0x20007434 PPPIOCXFERUNIT = 0x2000744e PR_SET_PTRACER_ANY = 0xffffffff PTRACE_GETFPREGS = 0xe @@ -268,6 +293,7 @@ const ( SO_BROADCAST = 0x20 SO_BSDCOMPAT = 0xe SO_BUSY_POLL = 0x2e + SO_BUSY_POLL_BUDGET = 0x46 SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 @@ -290,6 +316,7 @@ const ( SO_PEERCRED = 0x12 SO_PEERGROUPS = 0x3b SO_PEERSEC = 0x1e + SO_PREFER_BUSY_POLL = 0x45 SO_PROTOCOL = 0x1028 SO_RCVBUF = 0x1002 SO_RCVBUFFORCE = 0x21 @@ -483,6 +510,9 @@ const ( WORDSIZE = 0x20 XCASE = 0x4 XTABS = 0x1800 + _HIDIOCGRAWNAME = 0x40804804 + _HIDIOCGRAWPHYS = 0x40404805 + _HIDIOCGRAWUNIQ = 0x40404808 ) // Errors diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go new file mode 100644 index 000000000..1b5928cff --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go @@ -0,0 +1,879 @@ +// mkerrors.sh -Wall -Werror -static -I/tmp/include +// Code generated by the command above; see README.md. DO NOT EDIT. + +//go:build ppc && linux +// +build ppc,linux + +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/_const.go + +package unix + +import "syscall" + +const ( + B1000000 = 0x17 + B115200 = 0x11 + B1152000 = 0x18 + B1500000 = 0x19 + B2000000 = 0x1a + B230400 = 0x12 + B2500000 = 0x1b + B3000000 = 0x1c + B3500000 = 0x1d + B4000000 = 0x1e + B460800 = 0x13 + B500000 = 0x14 + B57600 = 0x10 + B576000 = 0x15 + B921600 = 0x16 + BLKBSZGET = 0x40041270 + BLKBSZSET = 0x80041271 + BLKFLSBUF = 0x20001261 + BLKFRAGET = 0x20001265 + BLKFRASET = 0x20001264 + BLKGETSIZE = 0x20001260 + BLKGETSIZE64 = 0x40041272 + BLKPBSZGET = 0x2000127b + BLKRAGET = 0x20001263 + BLKRASET = 0x20001262 + BLKROGET = 0x2000125e + BLKROSET = 0x2000125d + BLKRRPART = 0x2000125f + BLKSECTGET = 0x20001267 + BLKSECTSET = 0x20001266 + BLKSSZGET = 0x20001268 + BOTHER = 0x1f + BS1 = 0x8000 + BSDLY = 0x8000 + CBAUD = 0xff + CBAUDEX = 0x0 + CIBAUD = 0xff0000 + CLOCAL = 0x8000 + CR1 = 0x1000 + CR2 = 0x2000 + CR3 = 0x3000 + CRDLY = 0x3000 + CREAD = 0x800 + CS6 = 0x100 + CS7 = 0x200 + CS8 = 0x300 + CSIZE = 0x300 + CSTOPB = 0x400 + ECCGETLAYOUT = 0x41484d11 + ECCGETSTATS = 0x40104d12 + ECHOCTL = 0x40 + ECHOE = 0x2 + ECHOK = 0x4 + ECHOKE = 0x1 + ECHONL = 0x10 + ECHOPRT = 0x20 + EFD_CLOEXEC = 0x80000 + EFD_NONBLOCK = 0x800 + EPOLL_CLOEXEC = 0x80000 + EXTPROC = 0x10000000 + FF1 = 0x4000 + FFDLY = 0x4000 + FICLONE = 0x80049409 + FICLONERANGE = 0x8020940d + FLUSHO = 0x800000 + FS_IOC_ENABLE_VERITY = 0x80806685 + FS_IOC_GETFLAGS = 0x40046601 + FS_IOC_GET_ENCRYPTION_NONCE = 0x4010661b + FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615 + FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614 + FS_IOC_SETFLAGS = 0x80046602 + FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613 + F_GETLK = 0xc + F_GETLK64 = 0xc + F_GETOWN = 0x9 + F_RDLCK = 0x0 + F_SETLK = 0xd + F_SETLK64 = 0xd + F_SETLKW = 0xe + F_SETLKW64 = 0xe + F_SETOWN = 0x8 + F_UNLCK = 0x2 + F_WRLCK = 0x1 + HIDIOCGRAWINFO = 0x40084803 + HIDIOCGRDESC = 0x50044802 + HIDIOCGRDESCSIZE = 0x40044801 + HUPCL = 0x4000 + ICANON = 0x100 + IEXTEN = 
0x400 + IN_CLOEXEC = 0x80000 + IN_NONBLOCK = 0x800 + IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + ISIG = 0x80 + IUCLC = 0x1000 + IXOFF = 0x400 + IXON = 0x200 + MAP_ANON = 0x20 + MAP_ANONYMOUS = 0x20 + MAP_DENYWRITE = 0x800 + MAP_EXECUTABLE = 0x1000 + MAP_GROWSDOWN = 0x100 + MAP_HUGETLB = 0x40000 + MAP_LOCKED = 0x80 + MAP_NONBLOCK = 0x10000 + MAP_NORESERVE = 0x40 + MAP_POPULATE = 0x8000 + MAP_STACK = 0x20000 + MAP_SYNC = 0x80000 + MCL_CURRENT = 0x2000 + MCL_FUTURE = 0x4000 + MCL_ONFAULT = 0x8000 + MEMERASE = 0x80084d02 + MEMERASE64 = 0x80104d14 + MEMGETBADBLOCK = 0x80084d0b + MEMGETINFO = 0x40204d01 + MEMGETOOBSEL = 0x40c84d0a + MEMGETREGIONCOUNT = 0x40044d07 + MEMISLOCKED = 0x40084d17 + MEMLOCK = 0x80084d05 + MEMREADOOB = 0xc00c4d04 + MEMSETBADBLOCK = 0x80084d0c + MEMUNLOCK = 0x80084d06 + MEMWRITEOOB = 0xc00c4d03 + MTDFILEMODE = 0x20004d13 + NFDBITS = 0x20 + NL2 = 0x200 + NL3 = 0x300 + NLDLY = 0x300 + NOFLSH = 0x80000000 + NS_GET_NSTYPE = 0x2000b703 + NS_GET_OWNER_UID = 0x2000b704 + NS_GET_PARENT = 0x2000b702 + NS_GET_USERNS = 0x2000b701 + OLCUC = 0x4 + ONLCR = 0x2 + OTPGETREGIONCOUNT = 0x80044d0e + OTPGETREGIONINFO = 0x800c4d0f + OTPLOCK = 0x400c4d10 + OTPSELECT = 0x40044d0d + O_APPEND = 0x400 + O_ASYNC = 0x2000 + O_CLOEXEC = 0x80000 + O_CREAT = 0x40 + O_DIRECT = 0x20000 + O_DIRECTORY = 0x4000 + O_DSYNC = 0x1000 + O_EXCL = 0x80 + O_FSYNC = 0x101000 + O_LARGEFILE = 0x10000 + O_NDELAY = 0x800 + O_NOATIME = 0x40000 + O_NOCTTY = 0x100 + O_NOFOLLOW = 0x8000 + O_NONBLOCK = 0x800 + O_PATH = 0x200000 + O_RSYNC = 0x101000 + O_SYNC = 0x101000 + O_TMPFILE = 0x404000 + O_TRUNC = 0x200 + PARENB = 0x1000 + PARODD = 0x2000 + PENDIN = 0x20000000 + PERF_EVENT_IOC_DISABLE = 0x20002401 + PERF_EVENT_IOC_ENABLE = 0x20002400 + PERF_EVENT_IOC_ID = 0x40042407 + PERF_EVENT_IOC_MODIFY_ATTRIBUTES = 0x8004240b + PERF_EVENT_IOC_PAUSE_OUTPUT = 0x80042409 + PERF_EVENT_IOC_PERIOD = 0x80082404 + PERF_EVENT_IOC_QUERY_BPF = 0xc004240a + PERF_EVENT_IOC_REFRESH = 0x20002402 + PERF_EVENT_IOC_RESET = 
0x20002403 + PERF_EVENT_IOC_SET_BPF = 0x80042408 + PERF_EVENT_IOC_SET_FILTER = 0x80042406 + PERF_EVENT_IOC_SET_OUTPUT = 0x20002405 + PPPIOCATTACH = 0x8004743d + PPPIOCATTCHAN = 0x80047438 + PPPIOCBRIDGECHAN = 0x80047435 + PPPIOCCONNECT = 0x8004743a + PPPIOCDETACH = 0x8004743c + PPPIOCDISCONN = 0x20007439 + PPPIOCGASYNCMAP = 0x40047458 + PPPIOCGCHAN = 0x40047437 + PPPIOCGDEBUG = 0x40047441 + PPPIOCGFLAGS = 0x4004745a + PPPIOCGIDLE = 0x4008743f + PPPIOCGIDLE32 = 0x4008743f + PPPIOCGIDLE64 = 0x4010743f + PPPIOCGL2TPSTATS = 0x40487436 + PPPIOCGMRU = 0x40047453 + PPPIOCGRASYNCMAP = 0x40047455 + PPPIOCGUNIT = 0x40047456 + PPPIOCGXASYNCMAP = 0x40207450 + PPPIOCSACTIVE = 0x80087446 + PPPIOCSASYNCMAP = 0x80047457 + PPPIOCSCOMPRESS = 0x800c744d + PPPIOCSDEBUG = 0x80047440 + PPPIOCSFLAGS = 0x80047459 + PPPIOCSMAXCID = 0x80047451 + PPPIOCSMRRU = 0x8004743b + PPPIOCSMRU = 0x80047452 + PPPIOCSNPMODE = 0x8008744b + PPPIOCSPASS = 0x80087447 + PPPIOCSRASYNCMAP = 0x80047454 + PPPIOCSXASYNCMAP = 0x8020744f + PPPIOCUNBRIDGECHAN = 0x20007434 + PPPIOCXFERUNIT = 0x2000744e + PROT_SAO = 0x10 + PR_SET_PTRACER_ANY = 0xffffffff + PTRACE_GETEVRREGS = 0x14 + PTRACE_GETFPREGS = 0xe + PTRACE_GETREGS64 = 0x16 + PTRACE_GETVRREGS = 0x12 + PTRACE_GETVSRREGS = 0x1b + PTRACE_GET_DEBUGREG = 0x19 + PTRACE_SETEVRREGS = 0x15 + PTRACE_SETFPREGS = 0xf + PTRACE_SETREGS64 = 0x17 + PTRACE_SETVRREGS = 0x13 + PTRACE_SETVSRREGS = 0x1c + PTRACE_SET_DEBUGREG = 0x1a + PTRACE_SINGLEBLOCK = 0x100 + PTRACE_SYSEMU = 0x1d + PTRACE_SYSEMU_SINGLESTEP = 0x1e + PT_CCR = 0x26 + PT_CTR = 0x23 + PT_DAR = 0x29 + PT_DSCR = 0x2c + PT_DSISR = 0x2a + PT_FPR0 = 0x30 + PT_FPR31 = 0x6e + PT_FPSCR = 0x71 + PT_LNK = 0x24 + PT_MQ = 0x27 + PT_MSR = 0x21 + PT_NIP = 0x20 + PT_ORIG_R3 = 0x22 + PT_R0 = 0x0 + PT_R1 = 0x1 + PT_R10 = 0xa + PT_R11 = 0xb + PT_R12 = 0xc + PT_R13 = 0xd + PT_R14 = 0xe + PT_R15 = 0xf + PT_R16 = 0x10 + PT_R17 = 0x11 + PT_R18 = 0x12 + PT_R19 = 0x13 + PT_R2 = 0x2 + PT_R20 = 0x14 + PT_R21 = 0x15 + PT_R22 = 0x16 + PT_R23 = 
0x17 + PT_R24 = 0x18 + PT_R25 = 0x19 + PT_R26 = 0x1a + PT_R27 = 0x1b + PT_R28 = 0x1c + PT_R29 = 0x1d + PT_R3 = 0x3 + PT_R30 = 0x1e + PT_R31 = 0x1f + PT_R4 = 0x4 + PT_R5 = 0x5 + PT_R6 = 0x6 + PT_R7 = 0x7 + PT_R8 = 0x8 + PT_R9 = 0x9 + PT_REGS_COUNT = 0x2c + PT_RESULT = 0x2b + PT_TRAP = 0x28 + PT_XER = 0x25 + RLIMIT_AS = 0x9 + RLIMIT_MEMLOCK = 0x8 + RLIMIT_NOFILE = 0x7 + RLIMIT_NPROC = 0x6 + RLIMIT_RSS = 0x5 + RNDADDENTROPY = 0x80085203 + RNDADDTOENTCNT = 0x80045201 + RNDCLEARPOOL = 0x20005206 + RNDGETENTCNT = 0x40045200 + RNDGETPOOL = 0x40085202 + RNDRESEEDCRNG = 0x20005207 + RNDZAPENTCNT = 0x20005204 + RTC_AIE_OFF = 0x20007002 + RTC_AIE_ON = 0x20007001 + RTC_ALM_READ = 0x40247008 + RTC_ALM_SET = 0x80247007 + RTC_EPOCH_READ = 0x4004700d + RTC_EPOCH_SET = 0x8004700e + RTC_IRQP_READ = 0x4004700b + RTC_IRQP_SET = 0x8004700c + RTC_PIE_OFF = 0x20007006 + RTC_PIE_ON = 0x20007005 + RTC_PLL_GET = 0x401c7011 + RTC_PLL_SET = 0x801c7012 + RTC_RD_TIME = 0x40247009 + RTC_SET_TIME = 0x8024700a + RTC_UIE_OFF = 0x20007004 + RTC_UIE_ON = 0x20007003 + RTC_VL_CLR = 0x20007014 + RTC_VL_READ = 0x40047013 + RTC_WIE_OFF = 0x20007010 + RTC_WIE_ON = 0x2000700f + RTC_WKALM_RD = 0x40287010 + RTC_WKALM_SET = 0x8028700f + SCM_TIMESTAMPING = 0x25 + SCM_TIMESTAMPING_OPT_STATS = 0x36 + SCM_TIMESTAMPING_PKTINFO = 0x3a + SCM_TIMESTAMPNS = 0x23 + SCM_TXTIME = 0x3d + SCM_WIFI_STATUS = 0x29 + SFD_CLOEXEC = 0x80000 + SFD_NONBLOCK = 0x800 + SIOCATMARK = 0x8905 + SIOCGPGRP = 0x8904 + SIOCGSTAMPNS_NEW = 0x40108907 + SIOCGSTAMP_NEW = 0x40108906 + SIOCINQ = 0x4004667f + SIOCOUTQ = 0x40047473 + SIOCSPGRP = 0x8902 + SOCK_CLOEXEC = 0x80000 + SOCK_DGRAM = 0x2 + SOCK_NONBLOCK = 0x800 + SOCK_STREAM = 0x1 + SOL_SOCKET = 0x1 + SO_ACCEPTCONN = 0x1e + SO_ATTACH_BPF = 0x32 + SO_ATTACH_REUSEPORT_CBPF = 0x33 + SO_ATTACH_REUSEPORT_EBPF = 0x34 + SO_BINDTODEVICE = 0x19 + SO_BINDTOIFINDEX = 0x3e + SO_BPF_EXTENSIONS = 0x30 + SO_BROADCAST = 0x6 + SO_BSDCOMPAT = 0xe + SO_BUSY_POLL = 0x2e + SO_BUSY_POLL_BUDGET = 0x46 + 
SO_CNX_ADVICE = 0x35 + SO_COOKIE = 0x39 + SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DOMAIN = 0x27 + SO_DONTROUTE = 0x5 + SO_ERROR = 0x4 + SO_INCOMING_CPU = 0x31 + SO_INCOMING_NAPI_ID = 0x38 + SO_KEEPALIVE = 0x9 + SO_LINGER = 0xd + SO_LOCK_FILTER = 0x2c + SO_MARK = 0x24 + SO_MAX_PACING_RATE = 0x2f + SO_MEMINFO = 0x37 + SO_NOFCS = 0x2b + SO_OOBINLINE = 0xa + SO_PASSCRED = 0x14 + SO_PASSSEC = 0x22 + SO_PEEK_OFF = 0x2a + SO_PEERCRED = 0x15 + SO_PEERGROUPS = 0x3b + SO_PEERSEC = 0x1f + SO_PREFER_BUSY_POLL = 0x45 + SO_PROTOCOL = 0x26 + SO_RCVBUF = 0x8 + SO_RCVBUFFORCE = 0x21 + SO_RCVLOWAT = 0x10 + SO_RCVTIMEO = 0x12 + SO_RCVTIMEO_NEW = 0x42 + SO_RCVTIMEO_OLD = 0x12 + SO_REUSEADDR = 0x2 + SO_REUSEPORT = 0xf + SO_RXQ_OVFL = 0x28 + SO_SECURITY_AUTHENTICATION = 0x16 + SO_SECURITY_ENCRYPTION_NETWORK = 0x18 + SO_SECURITY_ENCRYPTION_TRANSPORT = 0x17 + SO_SELECT_ERR_QUEUE = 0x2d + SO_SNDBUF = 0x7 + SO_SNDBUFFORCE = 0x20 + SO_SNDLOWAT = 0x11 + SO_SNDTIMEO = 0x13 + SO_SNDTIMEO_NEW = 0x43 + SO_SNDTIMEO_OLD = 0x13 + SO_TIMESTAMPING = 0x25 + SO_TIMESTAMPING_NEW = 0x41 + SO_TIMESTAMPING_OLD = 0x25 + SO_TIMESTAMPNS = 0x23 + SO_TIMESTAMPNS_NEW = 0x40 + SO_TIMESTAMPNS_OLD = 0x23 + SO_TIMESTAMP_NEW = 0x3f + SO_TXTIME = 0x3d + SO_TYPE = 0x3 + SO_WIFI_STATUS = 0x29 + SO_ZEROCOPY = 0x3c + TAB1 = 0x400 + TAB2 = 0x800 + TAB3 = 0xc00 + TABDLY = 0xc00 + TCFLSH = 0x2000741f + TCGETA = 0x40147417 + TCGETS = 0x402c7413 + TCSAFLUSH = 0x2 + TCSBRK = 0x2000741d + TCSBRKP = 0x5425 + TCSETA = 0x80147418 + TCSETAF = 0x8014741c + TCSETAW = 0x80147419 + TCSETS = 0x802c7414 + TCSETSF = 0x802c7416 + TCSETSW = 0x802c7415 + TCXONC = 0x2000741e + TFD_CLOEXEC = 0x80000 + TFD_NONBLOCK = 0x800 + TIOCCBRK = 0x5428 + TIOCCONS = 0x541d + TIOCEXCL = 0x540c + TIOCGDEV = 0x40045432 + TIOCGETC = 0x40067412 + TIOCGETD = 0x5424 + TIOCGETP = 0x40067408 + TIOCGEXCL = 0x40045440 + TIOCGICOUNT = 0x545d + TIOCGISO7816 = 0x40285442 + TIOCGLCKTRMIOS = 0x5456 + TIOCGLTC = 0x40067474 + TIOCGPGRP = 0x40047477 + TIOCGPKT = 0x40045438 + 
TIOCGPTLCK = 0x40045439 + TIOCGPTN = 0x40045430 + TIOCGPTPEER = 0x20005441 + TIOCGRS485 = 0x542e + TIOCGSERIAL = 0x541e + TIOCGSID = 0x5429 + TIOCGSOFTCAR = 0x5419 + TIOCGWINSZ = 0x40087468 + TIOCINQ = 0x4004667f + TIOCLINUX = 0x541c + TIOCMBIC = 0x5417 + TIOCMBIS = 0x5416 + TIOCMGET = 0x5415 + TIOCMIWAIT = 0x545c + TIOCMSET = 0x5418 + TIOCM_CAR = 0x40 + TIOCM_CD = 0x40 + TIOCM_CTS = 0x20 + TIOCM_DSR = 0x100 + TIOCM_LOOP = 0x8000 + TIOCM_OUT1 = 0x2000 + TIOCM_OUT2 = 0x4000 + TIOCM_RI = 0x80 + TIOCM_RNG = 0x80 + TIOCM_SR = 0x10 + TIOCM_ST = 0x8 + TIOCNOTTY = 0x5422 + TIOCNXCL = 0x540d + TIOCOUTQ = 0x40047473 + TIOCPKT = 0x5420 + TIOCSBRK = 0x5427 + TIOCSCTTY = 0x540e + TIOCSERCONFIG = 0x5453 + TIOCSERGETLSR = 0x5459 + TIOCSERGETMULTI = 0x545a + TIOCSERGSTRUCT = 0x5458 + TIOCSERGWILD = 0x5454 + TIOCSERSETMULTI = 0x545b + TIOCSERSWILD = 0x5455 + TIOCSER_TEMT = 0x1 + TIOCSETC = 0x80067411 + TIOCSETD = 0x5423 + TIOCSETN = 0x8006740a + TIOCSETP = 0x80067409 + TIOCSIG = 0x80045436 + TIOCSISO7816 = 0xc0285443 + TIOCSLCKTRMIOS = 0x5457 + TIOCSLTC = 0x80067475 + TIOCSPGRP = 0x80047476 + TIOCSPTLCK = 0x80045431 + TIOCSRS485 = 0x542f + TIOCSSERIAL = 0x541f + TIOCSSOFTCAR = 0x541a + TIOCSTART = 0x2000746e + TIOCSTI = 0x5412 + TIOCSTOP = 0x2000746f + TIOCSWINSZ = 0x80087467 + TIOCVHANGUP = 0x5437 + TOSTOP = 0x400000 + TUNATTACHFILTER = 0x800854d5 + TUNDETACHFILTER = 0x800854d6 + TUNGETDEVNETNS = 0x200054e3 + TUNGETFEATURES = 0x400454cf + TUNGETFILTER = 0x400854db + TUNGETIFF = 0x400454d2 + TUNGETSNDBUF = 0x400454d3 + TUNGETVNETBE = 0x400454df + TUNGETVNETHDRSZ = 0x400454d7 + TUNGETVNETLE = 0x400454dd + TUNSETCARRIER = 0x800454e2 + TUNSETDEBUG = 0x800454c9 + TUNSETFILTEREBPF = 0x400454e1 + TUNSETGROUP = 0x800454ce + TUNSETIFF = 0x800454ca + TUNSETIFINDEX = 0x800454da + TUNSETLINK = 0x800454cd + TUNSETNOCSUM = 0x800454c8 + TUNSETOFFLOAD = 0x800454d0 + TUNSETOWNER = 0x800454cc + TUNSETPERSIST = 0x800454cb + TUNSETQUEUE = 0x800454d9 + TUNSETSNDBUF = 0x800454d4 + TUNSETSTEERINGEBPF = 
0x400454e0 + TUNSETTXFILTER = 0x800454d1 + TUNSETVNETBE = 0x800454de + TUNSETVNETHDRSZ = 0x800454d8 + TUNSETVNETLE = 0x800454dc + UBI_IOCATT = 0x80186f40 + UBI_IOCDET = 0x80046f41 + UBI_IOCEBCH = 0x80044f02 + UBI_IOCEBER = 0x80044f01 + UBI_IOCEBISMAP = 0x40044f05 + UBI_IOCEBMAP = 0x80084f03 + UBI_IOCEBUNMAP = 0x80044f04 + UBI_IOCMKVOL = 0x80986f00 + UBI_IOCRMVOL = 0x80046f01 + UBI_IOCRNVOL = 0x91106f03 + UBI_IOCRPEB = 0x80046f04 + UBI_IOCRSVOL = 0x800c6f02 + UBI_IOCSETVOLPROP = 0x80104f06 + UBI_IOCSPEB = 0x80046f05 + UBI_IOCVOLCRBLK = 0x80804f07 + UBI_IOCVOLRMBLK = 0x20004f08 + UBI_IOCVOLUP = 0x80084f00 + VDISCARD = 0x10 + VEOF = 0x4 + VEOL = 0x6 + VEOL2 = 0x8 + VMIN = 0x5 + VREPRINT = 0xb + VSTART = 0xd + VSTOP = 0xe + VSUSP = 0xc + VSWTC = 0x9 + VT1 = 0x10000 + VTDLY = 0x10000 + VTIME = 0x7 + VWERASE = 0xa + WDIOC_GETBOOTSTATUS = 0x40045702 + WDIOC_GETPRETIMEOUT = 0x40045709 + WDIOC_GETSTATUS = 0x40045701 + WDIOC_GETSUPPORT = 0x40285700 + WDIOC_GETTEMP = 0x40045703 + WDIOC_GETTIMELEFT = 0x4004570a + WDIOC_GETTIMEOUT = 0x40045707 + WDIOC_KEEPALIVE = 0x40045705 + WDIOC_SETOPTIONS = 0x40045704 + WORDSIZE = 0x20 + XCASE = 0x4000 + XTABS = 0xc00 + _HIDIOCGRAWNAME = 0x40804804 + _HIDIOCGRAWPHYS = 0x40404805 + _HIDIOCGRAWUNIQ = 0x40404808 +) + +// Errors +const ( + EADDRINUSE = syscall.Errno(0x62) + EADDRNOTAVAIL = syscall.Errno(0x63) + EADV = syscall.Errno(0x44) + EAFNOSUPPORT = syscall.Errno(0x61) + EALREADY = syscall.Errno(0x72) + EBADE = syscall.Errno(0x34) + EBADFD = syscall.Errno(0x4d) + EBADMSG = syscall.Errno(0x4a) + EBADR = syscall.Errno(0x35) + EBADRQC = syscall.Errno(0x38) + EBADSLT = syscall.Errno(0x39) + EBFONT = syscall.Errno(0x3b) + ECANCELED = syscall.Errno(0x7d) + ECHRNG = syscall.Errno(0x2c) + ECOMM = syscall.Errno(0x46) + ECONNABORTED = syscall.Errno(0x67) + ECONNREFUSED = syscall.Errno(0x6f) + ECONNRESET = syscall.Errno(0x68) + EDEADLK = syscall.Errno(0x23) + EDEADLOCK = syscall.Errno(0x3a) + EDESTADDRREQ = syscall.Errno(0x59) + EDOTDOT = 
syscall.Errno(0x49) + EDQUOT = syscall.Errno(0x7a) + EHOSTDOWN = syscall.Errno(0x70) + EHOSTUNREACH = syscall.Errno(0x71) + EHWPOISON = syscall.Errno(0x85) + EIDRM = syscall.Errno(0x2b) + EILSEQ = syscall.Errno(0x54) + EINPROGRESS = syscall.Errno(0x73) + EISCONN = syscall.Errno(0x6a) + EISNAM = syscall.Errno(0x78) + EKEYEXPIRED = syscall.Errno(0x7f) + EKEYREJECTED = syscall.Errno(0x81) + EKEYREVOKED = syscall.Errno(0x80) + EL2HLT = syscall.Errno(0x33) + EL2NSYNC = syscall.Errno(0x2d) + EL3HLT = syscall.Errno(0x2e) + EL3RST = syscall.Errno(0x2f) + ELIBACC = syscall.Errno(0x4f) + ELIBBAD = syscall.Errno(0x50) + ELIBEXEC = syscall.Errno(0x53) + ELIBMAX = syscall.Errno(0x52) + ELIBSCN = syscall.Errno(0x51) + ELNRNG = syscall.Errno(0x30) + ELOOP = syscall.Errno(0x28) + EMEDIUMTYPE = syscall.Errno(0x7c) + EMSGSIZE = syscall.Errno(0x5a) + EMULTIHOP = syscall.Errno(0x48) + ENAMETOOLONG = syscall.Errno(0x24) + ENAVAIL = syscall.Errno(0x77) + ENETDOWN = syscall.Errno(0x64) + ENETRESET = syscall.Errno(0x66) + ENETUNREACH = syscall.Errno(0x65) + ENOANO = syscall.Errno(0x37) + ENOBUFS = syscall.Errno(0x69) + ENOCSI = syscall.Errno(0x32) + ENODATA = syscall.Errno(0x3d) + ENOKEY = syscall.Errno(0x7e) + ENOLCK = syscall.Errno(0x25) + ENOLINK = syscall.Errno(0x43) + ENOMEDIUM = syscall.Errno(0x7b) + ENOMSG = syscall.Errno(0x2a) + ENONET = syscall.Errno(0x40) + ENOPKG = syscall.Errno(0x41) + ENOPROTOOPT = syscall.Errno(0x5c) + ENOSR = syscall.Errno(0x3f) + ENOSTR = syscall.Errno(0x3c) + ENOSYS = syscall.Errno(0x26) + ENOTCONN = syscall.Errno(0x6b) + ENOTEMPTY = syscall.Errno(0x27) + ENOTNAM = syscall.Errno(0x76) + ENOTRECOVERABLE = syscall.Errno(0x83) + ENOTSOCK = syscall.Errno(0x58) + ENOTSUP = syscall.Errno(0x5f) + ENOTUNIQ = syscall.Errno(0x4c) + EOPNOTSUPP = syscall.Errno(0x5f) + EOVERFLOW = syscall.Errno(0x4b) + EOWNERDEAD = syscall.Errno(0x82) + EPFNOSUPPORT = syscall.Errno(0x60) + EPROTO = syscall.Errno(0x47) + EPROTONOSUPPORT = syscall.Errno(0x5d) + EPROTOTYPE = 
syscall.Errno(0x5b) + EREMCHG = syscall.Errno(0x4e) + EREMOTE = syscall.Errno(0x42) + EREMOTEIO = syscall.Errno(0x79) + ERESTART = syscall.Errno(0x55) + ERFKILL = syscall.Errno(0x84) + ESHUTDOWN = syscall.Errno(0x6c) + ESOCKTNOSUPPORT = syscall.Errno(0x5e) + ESRMNT = syscall.Errno(0x45) + ESTALE = syscall.Errno(0x74) + ESTRPIPE = syscall.Errno(0x56) + ETIME = syscall.Errno(0x3e) + ETIMEDOUT = syscall.Errno(0x6e) + ETOOMANYREFS = syscall.Errno(0x6d) + EUCLEAN = syscall.Errno(0x75) + EUNATCH = syscall.Errno(0x31) + EUSERS = syscall.Errno(0x57) + EXFULL = syscall.Errno(0x36) +) + +// Signals +const ( + SIGBUS = syscall.Signal(0x7) + SIGCHLD = syscall.Signal(0x11) + SIGCLD = syscall.Signal(0x11) + SIGCONT = syscall.Signal(0x12) + SIGIO = syscall.Signal(0x1d) + SIGPOLL = syscall.Signal(0x1d) + SIGPROF = syscall.Signal(0x1b) + SIGPWR = syscall.Signal(0x1e) + SIGSTKFLT = syscall.Signal(0x10) + SIGSTOP = syscall.Signal(0x13) + SIGSYS = syscall.Signal(0x1f) + SIGTSTP = syscall.Signal(0x14) + SIGTTIN = syscall.Signal(0x15) + SIGTTOU = syscall.Signal(0x16) + SIGURG = syscall.Signal(0x17) + SIGUSR1 = syscall.Signal(0xa) + SIGUSR2 = syscall.Signal(0xc) + SIGVTALRM = syscall.Signal(0x1a) + SIGWINCH = syscall.Signal(0x1c) + SIGXCPU = syscall.Signal(0x18) + SIGXFSZ = syscall.Signal(0x19) +) + +// Error table +var errorList = [...]struct { + num syscall.Errno + name string + desc string +}{ + {1, "EPERM", "operation not permitted"}, + {2, "ENOENT", "no such file or directory"}, + {3, "ESRCH", "no such process"}, + {4, "EINTR", "interrupted system call"}, + {5, "EIO", "input/output error"}, + {6, "ENXIO", "no such device or address"}, + {7, "E2BIG", "argument list too long"}, + {8, "ENOEXEC", "exec format error"}, + {9, "EBADF", "bad file descriptor"}, + {10, "ECHILD", "no child processes"}, + {11, "EAGAIN", "resource temporarily unavailable"}, + {12, "ENOMEM", "cannot allocate memory"}, + {13, "EACCES", "permission denied"}, + {14, "EFAULT", "bad address"}, + {15, "ENOTBLK", "block 
device required"}, + {16, "EBUSY", "device or resource busy"}, + {17, "EEXIST", "file exists"}, + {18, "EXDEV", "invalid cross-device link"}, + {19, "ENODEV", "no such device"}, + {20, "ENOTDIR", "not a directory"}, + {21, "EISDIR", "is a directory"}, + {22, "EINVAL", "invalid argument"}, + {23, "ENFILE", "too many open files in system"}, + {24, "EMFILE", "too many open files"}, + {25, "ENOTTY", "inappropriate ioctl for device"}, + {26, "ETXTBSY", "text file busy"}, + {27, "EFBIG", "file too large"}, + {28, "ENOSPC", "no space left on device"}, + {29, "ESPIPE", "illegal seek"}, + {30, "EROFS", "read-only file system"}, + {31, "EMLINK", "too many links"}, + {32, "EPIPE", "broken pipe"}, + {33, "EDOM", "numerical argument out of domain"}, + {34, "ERANGE", "numerical result out of range"}, + {35, "EDEADLK", "resource deadlock avoided"}, + {36, "ENAMETOOLONG", "file name too long"}, + {37, "ENOLCK", "no locks available"}, + {38, "ENOSYS", "function not implemented"}, + {39, "ENOTEMPTY", "directory not empty"}, + {40, "ELOOP", "too many levels of symbolic links"}, + {42, "ENOMSG", "no message of desired type"}, + {43, "EIDRM", "identifier removed"}, + {44, "ECHRNG", "channel number out of range"}, + {45, "EL2NSYNC", "level 2 not synchronized"}, + {46, "EL3HLT", "level 3 halted"}, + {47, "EL3RST", "level 3 reset"}, + {48, "ELNRNG", "link number out of range"}, + {49, "EUNATCH", "protocol driver not attached"}, + {50, "ENOCSI", "no CSI structure available"}, + {51, "EL2HLT", "level 2 halted"}, + {52, "EBADE", "invalid exchange"}, + {53, "EBADR", "invalid request descriptor"}, + {54, "EXFULL", "exchange full"}, + {55, "ENOANO", "no anode"}, + {56, "EBADRQC", "invalid request code"}, + {57, "EBADSLT", "invalid slot"}, + {58, "EDEADLOCK", "file locking deadlock error"}, + {59, "EBFONT", "bad font file format"}, + {60, "ENOSTR", "device not a stream"}, + {61, "ENODATA", "no data available"}, + {62, "ETIME", "timer expired"}, + {63, "ENOSR", "out of streams resources"}, + {64, 
"ENONET", "machine is not on the network"}, + {65, "ENOPKG", "package not installed"}, + {66, "EREMOTE", "object is remote"}, + {67, "ENOLINK", "link has been severed"}, + {68, "EADV", "advertise error"}, + {69, "ESRMNT", "srmount error"}, + {70, "ECOMM", "communication error on send"}, + {71, "EPROTO", "protocol error"}, + {72, "EMULTIHOP", "multihop attempted"}, + {73, "EDOTDOT", "RFS specific error"}, + {74, "EBADMSG", "bad message"}, + {75, "EOVERFLOW", "value too large for defined data type"}, + {76, "ENOTUNIQ", "name not unique on network"}, + {77, "EBADFD", "file descriptor in bad state"}, + {78, "EREMCHG", "remote address changed"}, + {79, "ELIBACC", "can not access a needed shared library"}, + {80, "ELIBBAD", "accessing a corrupted shared library"}, + {81, "ELIBSCN", ".lib section in a.out corrupted"}, + {82, "ELIBMAX", "attempting to link in too many shared libraries"}, + {83, "ELIBEXEC", "cannot exec a shared library directly"}, + {84, "EILSEQ", "invalid or incomplete multibyte or wide character"}, + {85, "ERESTART", "interrupted system call should be restarted"}, + {86, "ESTRPIPE", "streams pipe error"}, + {87, "EUSERS", "too many users"}, + {88, "ENOTSOCK", "socket operation on non-socket"}, + {89, "EDESTADDRREQ", "destination address required"}, + {90, "EMSGSIZE", "message too long"}, + {91, "EPROTOTYPE", "protocol wrong type for socket"}, + {92, "ENOPROTOOPT", "protocol not available"}, + {93, "EPROTONOSUPPORT", "protocol not supported"}, + {94, "ESOCKTNOSUPPORT", "socket type not supported"}, + {95, "ENOTSUP", "operation not supported"}, + {96, "EPFNOSUPPORT", "protocol family not supported"}, + {97, "EAFNOSUPPORT", "address family not supported by protocol"}, + {98, "EADDRINUSE", "address already in use"}, + {99, "EADDRNOTAVAIL", "cannot assign requested address"}, + {100, "ENETDOWN", "network is down"}, + {101, "ENETUNREACH", "network is unreachable"}, + {102, "ENETRESET", "network dropped connection on reset"}, + {103, "ECONNABORTED", "software 
caused connection abort"}, + {104, "ECONNRESET", "connection reset by peer"}, + {105, "ENOBUFS", "no buffer space available"}, + {106, "EISCONN", "transport endpoint is already connected"}, + {107, "ENOTCONN", "transport endpoint is not connected"}, + {108, "ESHUTDOWN", "cannot send after transport endpoint shutdown"}, + {109, "ETOOMANYREFS", "too many references: cannot splice"}, + {110, "ETIMEDOUT", "connection timed out"}, + {111, "ECONNREFUSED", "connection refused"}, + {112, "EHOSTDOWN", "host is down"}, + {113, "EHOSTUNREACH", "no route to host"}, + {114, "EALREADY", "operation already in progress"}, + {115, "EINPROGRESS", "operation now in progress"}, + {116, "ESTALE", "stale file handle"}, + {117, "EUCLEAN", "structure needs cleaning"}, + {118, "ENOTNAM", "not a XENIX named type file"}, + {119, "ENAVAIL", "no XENIX semaphores available"}, + {120, "EISNAM", "is a named type file"}, + {121, "EREMOTEIO", "remote I/O error"}, + {122, "EDQUOT", "disk quota exceeded"}, + {123, "ENOMEDIUM", "no medium found"}, + {124, "EMEDIUMTYPE", "wrong medium type"}, + {125, "ECANCELED", "operation canceled"}, + {126, "ENOKEY", "required key not available"}, + {127, "EKEYEXPIRED", "key has expired"}, + {128, "EKEYREVOKED", "key has been revoked"}, + {129, "EKEYREJECTED", "key was rejected by service"}, + {130, "EOWNERDEAD", "owner died"}, + {131, "ENOTRECOVERABLE", "state not recoverable"}, + {132, "ERFKILL", "operation not possible due to RF-kill"}, + {133, "EHWPOISON", "memory page has hardware error"}, +} + +// Signal table +var signalList = [...]struct { + num syscall.Signal + name string + desc string +}{ + {1, "SIGHUP", "hangup"}, + {2, "SIGINT", "interrupt"}, + {3, "SIGQUIT", "quit"}, + {4, "SIGILL", "illegal instruction"}, + {5, "SIGTRAP", "trace/breakpoint trap"}, + {6, "SIGABRT", "aborted"}, + {7, "SIGBUS", "bus error"}, + {8, "SIGFPE", "floating point exception"}, + {9, "SIGKILL", "killed"}, + {10, "SIGUSR1", "user defined signal 1"}, + {11, "SIGSEGV", "segmentation 
fault"}, + {12, "SIGUSR2", "user defined signal 2"}, + {13, "SIGPIPE", "broken pipe"}, + {14, "SIGALRM", "alarm clock"}, + {15, "SIGTERM", "terminated"}, + {16, "SIGSTKFLT", "stack fault"}, + {17, "SIGCHLD", "child exited"}, + {18, "SIGCONT", "continued"}, + {19, "SIGSTOP", "stopped (signal)"}, + {20, "SIGTSTP", "stopped"}, + {21, "SIGTTIN", "stopped (tty input)"}, + {22, "SIGTTOU", "stopped (tty output)"}, + {23, "SIGURG", "urgent I/O condition"}, + {24, "SIGXCPU", "CPU time limit exceeded"}, + {25, "SIGXFSZ", "file size limit exceeded"}, + {26, "SIGVTALRM", "virtual timer expired"}, + {27, "SIGPROF", "profiling timer expired"}, + {28, "SIGWINCH", "window changed"}, + {29, "SIGIO", "I/O possible"}, + {30, "SIGPWR", "power failure"}, + {31, "SIGSYS", "bad system call"}, +} diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index 973ad9346..f3a41d6ec 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -1,6 +1,7 @@ // mkerrors.sh -Wall -Werror -static -I/tmp/include // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build ppc64 && linux // +build ppc64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
@@ -59,6 +60,8 @@ const ( CS8 = 0x300 CSIZE = 0x300 CSTOPB = 0x400 + ECCGETLAYOUT = 0x41484d11 + ECCGETSTATS = 0x40104d12 ECHOCTL = 0x40 ECHOE = 0x2 ECHOK = 0x4 @@ -92,6 +95,9 @@ const ( F_SETOWN = 0x8 F_UNLCK = 0x2 F_WRLCK = 0x1 + HIDIOCGRAWINFO = 0x40084803 + HIDIOCGRDESC = 0x50044802 + HIDIOCGRDESCSIZE = 0x40044801 HUPCL = 0x4000 ICANON = 0x100 IEXTEN = 0x400 @@ -117,6 +123,19 @@ const ( MCL_CURRENT = 0x2000 MCL_FUTURE = 0x4000 MCL_ONFAULT = 0x8000 + MEMERASE = 0x80084d02 + MEMERASE64 = 0x80104d14 + MEMGETBADBLOCK = 0x80084d0b + MEMGETINFO = 0x40204d01 + MEMGETOOBSEL = 0x40c84d0a + MEMGETREGIONCOUNT = 0x40044d07 + MEMISLOCKED = 0x40084d17 + MEMLOCK = 0x80084d05 + MEMREADOOB = 0xc0104d04 + MEMSETBADBLOCK = 0x80084d0c + MEMUNLOCK = 0x80084d06 + MEMWRITEOOB = 0xc0104d03 + MTDFILEMODE = 0x20004d13 NFDBITS = 0x40 NL2 = 0x200 NL3 = 0x300 @@ -128,6 +147,10 @@ const ( NS_GET_USERNS = 0x2000b701 OLCUC = 0x4 ONLCR = 0x2 + OTPGETREGIONCOUNT = 0x80044d0e + OTPGETREGIONINFO = 0x800c4d0f + OTPLOCK = 0x400c4d10 + OTPSELECT = 0x40044d0d O_APPEND = 0x400 O_ASYNC = 0x2000 O_CLOEXEC = 0x80000 @@ -165,6 +188,7 @@ const ( PERF_EVENT_IOC_SET_OUTPUT = 0x20002405 PPPIOCATTACH = 0x8004743d PPPIOCATTCHAN = 0x80047438 + PPPIOCBRIDGECHAN = 0x80047435 PPPIOCCONNECT = 0x8004743a PPPIOCDETACH = 0x8004743c PPPIOCDISCONN = 0x20007439 @@ -192,6 +216,7 @@ const ( PPPIOCSPASS = 0x80107447 PPPIOCSRASYNCMAP = 0x80047454 PPPIOCSXASYNCMAP = 0x8020744f + PPPIOCUNBRIDGECHAN = 0x20007434 PPPIOCXFERUNIT = 0x2000744e PROT_SAO = 0x10 PR_SET_PTRACER_ANY = 0xffffffffffffffff @@ -327,6 +352,7 @@ const ( SO_BROADCAST = 0x6 SO_BSDCOMPAT = 0xe SO_BUSY_POLL = 0x2e + SO_BUSY_POLL_BUDGET = 0x46 SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 @@ -349,6 +375,7 @@ const ( SO_PEERCRED = 0x15 SO_PEERGROUPS = 0x3b SO_PEERSEC = 0x1f + SO_PREFER_BUSY_POLL = 0x45 SO_PROTOCOL = 0x26 SO_RCVBUF = 0x8 SO_RCVBUFFORCE = 0x21 @@ -543,6 +570,9 @@ const ( WORDSIZE = 0x40 XCASE = 0x4000 XTABS = 0xc00 + 
_HIDIOCGRAWNAME = 0x40804804 + _HIDIOCGRAWPHYS = 0x40404805 + _HIDIOCGRAWUNIQ = 0x40404808 ) // Errors diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index 70a7406ba..6a5a555d5 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -1,6 +1,7 @@ // mkerrors.sh -Wall -Werror -static -I/tmp/include // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build ppc64le && linux // +build ppc64le,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. @@ -59,6 +60,8 @@ const ( CS8 = 0x300 CSIZE = 0x300 CSTOPB = 0x400 + ECCGETLAYOUT = 0x41484d11 + ECCGETSTATS = 0x40104d12 ECHOCTL = 0x40 ECHOE = 0x2 ECHOK = 0x4 @@ -92,6 +95,9 @@ const ( F_SETOWN = 0x8 F_UNLCK = 0x2 F_WRLCK = 0x1 + HIDIOCGRAWINFO = 0x40084803 + HIDIOCGRDESC = 0x50044802 + HIDIOCGRDESCSIZE = 0x40044801 HUPCL = 0x4000 ICANON = 0x100 IEXTEN = 0x400 @@ -117,6 +123,19 @@ const ( MCL_CURRENT = 0x2000 MCL_FUTURE = 0x4000 MCL_ONFAULT = 0x8000 + MEMERASE = 0x80084d02 + MEMERASE64 = 0x80104d14 + MEMGETBADBLOCK = 0x80084d0b + MEMGETINFO = 0x40204d01 + MEMGETOOBSEL = 0x40c84d0a + MEMGETREGIONCOUNT = 0x40044d07 + MEMISLOCKED = 0x40084d17 + MEMLOCK = 0x80084d05 + MEMREADOOB = 0xc0104d04 + MEMSETBADBLOCK = 0x80084d0c + MEMUNLOCK = 0x80084d06 + MEMWRITEOOB = 0xc0104d03 + MTDFILEMODE = 0x20004d13 NFDBITS = 0x40 NL2 = 0x200 NL3 = 0x300 @@ -128,6 +147,10 @@ const ( NS_GET_USERNS = 0x2000b701 OLCUC = 0x4 ONLCR = 0x2 + OTPGETREGIONCOUNT = 0x80044d0e + OTPGETREGIONINFO = 0x800c4d0f + OTPLOCK = 0x400c4d10 + OTPSELECT = 0x40044d0d O_APPEND = 0x400 O_ASYNC = 0x2000 O_CLOEXEC = 0x80000 @@ -165,6 +188,7 @@ const ( PERF_EVENT_IOC_SET_OUTPUT = 0x20002405 PPPIOCATTACH = 0x8004743d PPPIOCATTCHAN = 0x80047438 + PPPIOCBRIDGECHAN = 0x80047435 PPPIOCCONNECT = 0x8004743a PPPIOCDETACH = 0x8004743c PPPIOCDISCONN = 0x20007439 @@ -192,6 +216,7 @@ const ( PPPIOCSPASS = 0x80107447 
PPPIOCSRASYNCMAP = 0x80047454 PPPIOCSXASYNCMAP = 0x8020744f + PPPIOCUNBRIDGECHAN = 0x20007434 PPPIOCXFERUNIT = 0x2000744e PROT_SAO = 0x10 PR_SET_PTRACER_ANY = 0xffffffffffffffff @@ -327,6 +352,7 @@ const ( SO_BROADCAST = 0x6 SO_BSDCOMPAT = 0xe SO_BUSY_POLL = 0x2e + SO_BUSY_POLL_BUDGET = 0x46 SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 @@ -349,6 +375,7 @@ const ( SO_PEERCRED = 0x15 SO_PEERGROUPS = 0x3b SO_PEERSEC = 0x1f + SO_PREFER_BUSY_POLL = 0x45 SO_PROTOCOL = 0x26 SO_RCVBUF = 0x8 SO_RCVBUFFORCE = 0x21 @@ -543,6 +570,9 @@ const ( WORDSIZE = 0x40 XCASE = 0x4000 XTABS = 0xc00 + _HIDIOCGRAWNAME = 0x40804804 + _HIDIOCGRAWPHYS = 0x40404805 + _HIDIOCGRAWUNIQ = 0x40404808 ) // Errors diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index b1bf7997c..a4da67edb 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -1,6 +1,7 @@ // mkerrors.sh -Wall -Werror -static -I/tmp/include // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build riscv64 && linux // +build riscv64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
@@ -59,6 +60,8 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + ECCGETLAYOUT = 0x81484d11 + ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 ECHOE = 0x10 ECHOK = 0x20 @@ -92,6 +95,9 @@ const ( F_SETOWN = 0x8 F_UNLCK = 0x2 F_WRLCK = 0x1 + HIDIOCGRAWINFO = 0x80084803 + HIDIOCGRDESC = 0x90044802 + HIDIOCGRDESCSIZE = 0x80044801 HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 @@ -117,6 +123,19 @@ const ( MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 MCL_ONFAULT = 0x4 + MEMERASE = 0x40084d02 + MEMERASE64 = 0x40104d14 + MEMGETBADBLOCK = 0x40084d0b + MEMGETINFO = 0x80204d01 + MEMGETOOBSEL = 0x80c84d0a + MEMGETREGIONCOUNT = 0x80044d07 + MEMISLOCKED = 0x80084d17 + MEMLOCK = 0x40084d05 + MEMREADOOB = 0xc0104d04 + MEMSETBADBLOCK = 0x40084d0c + MEMUNLOCK = 0x40084d06 + MEMWRITEOOB = 0xc0104d03 + MTDFILEMODE = 0x4d13 NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 @@ -126,6 +145,10 @@ const ( NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 + OTPGETREGIONCOUNT = 0x40044d0e + OTPGETREGIONINFO = 0x400c4d0f + OTPLOCK = 0x800c4d10 + OTPSELECT = 0x80044d0d O_APPEND = 0x400 O_ASYNC = 0x2000 O_CLOEXEC = 0x80000 @@ -163,6 +186,7 @@ const ( PERF_EVENT_IOC_SET_OUTPUT = 0x2405 PPPIOCATTACH = 0x4004743d PPPIOCATTCHAN = 0x40047438 + PPPIOCBRIDGECHAN = 0x40047435 PPPIOCCONNECT = 0x4004743a PPPIOCDETACH = 0x4004743c PPPIOCDISCONN = 0x7439 @@ -190,6 +214,7 @@ const ( PPPIOCSPASS = 0x40107447 PPPIOCSRASYNCMAP = 0x40047454 PPPIOCSXASYNCMAP = 0x4020744f + PPPIOCUNBRIDGECHAN = 0x7434 PPPIOCXFERUNIT = 0x744e PR_SET_PTRACER_ANY = 0xffffffffffffffff RLIMIT_AS = 0x9 @@ -256,6 +281,7 @@ const ( SO_BROADCAST = 0x6 SO_BSDCOMPAT = 0xe SO_BUSY_POLL = 0x2e + SO_BUSY_POLL_BUDGET = 0x46 SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 @@ -278,6 +304,7 @@ const ( SO_PEERCRED = 0x11 SO_PEERGROUPS = 0x3b SO_PEERSEC = 0x1f + SO_PREFER_BUSY_POLL = 0x45 SO_PROTOCOL = 0x26 SO_RCVBUF = 0x8 SO_RCVBUFFORCE = 0x21 @@ -468,6 +495,9 @@ const ( WORDSIZE = 0x40 XCASE = 0x4 XTABS = 0x1800 + _HIDIOCGRAWNAME = 0x80804804 + 
_HIDIOCGRAWPHYS = 0x80404805 + _HIDIOCGRAWUNIQ = 0x80404808 ) // Errors diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index 7053d10ba..a7028e0ef 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -1,6 +1,7 @@ // mkerrors.sh -Wall -Werror -static -I/tmp/include -fsigned-char // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build s390x && linux // +build s390x,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. @@ -59,6 +60,8 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + ECCGETLAYOUT = 0x81484d11 + ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 ECHOE = 0x10 ECHOK = 0x20 @@ -92,6 +95,9 @@ const ( F_SETOWN = 0x8 F_UNLCK = 0x2 F_WRLCK = 0x1 + HIDIOCGRAWINFO = 0x80084803 + HIDIOCGRDESC = 0x90044802 + HIDIOCGRDESCSIZE = 0x80044801 HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 @@ -117,6 +123,19 @@ const ( MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 MCL_ONFAULT = 0x4 + MEMERASE = 0x40084d02 + MEMERASE64 = 0x40104d14 + MEMGETBADBLOCK = 0x40084d0b + MEMGETINFO = 0x80204d01 + MEMGETOOBSEL = 0x80c84d0a + MEMGETREGIONCOUNT = 0x80044d07 + MEMISLOCKED = 0x80084d17 + MEMLOCK = 0x40084d05 + MEMREADOOB = 0xc0104d04 + MEMSETBADBLOCK = 0x40084d0c + MEMUNLOCK = 0x40084d06 + MEMWRITEOOB = 0xc0104d03 + MTDFILEMODE = 0x4d13 NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 @@ -126,6 +145,10 @@ const ( NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 + OTPGETREGIONCOUNT = 0x40044d0e + OTPGETREGIONINFO = 0x400c4d0f + OTPLOCK = 0x800c4d10 + OTPSELECT = 0x80044d0d O_APPEND = 0x400 O_ASYNC = 0x2000 O_CLOEXEC = 0x80000 @@ -163,6 +186,7 @@ const ( PERF_EVENT_IOC_SET_OUTPUT = 0x2405 PPPIOCATTACH = 0x4004743d PPPIOCATTCHAN = 0x40047438 + PPPIOCBRIDGECHAN = 0x40047435 PPPIOCCONNECT = 0x4004743a PPPIOCDETACH = 0x4004743c PPPIOCDISCONN = 0x7439 @@ -190,6 +214,7 @@ const ( PPPIOCSPASS = 0x40107447 PPPIOCSRASYNCMAP = 0x40047454 PPPIOCSXASYNCMAP = 
0x4020744f + PPPIOCUNBRIDGECHAN = 0x7434 PPPIOCXFERUNIT = 0x744e PR_SET_PTRACER_ANY = 0xffffffffffffffff PTRACE_DISABLE_TE = 0x5010 @@ -206,6 +231,8 @@ const ( PTRACE_POKE_SYSTEM_CALL = 0x5008 PTRACE_PROT = 0x15 PTRACE_SINGLEBLOCK = 0xc + PTRACE_SYSEMU = 0x1f + PTRACE_SYSEMU_SINGLESTEP = 0x20 PTRACE_TE_ABORT_RAND = 0x5011 PT_ACR0 = 0x90 PT_ACR1 = 0x94 @@ -329,6 +356,7 @@ const ( SO_BROADCAST = 0x6 SO_BSDCOMPAT = 0xe SO_BUSY_POLL = 0x2e + SO_BUSY_POLL_BUDGET = 0x46 SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 @@ -351,6 +379,7 @@ const ( SO_PEERCRED = 0x11 SO_PEERGROUPS = 0x3b SO_PEERSEC = 0x1f + SO_PREFER_BUSY_POLL = 0x45 SO_PROTOCOL = 0x26 SO_RCVBUF = 0x8 SO_RCVBUFFORCE = 0x21 @@ -541,6 +570,9 @@ const ( WORDSIZE = 0x40 XCASE = 0x4 XTABS = 0x1800 + _HIDIOCGRAWNAME = 0x80804804 + _HIDIOCGRAWPHYS = 0x80404805 + _HIDIOCGRAWUNIQ = 0x80404808 ) // Errors diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index 137cfe796..ed3b3286c 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -1,6 +1,7 @@ // mkerrors.sh -Wall -Werror -static -I/tmp/include // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build sparc64 && linux // +build sparc64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
@@ -62,6 +63,8 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + ECCGETLAYOUT = 0x41484d11 + ECCGETSTATS = 0x40104d12 ECHOCTL = 0x200 ECHOE = 0x10 ECHOK = 0x20 @@ -96,6 +99,9 @@ const ( F_SETOWN = 0x6 F_UNLCK = 0x3 F_WRLCK = 0x2 + HIDIOCGRAWINFO = 0x40084803 + HIDIOCGRDESC = 0x50044802 + HIDIOCGRDESCSIZE = 0x40044801 HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 @@ -122,6 +128,19 @@ const ( MCL_CURRENT = 0x2000 MCL_FUTURE = 0x4000 MCL_ONFAULT = 0x8000 + MEMERASE = 0x80084d02 + MEMERASE64 = 0x80104d14 + MEMGETBADBLOCK = 0x80084d0b + MEMGETINFO = 0x40204d01 + MEMGETOOBSEL = 0x40c84d0a + MEMGETREGIONCOUNT = 0x40044d07 + MEMISLOCKED = 0x40084d17 + MEMLOCK = 0x80084d05 + MEMREADOOB = 0xc0104d04 + MEMSETBADBLOCK = 0x80084d0c + MEMUNLOCK = 0x80084d06 + MEMWRITEOOB = 0xc0104d03 + MTDFILEMODE = 0x20004d13 NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 @@ -131,6 +150,10 @@ const ( NS_GET_USERNS = 0x2000b701 OLCUC = 0x2 ONLCR = 0x4 + OTPGETREGIONCOUNT = 0x80044d0e + OTPGETREGIONINFO = 0x800c4d0f + OTPLOCK = 0x400c4d10 + OTPSELECT = 0x40044d0d O_APPEND = 0x8 O_ASYNC = 0x40 O_CLOEXEC = 0x400000 @@ -168,6 +191,7 @@ const ( PERF_EVENT_IOC_SET_OUTPUT = 0x20002405 PPPIOCATTACH = 0x8004743d PPPIOCATTCHAN = 0x80047438 + PPPIOCBRIDGECHAN = 0x80047435 PPPIOCCONNECT = 0x8004743a PPPIOCDETACH = 0x8004743c PPPIOCDISCONN = 0x20007439 @@ -195,6 +219,7 @@ const ( PPPIOCSPASS = 0x80107447 PPPIOCSRASYNCMAP = 0x80047454 PPPIOCSXASYNCMAP = 0x8020744f + PPPIOCUNBRIDGECHAN = 0x20007434 PPPIOCXFERUNIT = 0x2000744e PR_SET_PTRACER_ANY = 0xffffffffffffffff PTRACE_GETFPAREGS = 0x14 @@ -322,6 +347,7 @@ const ( SO_BROADCAST = 0x20 SO_BSDCOMPAT = 0x400 SO_BUSY_POLL = 0x30 + SO_BUSY_POLL_BUDGET = 0x49 SO_CNX_ADVICE = 0x37 SO_COOKIE = 0x3b SO_DETACH_REUSEPORT_BPF = 0x47 @@ -344,6 +370,7 @@ const ( SO_PEERCRED = 0x40 SO_PEERGROUPS = 0x3d SO_PEERSEC = 0x1e + SO_PREFER_BUSY_POLL = 0x48 SO_PROTOCOL = 0x1028 SO_RCVBUF = 0x1002 SO_RCVBUFFORCE = 0x100b @@ -531,6 +558,9 @@ const ( WORDSIZE = 0x40 XCASE = 0x4 XTABS = 
0x1800 + _HIDIOCGRAWNAME = 0x40804804 + _HIDIOCGRAWPHYS = 0x40404805 + _HIDIOCGRAWUNIQ = 0x40404808 __TIOCFLUSH = 0x80047410 ) diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go index 20f3a5799..72f7420d2 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go @@ -1,6 +1,7 @@ // mkerrors.sh -m32 // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build 386 && netbsd // +build 386,netbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go index 90b8fcd29..8d4eb0c08 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go @@ -1,6 +1,7 @@ // mkerrors.sh -m64 // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build amd64 && netbsd // +build amd64,netbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go index c5c03993b..9eef9749f 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go @@ -1,6 +1,7 @@ // mkerrors.sh -marm // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build arm && netbsd // +build arm,netbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go index 14dd3c1d1..3b62ba192 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go @@ -1,6 +1,7 @@ // mkerrors.sh -m64 // Code generated by the command above; see README.md. DO NOT EDIT. 
+//go:build arm64 && netbsd // +build arm64,netbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go index c865a10df..593cc0fef 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go @@ -1,6 +1,7 @@ // mkerrors.sh -m32 // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build 386 && openbsd // +build 386,openbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go index 9db6b2fb6..25cb60948 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go @@ -1,6 +1,7 @@ // mkerrors.sh -m64 // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build amd64 && openbsd // +build amd64,openbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go index 7072526a6..a4e4c2231 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go @@ -1,6 +1,7 @@ // mkerrors.sh // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build arm && openbsd // +build arm,openbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go index ac5efbe5a..90de7dfc3 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go @@ -1,6 +1,7 @@ // mkerrors.sh -m64 // Code generated by the command above; see README.md. DO NOT EDIT. 
+//go:build arm64 && openbsd // +build arm64,openbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_mips64.go index a74639a46..f1154ff56 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_mips64.go @@ -1,6 +1,7 @@ // mkerrors.sh -m64 // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build mips64 && openbsd // +build mips64,openbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. diff --git a/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go index 5312c36cc..1afee6a08 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go @@ -1,6 +1,7 @@ // mkerrors.sh -m64 // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build amd64 && solaris // +build amd64,solaris // Code generated by cmd/cgo -godefs; DO NOT EDIT. @@ -365,6 +366,7 @@ const ( HUPCL = 0x400 IBSHIFT = 0x10 ICANON = 0x2 + ICMP6_FILTER = 0x1 ICRNL = 0x100 IEXTEN = 0x8000 IFF_ADDRCONF = 0x80000 @@ -611,6 +613,7 @@ const ( IP_RECVPKTINFO = 0x1a IP_RECVRETOPTS = 0x6 IP_RECVSLLA = 0xa + IP_RECVTOS = 0xc IP_RECVTTL = 0xb IP_RETOPTS = 0x8 IP_REUSEADDR = 0x104 @@ -703,6 +706,7 @@ const ( O_APPEND = 0x8 O_CLOEXEC = 0x800000 O_CREAT = 0x100 + O_DIRECT = 0x2000000 O_DIRECTORY = 0x1000000 O_DSYNC = 0x40 O_EXCL = 0x400 diff --git a/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go new file mode 100644 index 000000000..fc7d0506f --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go @@ -0,0 +1,860 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build zos && s390x +// +build zos,s390x + +// Hand edited based on zerrors_linux_s390x.go +// TODO: auto-generate. + +package unix + +const ( + BRKINT = 0x0001 + CLOCK_MONOTONIC = 0x1 + CLOCK_PROCESS_CPUTIME_ID = 0x2 + CLOCK_REALTIME = 0x0 + CLOCK_THREAD_CPUTIME_ID = 0x3 + CS8 = 0x0030 + CSIZE = 0x0030 + ECHO = 0x00000008 + ECHONL = 0x00000001 + FD_CLOEXEC = 0x01 + FD_CLOFORK = 0x02 + FNDELAY = 0x04 + F_CLOSFD = 9 + F_CONTROL_CVT = 13 + F_DUPFD = 0 + F_DUPFD2 = 8 + F_GETFD = 1 + F_GETFL = 259 + F_GETLK = 5 + F_GETOWN = 10 + F_OK = 0x0 + F_RDLCK = 1 + F_SETFD = 2 + F_SETFL = 4 + F_SETLK = 6 + F_SETLKW = 7 + F_SETOWN = 11 + F_SETTAG = 12 + F_UNLCK = 3 + F_WRLCK = 2 + FSTYPE_ZFS = 0xe9 //"Z" + FSTYPE_HFS = 0xc8 //"H" + FSTYPE_NFS = 0xd5 //"N" + FSTYPE_TFS = 0xe3 //"T" + FSTYPE_AUTOMOUNT = 0xc1 //"A" + IP6F_MORE_FRAG = 0x0001 + IP6F_OFF_MASK = 0xfff8 + IP6F_RESERVED_MASK = 0x0006 + IP6OPT_JUMBO = 0xc2 + IP6OPT_JUMBO_LEN = 6 + IP6OPT_MUTABLE = 0x20 + IP6OPT_NSAP_ADDR = 0xc3 + IP6OPT_PAD1 = 0x00 + IP6OPT_PADN = 0x01 + IP6OPT_ROUTER_ALERT = 0x05 + IP6OPT_TUNNEL_LIMIT = 0x04 + IP6OPT_TYPE_DISCARD = 0x40 + IP6OPT_TYPE_FORCEICMP = 0x80 + IP6OPT_TYPE_ICMP = 0xc0 + IP6OPT_TYPE_SKIP = 0x00 + IP6_ALERT_AN = 0x0002 + IP6_ALERT_MLD = 0x0000 + IP6_ALERT_RSVP = 0x0001 + IPPORT_RESERVED = 1024 + IPPORT_USERRESERVED = 5000 + IPPROTO_AH = 51 + SOL_AH = 51 + IPPROTO_DSTOPTS = 60 + SOL_DSTOPTS = 60 + IPPROTO_EGP = 8 + SOL_EGP = 8 + IPPROTO_ESP = 50 + SOL_ESP = 50 + IPPROTO_FRAGMENT = 44 + SOL_FRAGMENT = 44 + IPPROTO_GGP = 2 + SOL_GGP = 2 + IPPROTO_HOPOPTS = 0 + SOL_HOPOPTS = 0 + IPPROTO_ICMP = 1 + SOL_ICMP = 1 + IPPROTO_ICMPV6 = 58 + SOL_ICMPV6 = 58 + IPPROTO_IDP = 22 + SOL_IDP = 22 + IPPROTO_IP = 0 + SOL_IP = 0 + IPPROTO_IPV6 = 41 + SOL_IPV6 = 41 + IPPROTO_MAX = 256 + SOL_MAX = 256 + IPPROTO_NONE = 59 + SOL_NONE = 59 + IPPROTO_PUP = 12 + SOL_PUP = 12 + IPPROTO_RAW = 255 + SOL_RAW = 255 + IPPROTO_ROUTING = 43 + SOL_ROUTING = 43 + IPPROTO_TCP = 6 + SOL_TCP = 6 + IPPROTO_UDP = 17 + 
SOL_UDP = 17 + IPV6_ADDR_PREFERENCES = 32 + IPV6_CHECKSUM = 19 + IPV6_DONTFRAG = 29 + IPV6_DSTOPTS = 23 + IPV6_HOPLIMIT = 11 + IPV6_HOPOPTS = 22 + IPV6_JOIN_GROUP = 5 + IPV6_LEAVE_GROUP = 6 + IPV6_MULTICAST_HOPS = 9 + IPV6_MULTICAST_IF = 7 + IPV6_MULTICAST_LOOP = 4 + IPV6_NEXTHOP = 20 + IPV6_PATHMTU = 12 + IPV6_PKTINFO = 13 + IPV6_PREFER_SRC_CGA = 0x10 + IPV6_PREFER_SRC_COA = 0x02 + IPV6_PREFER_SRC_HOME = 0x01 + IPV6_PREFER_SRC_NONCGA = 0x20 + IPV6_PREFER_SRC_PUBLIC = 0x08 + IPV6_PREFER_SRC_TMP = 0x04 + IPV6_RECVDSTOPTS = 28 + IPV6_RECVHOPLIMIT = 14 + IPV6_RECVHOPOPTS = 26 + IPV6_RECVPATHMTU = 16 + IPV6_RECVPKTINFO = 15 + IPV6_RECVRTHDR = 25 + IPV6_RECVTCLASS = 31 + IPV6_RTHDR = 21 + IPV6_RTHDRDSTOPTS = 24 + IPV6_RTHDR_TYPE_0 = 0 + IPV6_TCLASS = 30 + IPV6_UNICAST_HOPS = 3 + IPV6_USE_MIN_MTU = 18 + IPV6_V6ONLY = 10 + IP_ADD_MEMBERSHIP = 5 + IP_ADD_SOURCE_MEMBERSHIP = 12 + IP_BLOCK_SOURCE = 10 + IP_DEFAULT_MULTICAST_LOOP = 1 + IP_DEFAULT_MULTICAST_TTL = 1 + IP_DROP_MEMBERSHIP = 6 + IP_DROP_SOURCE_MEMBERSHIP = 13 + IP_MAX_MEMBERSHIPS = 20 + IP_MULTICAST_IF = 7 + IP_MULTICAST_LOOP = 4 + IP_MULTICAST_TTL = 3 + IP_OPTIONS = 1 + IP_PKTINFO = 101 + IP_RECVPKTINFO = 102 + IP_TOS = 2 + IP_TTL = 3 + IP_UNBLOCK_SOURCE = 11 + ICANON = 0x0010 + ICMP6_FILTER = 0x26 + ICRNL = 0x0002 + IEXTEN = 0x0020 + IGNBRK = 0x0004 + IGNCR = 0x0008 + INLCR = 0x0020 + ISIG = 0x0040 + ISTRIP = 0x0080 + IXON = 0x0200 + IXOFF = 0x0100 + LOCK_SH = 0x1 // Not exist on zOS + LOCK_EX = 0x2 // Not exist on zOS + LOCK_NB = 0x4 // Not exist on zOS + LOCK_UN = 0x8 // Not exist on zOS + POLLIN = 0x0003 + POLLOUT = 0x0004 + POLLPRI = 0x0010 + POLLERR = 0x0020 + POLLHUP = 0x0040 + POLLNVAL = 0x0080 + PROT_READ = 0x1 // mmap - page can be read + PROT_WRITE = 0x2 // page can be written + PROT_NONE = 0x4 // can't be accessed + PROT_EXEC = 0x8 // can be executed + MAP_PRIVATE = 0x1 // changes are private + MAP_SHARED = 0x2 // changes are shared + MAP_FIXED = 0x4 // place exactly + MCAST_JOIN_GROUP = 40 + 
MCAST_LEAVE_GROUP = 41 + MCAST_JOIN_SOURCE_GROUP = 42 + MCAST_LEAVE_SOURCE_GROUP = 43 + MCAST_BLOCK_SOURCE = 44 + MCAST_UNBLOCK_SOURCE = 45 + MS_SYNC = 0x1 // msync - synchronous writes + MS_ASYNC = 0x2 // asynchronous writes + MS_INVALIDATE = 0x4 // invalidate mappings + MTM_RDONLY = 0x80000000 + MTM_RDWR = 0x40000000 + MTM_UMOUNT = 0x10000000 + MTM_IMMED = 0x08000000 + MTM_FORCE = 0x04000000 + MTM_DRAIN = 0x02000000 + MTM_RESET = 0x01000000 + MTM_SAMEMODE = 0x00100000 + MTM_UNQSEFORCE = 0x00040000 + MTM_NOSUID = 0x00000400 + MTM_SYNCHONLY = 0x00000200 + MTM_REMOUNT = 0x00000100 + MTM_NOSECURITY = 0x00000080 + NFDBITS = 0x20 + O_ACCMODE = 0x03 + O_APPEND = 0x08 + O_ASYNCSIG = 0x0200 + O_CREAT = 0x80 + O_EXCL = 0x40 + O_GETFL = 0x0F + O_LARGEFILE = 0x0400 + O_NONBLOCK = 0x04 + O_RDONLY = 0x02 + O_RDWR = 0x03 + O_SYNC = 0x0100 + O_TRUNC = 0x10 + O_WRONLY = 0x01 + O_NOCTTY = 0x20 + OPOST = 0x0001 + ONLCR = 0x0004 + PARENB = 0x0200 + PARMRK = 0x0400 + QUERYCVT = 3 + RUSAGE_CHILDREN = -0x1 + RUSAGE_SELF = 0x0 // RUSAGE_THREAD unsupported on z/OS + SEEK_CUR = 1 + SEEK_END = 2 + SEEK_SET = 0 + SETAUTOCVTALL = 5 + SETAUTOCVTON = 2 + SETCVTALL = 4 + SETCVTOFF = 0 + SETCVTON = 1 + AF_APPLETALK = 16 + AF_CCITT = 10 + AF_CHAOS = 5 + AF_DATAKIT = 9 + AF_DLI = 13 + AF_ECMA = 8 + AF_HYLINK = 15 + AF_IMPLINK = 3 + AF_INET = 2 + AF_INET6 = 19 + AF_INTF = 20 + AF_IUCV = 17 + AF_LAT = 14 + AF_LINK = 18 + AF_MAX = 30 + AF_NBS = 7 + AF_NDD = 23 + AF_NETWARE = 22 + AF_NS = 6 + AF_PUP = 4 + AF_RIF = 21 + AF_ROUTE = 20 + AF_SNA = 11 + AF_UNIX = 1 + AF_UNSPEC = 0 + IBMTCP_IMAGE = 1 + MSG_ACK_EXPECTED = 0x10 + MSG_ACK_GEN = 0x40 + MSG_ACK_TIMEOUT = 0x20 + MSG_CONNTERM = 0x80 + MSG_CTRUNC = 0x20 + MSG_DONTROUTE = 0x4 + MSG_EOF = 0x8000 + MSG_EOR = 0x8 + MSG_MAXIOVLEN = 16 + MSG_NONBLOCK = 0x4000 + MSG_OOB = 0x1 + MSG_PEEK = 0x2 + MSG_TRUNC = 0x10 + MSG_WAITALL = 0x40 + PRIO_PROCESS = 1 + PRIO_PGRP = 2 + PRIO_USER = 3 + RLIMIT_CPU = 0 + RLIMIT_FSIZE = 1 + RLIMIT_DATA = 2 + RLIMIT_STACK = 3 + 
RLIMIT_CORE = 4 + RLIMIT_AS = 5 + RLIMIT_NOFILE = 6 + RLIMIT_MEMLIMIT = 7 + RLIM_INFINITY = 2147483647 + SCM_RIGHTS = 0x01 + SF_CLOSE = 0x00000002 + SF_REUSE = 0x00000001 + SHUT_RD = 0 + SHUT_RDWR = 2 + SHUT_WR = 1 + SOCK_CONN_DGRAM = 6 + SOCK_DGRAM = 2 + SOCK_RAW = 3 + SOCK_RDM = 4 + SOCK_SEQPACKET = 5 + SOCK_STREAM = 1 + SOL_SOCKET = 0xffff + SOMAXCONN = 10 + SO_ACCEPTCONN = 0x0002 + SO_ACCEPTECONNABORTED = 0x0006 + SO_ACKNOW = 0x7700 + SO_BROADCAST = 0x0020 + SO_BULKMODE = 0x8000 + SO_CKSUMRECV = 0x0800 + SO_CLOSE = 0x01 + SO_CLUSTERCONNTYPE = 0x00004001 + SO_CLUSTERCONNTYPE_INTERNAL = 8 + SO_CLUSTERCONNTYPE_NOCONN = 0 + SO_CLUSTERCONNTYPE_NONE = 1 + SO_CLUSTERCONNTYPE_SAME_CLUSTER = 2 + SO_CLUSTERCONNTYPE_SAME_IMAGE = 4 + SO_DEBUG = 0x0001 + SO_DONTROUTE = 0x0010 + SO_ERROR = 0x1007 + SO_IGNOREINCOMINGPUSH = 0x1 + SO_IGNORESOURCEVIPA = 0x0002 + SO_KEEPALIVE = 0x0008 + SO_LINGER = 0x0080 + SO_NONBLOCKLOCAL = 0x8001 + SO_NOREUSEADDR = 0x1000 + SO_OOBINLINE = 0x0100 + SO_OPTACK = 0x8004 + SO_OPTMSS = 0x8003 + SO_RCVBUF = 0x1002 + SO_RCVLOWAT = 0x1004 + SO_RCVTIMEO = 0x1006 + SO_REUSEADDR = 0x0004 + SO_REUSEPORT = 0x0200 + SO_SECINFO = 0x00004002 + SO_SET = 0x0200 + SO_SNDBUF = 0x1001 + SO_SNDLOWAT = 0x1003 + SO_SNDTIMEO = 0x1005 + SO_TYPE = 0x1008 + SO_UNSET = 0x0400 + SO_USELOOPBACK = 0x0040 + SO_USE_IFBUFS = 0x0400 + S_ISUID = 0x0800 + S_ISGID = 0x0400 + S_ISVTX = 0x0200 + S_IRUSR = 0x0100 + S_IWUSR = 0x0080 + S_IXUSR = 0x0040 + S_IRWXU = 0x01C0 + S_IRGRP = 0x0020 + S_IWGRP = 0x0010 + S_IXGRP = 0x0008 + S_IRWXG = 0x0038 + S_IROTH = 0x0004 + S_IWOTH = 0x0002 + S_IXOTH = 0x0001 + S_IRWXO = 0x0007 + S_IREAD = S_IRUSR + S_IWRITE = S_IWUSR + S_IEXEC = S_IXUSR + S_IFDIR = 0x01000000 + S_IFCHR = 0x02000000 + S_IFREG = 0x03000000 + S_IFFIFO = 0x04000000 + S_IFIFO = 0x04000000 + S_IFLNK = 0x05000000 + S_IFBLK = 0x06000000 + S_IFSOCK = 0x07000000 + S_IFVMEXTL = 0xFE000000 + S_IFVMEXTL_EXEC = 0x00010000 + S_IFVMEXTL_DATA = 0x00020000 + S_IFVMEXTL_MEL = 0x00030000 + 
S_IFEXTL = 0x00000001 + S_IFPROGCTL = 0x00000002 + S_IFAPFCTL = 0x00000004 + S_IFNOSHARE = 0x00000008 + S_IFSHARELIB = 0x00000010 + S_IFMT = 0xFF000000 + S_IFMST = 0x00FF0000 + TCP_KEEPALIVE = 0x8 + TCP_NODELAY = 0x1 + TCP_INFO = 0xb + TCP_USER_TIMEOUT = 0x1 + TIOCGWINSZ = 0x4008a368 + TIOCSWINSZ = 0x8008a367 + TIOCSBRK = 0x2000a77b + TIOCCBRK = 0x2000a77a + TIOCSTI = 0x8001a772 + TIOCGPGRP = 0x4004a777 // _IOR(167, 119, int) + TCSANOW = 0 + TCSETS = 0 // equivalent to TCSANOW for tcsetattr + TCSADRAIN = 1 + TCSETSW = 1 // equivalent to TCSADRAIN for tcsetattr + TCSAFLUSH = 2 + TCSETSF = 2 // equivalent to TCSAFLUSH for tcsetattr + TCGETS = 3 // not defined in ioctl.h -- zos golang only + TCIFLUSH = 0 + TCOFLUSH = 1 + TCIOFLUSH = 2 + TCOOFF = 0 + TCOON = 1 + TCIOFF = 2 + TCION = 3 + TIOCSPGRP = 0x8004a776 + TIOCNOTTY = 0x2000a771 + TIOCEXCL = 0x2000a70d + TIOCNXCL = 0x2000a70e + TIOCGETD = 0x4004a700 + TIOCSETD = 0x8004a701 + TIOCPKT = 0x8004a770 + TIOCSTOP = 0x2000a76f + TIOCSTART = 0x2000a76e + TIOCUCNTL = 0x8004a766 + TIOCREMOTE = 0x8004a769 + TIOCMGET = 0x4004a76a + TIOCMSET = 0x8004a76d + TIOCMBIC = 0x8004a76b + TIOCMBIS = 0x8004a76c + VINTR = 0 + VQUIT = 1 + VERASE = 2 + VKILL = 3 + VEOF = 4 + VEOL = 5 + VMIN = 6 + VSTART = 7 + VSTOP = 8 + VSUSP = 9 + VTIME = 10 + WCONTINUED = 0x4 + WNOHANG = 0x1 + WUNTRACED = 0x2 + _BPX_SWAP = 1 + _BPX_NONSWAP = 2 + MCL_CURRENT = 1 // for Linux compatibility -- no zos semantics + MCL_FUTURE = 2 // for Linux compatibility -- no zos semantics + MCL_ONFAULT = 3 // for Linux compatibility -- no zos semantics + MADV_NORMAL = 0 // for Linux compatibility -- no zos semantics + MADV_RANDOM = 1 // for Linux compatibility -- no zos semantics + MADV_SEQUENTIAL = 2 // for Linux compatibility -- no zos semantics + MADV_WILLNEED = 3 // for Linux compatibility -- no zos semantics + MADV_REMOVE = 4 // for Linux compatibility -- no zos semantics + MADV_DONTFORK = 5 // for Linux compatibility -- no zos semantics + MADV_DOFORK = 6 // for Linux 
compatibility -- no zos semantics + MADV_HWPOISON = 7 // for Linux compatibility -- no zos semantics + MADV_MERGEABLE = 8 // for Linux compatibility -- no zos semantics + MADV_UNMERGEABLE = 9 // for Linux compatibility -- no zos semantics + MADV_SOFT_OFFLINE = 10 // for Linux compatibility -- no zos semantics + MADV_HUGEPAGE = 11 // for Linux compatibility -- no zos semantics + MADV_NOHUGEPAGE = 12 // for Linux compatibility -- no zos semantics + MADV_DONTDUMP = 13 // for Linux compatibility -- no zos semantics + MADV_DODUMP = 14 // for Linux compatibility -- no zos semantics + MADV_FREE = 15 // for Linux compatibility -- no zos semantics + MADV_WIPEONFORK = 16 // for Linux compatibility -- no zos semantics + MADV_KEEPONFORK = 17 // for Linux compatibility -- no zos semantics + AT_SYMLINK_NOFOLLOW = 1 // for Unix compatibility -- no zos semantics + AT_FDCWD = 2 // for Unix compatibility -- no zos semantics +) + +const ( + EDOM = Errno(1) + ERANGE = Errno(2) + EACCES = Errno(111) + EAGAIN = Errno(112) + EBADF = Errno(113) + EBUSY = Errno(114) + ECHILD = Errno(115) + EDEADLK = Errno(116) + EEXIST = Errno(117) + EFAULT = Errno(118) + EFBIG = Errno(119) + EINTR = Errno(120) + EINVAL = Errno(121) + EIO = Errno(122) + EISDIR = Errno(123) + EMFILE = Errno(124) + EMLINK = Errno(125) + ENAMETOOLONG = Errno(126) + ENFILE = Errno(127) + ENODEV = Errno(128) + ENOENT = Errno(129) + ENOEXEC = Errno(130) + ENOLCK = Errno(131) + ENOMEM = Errno(132) + ENOSPC = Errno(133) + ENOSYS = Errno(134) + ENOTDIR = Errno(135) + ENOTEMPTY = Errno(136) + ENOTTY = Errno(137) + ENXIO = Errno(138) + EPERM = Errno(139) + EPIPE = Errno(140) + EROFS = Errno(141) + ESPIPE = Errno(142) + ESRCH = Errno(143) + EXDEV = Errno(144) + E2BIG = Errno(145) + ELOOP = Errno(146) + EILSEQ = Errno(147) + ENODATA = Errno(148) + EOVERFLOW = Errno(149) + EMVSNOTUP = Errno(150) + ECMSSTORAGE = Errno(151) + EMVSDYNALC = Errno(151) + EMVSCVAF = Errno(152) + EMVSCATLG = Errno(153) + ECMSINITIAL = Errno(156) + EMVSINITIAL 
= Errno(156) + ECMSERR = Errno(157) + EMVSERR = Errno(157) + EMVSPARM = Errno(158) + ECMSPFSFILE = Errno(159) + EMVSPFSFILE = Errno(159) + EMVSBADCHAR = Errno(160) + ECMSPFSPERM = Errno(162) + EMVSPFSPERM = Errno(162) + EMVSSAFEXTRERR = Errno(163) + EMVSSAF2ERR = Errno(164) + EMVSTODNOTSET = Errno(165) + EMVSPATHOPTS = Errno(166) + EMVSNORTL = Errno(167) + EMVSEXPIRE = Errno(168) + EMVSPASSWORD = Errno(169) + EMVSWLMERROR = Errno(170) + EMVSCPLERROR = Errno(171) + EMVSARMERROR = Errno(172) + ELENOFORK = Errno(200) + ELEMSGERR = Errno(201) + EFPMASKINV = Errno(202) + EFPMODEINV = Errno(203) + EBUFLEN = Errno(227) + EEXTLINK = Errno(228) + ENODD = Errno(229) + ECMSESMERR = Errno(230) + ECPERR = Errno(231) + ELEMULTITHREAD = Errno(232) + ELEFENCE = Errno(244) + EBADDATA = Errno(245) + EUNKNOWN = Errno(246) + ENOTSUP = Errno(247) + EBADNAME = Errno(248) + ENOTSAFE = Errno(249) + ELEMULTITHREADFORK = Errno(257) + ECUNNOENV = Errno(258) + ECUNNOCONV = Errno(259) + ECUNNOTALIGNED = Errno(260) + ECUNERR = Errno(262) + EIBMBADCALL = Errno(1000) + EIBMBADPARM = Errno(1001) + EIBMSOCKOUTOFRANGE = Errno(1002) + EIBMSOCKINUSE = Errno(1003) + EIBMIUCVERR = Errno(1004) + EOFFLOADboxERROR = Errno(1005) + EOFFLOADboxRESTART = Errno(1006) + EOFFLOADboxDOWN = Errno(1007) + EIBMCONFLICT = Errno(1008) + EIBMCANCELLED = Errno(1009) + EIBMBADTCPNAME = Errno(1011) + ENOTBLK = Errno(1100) + ETXTBSY = Errno(1101) + EWOULDBLOCK = Errno(1102) + EINPROGRESS = Errno(1103) + EALREADY = Errno(1104) + ENOTSOCK = Errno(1105) + EDESTADDRREQ = Errno(1106) + EMSGSIZE = Errno(1107) + EPROTOTYPE = Errno(1108) + ENOPROTOOPT = Errno(1109) + EPROTONOSUPPORT = Errno(1110) + ESOCKTNOSUPPORT = Errno(1111) + EOPNOTSUPP = Errno(1112) + EPFNOSUPPORT = Errno(1113) + EAFNOSUPPORT = Errno(1114) + EADDRINUSE = Errno(1115) + EADDRNOTAVAIL = Errno(1116) + ENETDOWN = Errno(1117) + ENETUNREACH = Errno(1118) + ENETRESET = Errno(1119) + ECONNABORTED = Errno(1120) + ECONNRESET = Errno(1121) + ENOBUFS = Errno(1122) + 
EISCONN = Errno(1123) + ENOTCONN = Errno(1124) + ESHUTDOWN = Errno(1125) + ETOOMANYREFS = Errno(1126) + ETIMEDOUT = Errno(1127) + ECONNREFUSED = Errno(1128) + EHOSTDOWN = Errno(1129) + EHOSTUNREACH = Errno(1130) + EPROCLIM = Errno(1131) + EUSERS = Errno(1132) + EDQUOT = Errno(1133) + ESTALE = Errno(1134) + EREMOTE = Errno(1135) + ENOSTR = Errno(1136) + ETIME = Errno(1137) + ENOSR = Errno(1138) + ENOMSG = Errno(1139) + EBADMSG = Errno(1140) + EIDRM = Errno(1141) + ENONET = Errno(1142) + ERREMOTE = Errno(1143) + ENOLINK = Errno(1144) + EADV = Errno(1145) + ESRMNT = Errno(1146) + ECOMM = Errno(1147) + EPROTO = Errno(1148) + EMULTIHOP = Errno(1149) + EDOTDOT = Errno(1150) + EREMCHG = Errno(1151) + ECANCELED = Errno(1152) + EINTRNODATA = Errno(1159) + ENOREUSE = Errno(1160) + ENOMOVE = Errno(1161) +) + +// Signals +const ( + SIGHUP = Signal(1) + SIGINT = Signal(2) + SIGABRT = Signal(3) + SIGILL = Signal(4) + SIGPOLL = Signal(5) + SIGURG = Signal(6) + SIGSTOP = Signal(7) + SIGFPE = Signal(8) + SIGKILL = Signal(9) + SIGBUS = Signal(10) + SIGSEGV = Signal(11) + SIGSYS = Signal(12) + SIGPIPE = Signal(13) + SIGALRM = Signal(14) + SIGTERM = Signal(15) + SIGUSR1 = Signal(16) + SIGUSR2 = Signal(17) + SIGABND = Signal(18) + SIGCONT = Signal(19) + SIGCHLD = Signal(20) + SIGTTIN = Signal(21) + SIGTTOU = Signal(22) + SIGIO = Signal(23) + SIGQUIT = Signal(24) + SIGTSTP = Signal(25) + SIGTRAP = Signal(26) + SIGIOERR = Signal(27) + SIGWINCH = Signal(28) + SIGXCPU = Signal(29) + SIGXFSZ = Signal(30) + SIGVTALRM = Signal(31) + SIGPROF = Signal(32) + SIGDANGER = Signal(33) + SIGTHSTOP = Signal(34) + SIGTHCONT = Signal(35) + SIGTRACE = Signal(37) + SIGDCE = Signal(38) + SIGDUMP = Signal(39) +) + +// Error table +var errorList = [...]struct { + num Errno + name string + desc string +}{ + {1, "EDC5001I", "A domain error occurred."}, + {2, "EDC5002I", "A range error occurred."}, + {111, "EDC5111I", "Permission denied."}, + {112, "EDC5112I", "Resource temporarily unavailable."}, + {113, 
"EDC5113I", "Bad file descriptor."}, + {114, "EDC5114I", "Resource busy."}, + {115, "EDC5115I", "No child processes."}, + {116, "EDC5116I", "Resource deadlock avoided."}, + {117, "EDC5117I", "File exists."}, + {118, "EDC5118I", "Incorrect address."}, + {119, "EDC5119I", "File too large."}, + {120, "EDC5120I", "Interrupted function call."}, + {121, "EDC5121I", "Invalid argument."}, + {122, "EDC5122I", "Input/output error."}, + {123, "EDC5123I", "Is a directory."}, + {124, "EDC5124I", "Too many open files."}, + {125, "EDC5125I", "Too many links."}, + {126, "EDC5126I", "Filename too long."}, + {127, "EDC5127I", "Too many open files in system."}, + {128, "EDC5128I", "No such device."}, + {129, "EDC5129I", "No such file or directory."}, + {130, "EDC5130I", "Exec format error."}, + {131, "EDC5131I", "No locks available."}, + {132, "EDC5132I", "Not enough memory."}, + {133, "EDC5133I", "No space left on device."}, + {134, "EDC5134I", "Function not implemented."}, + {135, "EDC5135I", "Not a directory."}, + {136, "EDC5136I", "Directory not empty."}, + {137, "EDC5137I", "Inappropriate I/O control operation."}, + {138, "EDC5138I", "No such device or address."}, + {139, "EDC5139I", "Operation not permitted."}, + {140, "EDC5140I", "Broken pipe."}, + {141, "EDC5141I", "Read-only file system."}, + {142, "EDC5142I", "Invalid seek."}, + {143, "EDC5143I", "No such process."}, + {144, "EDC5144I", "Improper link."}, + {145, "EDC5145I", "The parameter list is too long, or the message to receive was too large for the buffer."}, + {146, "EDC5146I", "Too many levels of symbolic links."}, + {147, "EDC5147I", "Illegal byte sequence."}, + {148, "", ""}, + {149, "EDC5149I", "Value Overflow Error."}, + {150, "EDC5150I", "UNIX System Services is not active."}, + {151, "EDC5151I", "Dynamic allocation error."}, + {152, "EDC5152I", "Common VTOC access facility (CVAF) error."}, + {153, "EDC5153I", "Catalog obtain error."}, + {156, "EDC5156I", "Process initialization error."}, + {157, "EDC5157I", 
"An internal error has occurred."}, + {158, "EDC5158I", "Bad parameters were passed to the service."}, + {159, "EDC5159I", "The Physical File System encountered a permanent file error."}, + {160, "EDC5160I", "Bad character in environment variable name."}, + {162, "EDC5162I", "The Physical File System encountered a system error."}, + {163, "EDC5163I", "SAF/RACF extract error."}, + {164, "EDC5164I", "SAF/RACF error."}, + {165, "EDC5165I", "System TOD clock not set."}, + {166, "EDC5166I", "Access mode argument on function call conflicts with PATHOPTS parameter on JCL DD statement."}, + {167, "EDC5167I", "Access to the UNIX System Services version of the C RTL is denied."}, + {168, "EDC5168I", "Password has expired."}, + {169, "EDC5169I", "Password is invalid."}, + {170, "EDC5170I", "An error was encountered with WLM."}, + {171, "EDC5171I", "An error was encountered with CPL."}, + {172, "EDC5172I", "An error was encountered with Application Response Measurement (ARM) component."}, + {200, "EDC5200I", "The application contains a Language Environment member language that cannot tolerate a fork()."}, + {201, "EDC5201I", "The Language Environment message file was not found in the hierarchical file system."}, + {202, "EDC5202E", "DLL facilities are not supported under SPC environment."}, + {203, "EDC5203E", "DLL facilities are not supported under POSIX environment."}, + {227, "EDC5227I", "Buffer is not long enough to contain a path definition"}, + {228, "EDC5228I", "The file referred to is an external link"}, + {229, "EDC5229I", "No path definition for ddname in effect"}, + {230, "EDC5230I", "ESM error."}, + {231, "EDC5231I", "CP or the external security manager had an error"}, + {232, "EDC5232I", "The function failed because it was invoked from a multithread environment."}, + {244, "EDC5244I", "The program, module or DLL is not supported in this environment."}, + {245, "EDC5245I", "Data is not valid."}, + {246, "EDC5246I", "Unknown system state."}, + {247, "EDC5247I", 
"Operation not supported."}, + {248, "EDC5248I", "The object name specified is not correct."}, + {249, "EDC5249I", "The function is not allowed."}, + {257, "EDC5257I", "Function cannot be called in the child process of a fork() from a multithreaded process until exec() is called."}, + {258, "EDC5258I", "A CUN_RS_NO_UNI_ENV error was issued by Unicode Services."}, + {259, "EDC5259I", "A CUN_RS_NO_CONVERSION error was issued by Unicode Services."}, + {260, "EDC5260I", "A CUN_RS_TABLE_NOT_ALIGNED error was issued by Unicode Services."}, + {262, "EDC5262I", "An iconv() function encountered an unexpected error while using Unicode Services."}, + {1000, "EDC8000I", "A bad socket-call constant was found in the IUCV header."}, + {1001, "EDC8001I", "An error was found in the IUCV header."}, + {1002, "EDC8002I", "A socket descriptor is out of range."}, + {1003, "EDC8003I", "A socket descriptor is in use."}, + {1004, "EDC8004I", "Request failed because of an IUCV error."}, + {1005, "EDC8005I", "Offload box error."}, + {1006, "EDC8006I", "Offload box restarted."}, + {1007, "EDC8007I", "Offload box down."}, + {1008, "EDC8008I", "Already a conflicting call outstanding on socket."}, + {1009, "EDC8009I", "Request cancelled using a SOCKcallCANCEL request."}, + {1011, "EDC8011I", "A name of a PFS was specified that either is not configured or is not a Sockets PFS."}, + {1100, "EDC8100I", "Block device required."}, + {1101, "EDC8101I", "Text file busy."}, + {1102, "EDC8102I", "Operation would block."}, + {1103, "EDC8103I", "Operation now in progress."}, + {1104, "EDC8104I", "Connection already in progress."}, + {1105, "EDC8105I", "Socket operation on non-socket."}, + {1106, "EDC8106I", "Destination address required."}, + {1107, "EDC8107I", "Message too long."}, + {1108, "EDC8108I", "Protocol wrong type for socket."}, + {1109, "EDC8109I", "Protocol not available."}, + {1110, "EDC8110I", "Protocol not supported."}, + {1111, "EDC8111I", "Socket type not supported."}, + {1112, "EDC8112I", 
"Operation not supported on socket."}, + {1113, "EDC8113I", "Protocol family not supported."}, + {1114, "EDC8114I", "Address family not supported."}, + {1115, "EDC8115I", "Address already in use."}, + {1116, "EDC8116I", "Address not available."}, + {1117, "EDC8117I", "Network is down."}, + {1118, "EDC8118I", "Network is unreachable."}, + {1119, "EDC8119I", "Network dropped connection on reset."}, + {1120, "EDC8120I", "Connection ended abnormally."}, + {1121, "EDC8121I", "Connection reset."}, + {1122, "EDC8122I", "No buffer space available."}, + {1123, "EDC8123I", "Socket already connected."}, + {1124, "EDC8124I", "Socket not connected."}, + {1125, "EDC8125I", "Can't send after socket shutdown."}, + {1126, "EDC8126I", "Too many references; can't splice."}, + {1127, "EDC8127I", "Connection timed out."}, + {1128, "EDC8128I", "Connection refused."}, + {1129, "EDC8129I", "Host is not available."}, + {1130, "EDC8130I", "Host cannot be reached."}, + {1131, "EDC8131I", "Too many processes."}, + {1132, "EDC8132I", "Too many users."}, + {1133, "EDC8133I", "Disk quota exceeded."}, + {1134, "EDC8134I", "Stale file handle."}, + {1135, "", ""}, + {1136, "EDC8136I", "File is not a STREAM."}, + {1137, "EDC8137I", "STREAMS ioctl() timeout."}, + {1138, "EDC8138I", "No STREAMS resources."}, + {1139, "EDC8139I", "The message identified by set_id and msg_id is not in the message catalog."}, + {1140, "EDC8140I", "Bad message."}, + {1141, "EDC8141I", "Identifier removed."}, + {1142, "", ""}, + {1143, "", ""}, + {1144, "EDC8144I", "The link has been severed."}, + {1145, "", ""}, + {1146, "", ""}, + {1147, "", ""}, + {1148, "EDC8148I", "Protocol error."}, + {1149, "EDC8149I", "Multihop not allowed."}, + {1150, "", ""}, + {1151, "", ""}, + {1152, "EDC8152I", "The asynchronous I/O request has been canceled."}, + {1159, "EDC8159I", "Function call was interrupted before any data was received."}, + {1160, "EDC8160I", "Socket reuse is not supported."}, + {1161, "EDC8161I", "The file system 
cannot currently be moved."}, +} + +// Signal table +var signalList = [...]struct { + num Signal + name string + desc string +}{ + {1, "SIGHUP", "hangup"}, + {2, "SIGINT", "interrupt"}, + {3, "SIGABT", "aborted"}, + {4, "SIGILL", "illegal instruction"}, + {5, "SIGPOLL", "pollable event"}, + {6, "SIGURG", "urgent I/O condition"}, + {7, "SIGSTOP", "stop process"}, + {8, "SIGFPE", "floating point exception"}, + {9, "SIGKILL", "killed"}, + {10, "SIGBUS", "bus error"}, + {11, "SIGSEGV", "segmentation fault"}, + {12, "SIGSYS", "bad argument to routine"}, + {13, "SIGPIPE", "broken pipe"}, + {14, "SIGALRM", "alarm clock"}, + {15, "SIGTERM", "terminated"}, + {16, "SIGUSR1", "user defined signal 1"}, + {17, "SIGUSR2", "user defined signal 2"}, + {18, "SIGABND", "abend"}, + {19, "SIGCONT", "continued"}, + {20, "SIGCHLD", "child exited"}, + {21, "SIGTTIN", "stopped (tty input)"}, + {22, "SIGTTOU", "stopped (tty output)"}, + {23, "SIGIO", "I/O possible"}, + {24, "SIGQUIT", "quit"}, + {25, "SIGTSTP", "stopped"}, + {26, "SIGTRAP", "trace/breakpoint trap"}, + {27, "SIGIOER", "I/O error"}, + {28, "SIGWINCH", "window changed"}, + {29, "SIGXCPU", "CPU time limit exceeded"}, + {30, "SIGXFSZ", "file size limit exceeded"}, + {31, "SIGVTALRM", "virtual timer expired"}, + {32, "SIGPROF", "profiling timer expired"}, + {33, "SIGDANGER", "danger"}, + {34, "SIGTHSTOP", "stop thread"}, + {35, "SIGTHCONT", "continue thread"}, + {37, "SIGTRACE", "trace"}, + {38, "", "DCE"}, + {39, "SIGDUMP", "dump"}, +} diff --git a/vendor/golang.org/x/sys/unix/zptrace_armnn_linux.go b/vendor/golang.org/x/sys/unix/zptrace_armnn_linux.go index 89c5920e0..bd001a6e1 100644 --- a/vendor/golang.org/x/sys/unix/zptrace_armnn_linux.go +++ b/vendor/golang.org/x/sys/unix/zptrace_armnn_linux.go @@ -1,5 +1,6 @@ // Code generated by linux/mkall.go generatePtracePair("arm", "arm64"). DO NOT EDIT. 
+//go:build linux && (arm || arm64) // +build linux // +build arm arm64 diff --git a/vendor/golang.org/x/sys/unix/zptrace_mipsnn_linux.go b/vendor/golang.org/x/sys/unix/zptrace_mipsnn_linux.go index 24b841eec..c34d0639b 100644 --- a/vendor/golang.org/x/sys/unix/zptrace_mipsnn_linux.go +++ b/vendor/golang.org/x/sys/unix/zptrace_mipsnn_linux.go @@ -1,5 +1,6 @@ // Code generated by linux/mkall.go generatePtracePair("mips", "mips64"). DO NOT EDIT. +//go:build linux && (mips || mips64) // +build linux // +build mips mips64 diff --git a/vendor/golang.org/x/sys/unix/zptrace_mipsnnle_linux.go b/vendor/golang.org/x/sys/unix/zptrace_mipsnnle_linux.go index 47b048956..3ccf0c0c4 100644 --- a/vendor/golang.org/x/sys/unix/zptrace_mipsnnle_linux.go +++ b/vendor/golang.org/x/sys/unix/zptrace_mipsnnle_linux.go @@ -1,5 +1,6 @@ // Code generated by linux/mkall.go generatePtracePair("mipsle", "mips64le"). DO NOT EDIT. +//go:build linux && (mipsle || mips64le) // +build linux // +build mipsle mips64le diff --git a/vendor/golang.org/x/sys/unix/zptrace_x86_linux.go b/vendor/golang.org/x/sys/unix/zptrace_x86_linux.go index ea5d9cb53..7d6585700 100644 --- a/vendor/golang.org/x/sys/unix/zptrace_x86_linux.go +++ b/vendor/golang.org/x/sys/unix/zptrace_x86_linux.go @@ -1,5 +1,6 @@ // Code generated by linux/mkall.go generatePtracePair("386", "amd64"). DO NOT EDIT. +//go:build linux && (386 || amd64) // +build linux // +build 386 amd64 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go index ed657ff1b..91a23cc72 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go @@ -1,6 +1,7 @@ // go run mksyscall_aix_ppc.go -aix -tags aix,ppc syscall_aix.go syscall_aix_ppc.go // Code generated by the command above; see README.md. DO NOT EDIT. 
+//go:build aix && ppc // +build aix,ppc package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go index 664b293b4..33c2609b8 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go @@ -1,6 +1,7 @@ // go run mksyscall_aix_ppc64.go -aix -tags aix,ppc64 syscall_aix.go syscall_aix_ppc64.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build aix && ppc64 // +build aix,ppc64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go index 0550da06d..8b737fa97 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go @@ -1,8 +1,8 @@ // go run mksyscall_aix_ppc64.go -aix -tags aix,ppc64 syscall_aix.go syscall_aix_ppc64.go // Code generated by the command above; see README.md. DO NOT EDIT. -// +build aix,ppc64 -// +build gc +//go:build aix && ppc64 && gc +// +build aix,ppc64,gc package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go index cde4dbc5f..3c260917e 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go @@ -1,8 +1,8 @@ // go run mksyscall_aix_ppc64.go -aix -tags aix,ppc64 syscall_aix.go syscall_aix_ppc64.go // Code generated by the command above; see README.md. DO NOT EDIT. 
-// +build aix,ppc64 -// +build gccgo +//go:build aix && ppc64 && gccgo +// +build aix,ppc64,gccgo package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_13.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_13.go deleted file mode 100644 index c8c142c59..000000000 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_13.go +++ /dev/null @@ -1,39 +0,0 @@ -// go run mksyscall.go -l32 -tags darwin,386,go1.13 syscall_darwin.1_13.go -// Code generated by the command above; see README.md. DO NOT EDIT. - -// +build darwin,386,go1.13 - -package unix - -import ( - "syscall" - "unsafe" -) - -var _ syscall.Errno - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func closedir(dir uintptr) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_closedir_trampoline), uintptr(dir), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_closedir_trampoline() - -//go:cgo_import_dynamic libc_closedir closedir "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func readdir_r(dir uintptr, entry *Dirent, result **Dirent) (res Errno) { - r0, _, _ := syscall_syscall(funcPC(libc_readdir_r_trampoline), uintptr(dir), uintptr(unsafe.Pointer(entry)), uintptr(unsafe.Pointer(result))) - res = Errno(r0) - return -} - -func libc_readdir_r_trampoline() - -//go:cgo_import_dynamic libc_readdir_r readdir_r "/usr/lib/libSystem.B.dylib" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_13.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_13.s deleted file mode 100644 index 00da1ebfc..000000000 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_13.s +++ /dev/null @@ -1,12 +0,0 @@ -// go run mkasm_darwin.go 386 -// Code generated by the command above; DO NOT EDIT. 
- -// +build go1.13 - -#include "textflag.h" -TEXT ·libc_fdopendir_trampoline(SB),NOSPLIT,$0-0 - JMP libc_fdopendir(SB) -TEXT ·libc_closedir_trampoline(SB),NOSPLIT,$0-0 - JMP libc_closedir(SB) -TEXT ·libc_readdir_r_trampoline(SB),NOSPLIT,$0-0 - JMP libc_readdir_r(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go deleted file mode 100644 index 387718348..000000000 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go +++ /dev/null @@ -1,2430 +0,0 @@ -// go run mksyscall.go -l32 -tags darwin,386,go1.12 syscall_bsd.go syscall_darwin.go syscall_darwin_386.go -// Code generated by the command above; see README.md. DO NOT EDIT. - -// +build darwin,386,go1.12 - -package unix - -import ( - "syscall" - "unsafe" -) - -var _ syscall.Errno - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func getgroups(ngid int, gid *_Gid_t) (n int, err error) { - r0, _, e1 := syscall_rawSyscall(funcPC(libc_getgroups_trampoline), uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_getgroups_trampoline() - -//go:cgo_import_dynamic libc_getgroups getgroups "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func setgroups(ngid int, gid *_Gid_t) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_setgroups_trampoline), uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_setgroups_trampoline() - -//go:cgo_import_dynamic libc_setgroups setgroups "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { - r0, _, e1 := syscall_syscall6(funcPC(libc_wait4_trampoline), uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) - wpid = int(r0) 
- if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_wait4_trampoline() - -//go:cgo_import_dynamic libc_wait4 wait4 "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_accept_trampoline), uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_accept_trampoline() - -//go:cgo_import_dynamic libc_accept accept "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_bind_trampoline), uintptr(s), uintptr(addr), uintptr(addrlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_bind_trampoline() - -//go:cgo_import_dynamic libc_bind bind "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_connect_trampoline), uintptr(s), uintptr(addr), uintptr(addrlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_connect_trampoline() - -//go:cgo_import_dynamic libc_connect connect "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func socket(domain int, typ int, proto int) (fd int, err error) { - r0, _, e1 := syscall_rawSyscall(funcPC(libc_socket_trampoline), uintptr(domain), uintptr(typ), uintptr(proto)) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_socket_trampoline() - -//go:cgo_import_dynamic libc_socket socket "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func getsockopt(s int, level int, name int, val unsafe.Pointer, 
vallen *_Socklen) (err error) { - _, _, e1 := syscall_syscall6(funcPC(libc_getsockopt_trampoline), uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_getsockopt_trampoline() - -//go:cgo_import_dynamic libc_getsockopt getsockopt "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { - _, _, e1 := syscall_syscall6(funcPC(libc_setsockopt_trampoline), uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_setsockopt_trampoline() - -//go:cgo_import_dynamic libc_setsockopt setsockopt "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_getpeername_trampoline), uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_getpeername_trampoline() - -//go:cgo_import_dynamic libc_getpeername getpeername "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_getsockname_trampoline), uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_getsockname_trampoline() - -//go:cgo_import_dynamic libc_getsockname getsockname "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Shutdown(s int, how int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_shutdown_trampoline), uintptr(s), uintptr(how), 0) - 
if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_shutdown_trampoline() - -//go:cgo_import_dynamic libc_shutdown shutdown "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { - _, _, e1 := syscall_rawSyscall6(funcPC(libc_socketpair_trampoline), uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_socketpair_trampoline() - -//go:cgo_import_dynamic libc_socketpair socketpair "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := syscall_syscall6(funcPC(libc_recvfrom_trampoline), uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_recvfrom_trampoline() - -//go:cgo_import_dynamic libc_recvfrom recvfrom "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := syscall_syscall6(funcPC(libc_sendto_trampoline), uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_sendto_trampoline() - -//go:cgo_import_dynamic libc_sendto sendto "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func recvmsg(s int, msg 
*Msghdr, flags int) (n int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_recvmsg_trampoline), uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_recvmsg_trampoline() - -//go:cgo_import_dynamic libc_recvmsg recvmsg "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_sendmsg_trampoline), uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_sendmsg_trampoline() - -//go:cgo_import_dynamic libc_sendmsg sendmsg "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) { - r0, _, e1 := syscall_syscall6(funcPC(libc_kevent_trampoline), uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_kevent_trampoline() - -//go:cgo_import_dynamic libc_kevent kevent "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func utimes(path string, timeval *[2]Timeval) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_utimes_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_utimes_trampoline() - -//go:cgo_import_dynamic libc_utimes utimes "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func futimes(fd int, timeval *[2]Timeval) (err error) { - _, _, e1 := 
syscall_syscall(funcPC(libc_futimes_trampoline), uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_futimes_trampoline() - -//go:cgo_import_dynamic libc_futimes futimes "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_poll_trampoline), uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_poll_trampoline() - -//go:cgo_import_dynamic libc_poll poll "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Madvise(b []byte, behav int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := syscall_syscall(funcPC(libc_madvise_trampoline), uintptr(_p0), uintptr(len(b)), uintptr(behav)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_madvise_trampoline() - -//go:cgo_import_dynamic libc_madvise madvise "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := syscall_syscall(funcPC(libc_mlock_trampoline), uintptr(_p0), uintptr(len(b)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_mlock_trampoline() - -//go:cgo_import_dynamic libc_mlock mlock "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mlockall(flags int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_mlockall_trampoline), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_mlockall_trampoline() - -//go:cgo_import_dynamic 
libc_mlockall mlockall "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mprotect(b []byte, prot int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := syscall_syscall(funcPC(libc_mprotect_trampoline), uintptr(_p0), uintptr(len(b)), uintptr(prot)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_mprotect_trampoline() - -//go:cgo_import_dynamic libc_mprotect mprotect "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Msync(b []byte, flags int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := syscall_syscall(funcPC(libc_msync_trampoline), uintptr(_p0), uintptr(len(b)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_msync_trampoline() - -//go:cgo_import_dynamic libc_msync msync "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Munlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := syscall_syscall(funcPC(libc_munlock_trampoline), uintptr(_p0), uintptr(len(b)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_munlock_trampoline() - -//go:cgo_import_dynamic libc_munlock munlock "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Munlockall() (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_munlockall_trampoline), 0, 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_munlockall_trampoline() - -//go:cgo_import_dynamic libc_munlockall munlockall "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func pipe(p *[2]int32) 
(err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_pipe_trampoline), uintptr(unsafe.Pointer(p)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_pipe_trampoline() - -//go:cgo_import_dynamic libc_pipe pipe "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func getxattr(path string, attr string, dest *byte, size int, position uint32, options int) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - r0, _, e1 := syscall_syscall6(funcPC(libc_getxattr_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(position), uintptr(options)) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_getxattr_trampoline() - -//go:cgo_import_dynamic libc_getxattr getxattr "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fgetxattr(fd int, attr string, dest *byte, size int, position uint32, options int) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - r0, _, e1 := syscall_syscall6(funcPC(libc_fgetxattr_trampoline), uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(position), uintptr(options)) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_fgetxattr_trampoline() - -//go:cgo_import_dynamic libc_fgetxattr fgetxattr "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func setxattr(path string, attr string, data *byte, size int, position uint32, options int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - _, 
_, e1 := syscall_syscall6(funcPC(libc_setxattr_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(data)), uintptr(size), uintptr(position), uintptr(options)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_setxattr_trampoline() - -//go:cgo_import_dynamic libc_setxattr setxattr "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fsetxattr(fd int, attr string, data *byte, size int, position uint32, options int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := syscall_syscall6(funcPC(libc_fsetxattr_trampoline), uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(data)), uintptr(size), uintptr(position), uintptr(options)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_fsetxattr_trampoline() - -//go:cgo_import_dynamic libc_fsetxattr fsetxattr "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func removexattr(path string, attr string, options int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_removexattr_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(options)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_removexattr_trampoline() - -//go:cgo_import_dynamic libc_removexattr removexattr "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fremovexattr(fd int, attr string, options int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_fremovexattr_trampoline), uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(options)) - if e1 != 0 { - err = 
errnoErr(e1) - } - return -} - -func libc_fremovexattr_trampoline() - -//go:cgo_import_dynamic libc_fremovexattr fremovexattr "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func listxattr(path string, dest *byte, size int, options int) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - r0, _, e1 := syscall_syscall6(funcPC(libc_listxattr_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(options), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_listxattr_trampoline() - -//go:cgo_import_dynamic libc_listxattr listxattr "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func flistxattr(fd int, dest *byte, size int, options int) (sz int, err error) { - r0, _, e1 := syscall_syscall6(funcPC(libc_flistxattr_trampoline), uintptr(fd), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(options), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_flistxattr_trampoline() - -//go:cgo_import_dynamic libc_flistxattr flistxattr "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func setattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintptr, options int) (err error) { - _, _, e1 := syscall_syscall6(funcPC(libc_setattrlist_trampoline), uintptr(unsafe.Pointer(path)), uintptr(list), uintptr(buf), uintptr(size), uintptr(options), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_setattrlist_trampoline() - -//go:cgo_import_dynamic libc_setattrlist setattrlist "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_fcntl_trampoline), uintptr(fd), uintptr(cmd), uintptr(arg)) - val = 
int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_fcntl_trampoline() - -//go:cgo_import_dynamic libc_fcntl fcntl "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func kill(pid int, signum int, posix int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_kill_trampoline), uintptr(pid), uintptr(signum), uintptr(posix)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_kill_trampoline() - -//go:cgo_import_dynamic libc_kill kill "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ioctl(fd int, req uint, arg uintptr) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_ioctl_trampoline), uintptr(fd), uintptr(req), uintptr(arg)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_ioctl_trampoline() - -//go:cgo_import_dynamic libc_ioctl ioctl "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := syscall_syscall6(funcPC(libc_sysctl_trampoline), uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_sysctl_trampoline() - -//go:cgo_import_dynamic libc_sysctl sysctl "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) { - _, _, e1 := syscall_syscall9(funcPC(libc_sendfile_trampoline), uintptr(infd), uintptr(outfd), uintptr(offset), uintptr(offset>>32), uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags), 0, 0) - if e1 != 0 { - err = 
errnoErr(e1) - } - return -} - -func libc_sendfile_trampoline() - -//go:cgo_import_dynamic libc_sendfile sendfile "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Access(path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_access_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_access_trampoline() - -//go:cgo_import_dynamic libc_access access "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_adjtime_trampoline), uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_adjtime_trampoline() - -//go:cgo_import_dynamic libc_adjtime adjtime "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chdir(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_chdir_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_chdir_trampoline() - -//go:cgo_import_dynamic libc_chdir chdir "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chflags(path string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_chflags_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_chflags_trampoline() - -//go:cgo_import_dynamic libc_chflags chflags "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS 
GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chmod(path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_chmod_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_chmod_trampoline() - -//go:cgo_import_dynamic libc_chmod chmod "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chown(path string, uid int, gid int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_chown_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_chown_trampoline() - -//go:cgo_import_dynamic libc_chown chown "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chroot(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_chroot_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_chroot_trampoline() - -//go:cgo_import_dynamic libc_chroot chroot "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ClockGettime(clockid int32, time *Timespec) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_clock_gettime_trampoline), uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_clock_gettime_trampoline() - -//go:cgo_import_dynamic libc_clock_gettime clock_gettime "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Close(fd int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_close_trampoline), 
uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_close_trampoline() - -//go:cgo_import_dynamic libc_close close "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Clonefile(src string, dst string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(src) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(dst) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_clonefile_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_clonefile_trampoline() - -//go:cgo_import_dynamic libc_clonefile clonefile "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Clonefileat(srcDirfd int, src string, dstDirfd int, dst string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(src) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(dst) - if err != nil { - return - } - _, _, e1 := syscall_syscall6(funcPC(libc_clonefileat_trampoline), uintptr(srcDirfd), uintptr(unsafe.Pointer(_p0)), uintptr(dstDirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_clonefileat_trampoline() - -//go:cgo_import_dynamic libc_clonefileat clonefileat "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Dup(fd int) (nfd int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_dup_trampoline), uintptr(fd), 0, 0) - nfd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_dup_trampoline() - -//go:cgo_import_dynamic libc_dup dup "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Dup2(from int, to int) (err error) { - _, _, e1 := 
syscall_syscall(funcPC(libc_dup2_trampoline), uintptr(from), uintptr(to), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_dup2_trampoline() - -//go:cgo_import_dynamic libc_dup2 dup2 "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Exchangedata(path1 string, path2 string, options int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path1) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(path2) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_exchangedata_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(options)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_exchangedata_trampoline() - -//go:cgo_import_dynamic libc_exchangedata exchangedata "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Exit(code int) { - syscall_syscall(funcPC(libc_exit_trampoline), uintptr(code), 0, 0) - return -} - -func libc_exit_trampoline() - -//go:cgo_import_dynamic libc_exit exit "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall6(funcPC(libc_faccessat_trampoline), uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_faccessat_trampoline() - -//go:cgo_import_dynamic libc_faccessat faccessat "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchdir(fd int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_fchdir_trampoline), uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_fchdir_trampoline() - -//go:cgo_import_dynamic 
libc_fchdir fchdir "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchflags(fd int, flags int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_fchflags_trampoline), uintptr(fd), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_fchflags_trampoline() - -//go:cgo_import_dynamic libc_fchflags fchflags "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchmod(fd int, mode uint32) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_fchmod_trampoline), uintptr(fd), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_fchmod_trampoline() - -//go:cgo_import_dynamic libc_fchmod fchmod "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall6(funcPC(libc_fchmodat_trampoline), uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_fchmodat_trampoline() - -//go:cgo_import_dynamic libc_fchmodat fchmodat "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchown(fd int, uid int, gid int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_fchown_trampoline), uintptr(fd), uintptr(uid), uintptr(gid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_fchown_trampoline() - -//go:cgo_import_dynamic libc_fchown fchown "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := 
syscall_syscall6(funcPC(libc_fchownat_trampoline), uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_fchownat_trampoline() - -//go:cgo_import_dynamic libc_fchownat fchownat "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fclonefileat(srcDirfd int, dstDirfd int, dst string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(dst) - if err != nil { - return - } - _, _, e1 := syscall_syscall6(funcPC(libc_fclonefileat_trampoline), uintptr(srcDirfd), uintptr(dstDirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_fclonefileat_trampoline() - -//go:cgo_import_dynamic libc_fclonefileat fclonefileat "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Flock(fd int, how int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_flock_trampoline), uintptr(fd), uintptr(how), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_flock_trampoline() - -//go:cgo_import_dynamic libc_flock flock "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fpathconf(fd int, name int) (val int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_fpathconf_trampoline), uintptr(fd), uintptr(name), 0) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_fpathconf_trampoline() - -//go:cgo_import_dynamic libc_fpathconf fpathconf "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fsync(fd int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_fsync_trampoline), uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_fsync_trampoline() - -//go:cgo_import_dynamic libc_fsync fsync "/usr/lib/libSystem.B.dylib" - 
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Ftruncate(fd int, length int64) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_ftruncate_trampoline), uintptr(fd), uintptr(length), uintptr(length>>32)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_ftruncate_trampoline() - -//go:cgo_import_dynamic libc_ftruncate ftruncate "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getcwd(buf []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := syscall_syscall(funcPC(libc_getcwd_trampoline), uintptr(_p0), uintptr(len(buf)), 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_getcwd_trampoline() - -//go:cgo_import_dynamic libc_getcwd getcwd "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getdtablesize() (size int) { - r0, _, _ := syscall_syscall(funcPC(libc_getdtablesize_trampoline), 0, 0, 0) - size = int(r0) - return -} - -func libc_getdtablesize_trampoline() - -//go:cgo_import_dynamic libc_getdtablesize getdtablesize "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getegid() (egid int) { - r0, _, _ := syscall_rawSyscall(funcPC(libc_getegid_trampoline), 0, 0, 0) - egid = int(r0) - return -} - -func libc_getegid_trampoline() - -//go:cgo_import_dynamic libc_getegid getegid "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Geteuid() (uid int) { - r0, _, _ := syscall_rawSyscall(funcPC(libc_geteuid_trampoline), 0, 0, 0) - uid = int(r0) - return -} - -func libc_geteuid_trampoline() - -//go:cgo_import_dynamic libc_geteuid geteuid "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getgid() (gid int) { - r0, _, _ := 
syscall_rawSyscall(funcPC(libc_getgid_trampoline), 0, 0, 0) - gid = int(r0) - return -} - -func libc_getgid_trampoline() - -//go:cgo_import_dynamic libc_getgid getgid "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpgid(pid int) (pgid int, err error) { - r0, _, e1 := syscall_rawSyscall(funcPC(libc_getpgid_trampoline), uintptr(pid), 0, 0) - pgid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_getpgid_trampoline() - -//go:cgo_import_dynamic libc_getpgid getpgid "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpgrp() (pgrp int) { - r0, _, _ := syscall_rawSyscall(funcPC(libc_getpgrp_trampoline), 0, 0, 0) - pgrp = int(r0) - return -} - -func libc_getpgrp_trampoline() - -//go:cgo_import_dynamic libc_getpgrp getpgrp "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpid() (pid int) { - r0, _, _ := syscall_rawSyscall(funcPC(libc_getpid_trampoline), 0, 0, 0) - pid = int(r0) - return -} - -func libc_getpid_trampoline() - -//go:cgo_import_dynamic libc_getpid getpid "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getppid() (ppid int) { - r0, _, _ := syscall_rawSyscall(funcPC(libc_getppid_trampoline), 0, 0, 0) - ppid = int(r0) - return -} - -func libc_getppid_trampoline() - -//go:cgo_import_dynamic libc_getppid getppid "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpriority(which int, who int) (prio int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_getpriority_trampoline), uintptr(which), uintptr(who), 0) - prio = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_getpriority_trampoline() - -//go:cgo_import_dynamic libc_getpriority getpriority "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO 
NOT EDIT - -func Getrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_getrlimit_trampoline), uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_getrlimit_trampoline() - -//go:cgo_import_dynamic libc_getrlimit getrlimit "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getrusage(who int, rusage *Rusage) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_getrusage_trampoline), uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_getrusage_trampoline() - -//go:cgo_import_dynamic libc_getrusage getrusage "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getsid(pid int) (sid int, err error) { - r0, _, e1 := syscall_rawSyscall(funcPC(libc_getsid_trampoline), uintptr(pid), 0, 0) - sid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_getsid_trampoline() - -//go:cgo_import_dynamic libc_getsid getsid "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Gettimeofday(tp *Timeval) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_gettimeofday_trampoline), uintptr(unsafe.Pointer(tp)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_gettimeofday_trampoline() - -//go:cgo_import_dynamic libc_gettimeofday gettimeofday "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getuid() (uid int) { - r0, _, _ := syscall_rawSyscall(funcPC(libc_getuid_trampoline), 0, 0, 0) - uid = int(r0) - return -} - -func libc_getuid_trampoline() - -//go:cgo_import_dynamic libc_getuid getuid "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Issetugid() (tainted bool) { - r0, _, _ := 
syscall_rawSyscall(funcPC(libc_issetugid_trampoline), 0, 0, 0) - tainted = bool(r0 != 0) - return -} - -func libc_issetugid_trampoline() - -//go:cgo_import_dynamic libc_issetugid issetugid "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Kqueue() (fd int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_kqueue_trampoline), 0, 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_kqueue_trampoline() - -//go:cgo_import_dynamic libc_kqueue kqueue "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lchown(path string, uid int, gid int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_lchown_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_lchown_trampoline() - -//go:cgo_import_dynamic libc_lchown lchown "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Link(path string, link string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(link) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_link_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_link_trampoline() - -//go:cgo_import_dynamic libc_link link "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(link) - if err != nil { - return - } - _, _, e1 := 
syscall_syscall6(funcPC(libc_linkat_trampoline), uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_linkat_trampoline() - -//go:cgo_import_dynamic libc_linkat linkat "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Listen(s int, backlog int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_listen_trampoline), uintptr(s), uintptr(backlog), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_listen_trampoline() - -//go:cgo_import_dynamic libc_listen listen "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mkdir(path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_mkdir_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_mkdir_trampoline() - -//go:cgo_import_dynamic libc_mkdir mkdir "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mkdirat(dirfd int, path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_mkdirat_trampoline), uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_mkdirat_trampoline() - -//go:cgo_import_dynamic libc_mkdirat mkdirat "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mkfifo(path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_mkfifo_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) - if e1 != 
0 { - err = errnoErr(e1) - } - return -} - -func libc_mkfifo_trampoline() - -//go:cgo_import_dynamic libc_mkfifo mkfifo "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mknod(path string, mode uint32, dev int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_mknod_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_mknod_trampoline() - -//go:cgo_import_dynamic libc_mknod mknod "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Open(path string, mode int, perm uint32) (fd int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - r0, _, e1 := syscall_syscall(funcPC(libc_open_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_open_trampoline() - -//go:cgo_import_dynamic libc_open open "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - r0, _, e1 := syscall_syscall6(funcPC(libc_openat_trampoline), uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_openat_trampoline() - -//go:cgo_import_dynamic libc_openat openat "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Pathconf(path string, name int) (val int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - r0, _, e1 := syscall_syscall(funcPC(libc_pathconf_trampoline), 
uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_pathconf_trampoline() - -//go:cgo_import_dynamic libc_pathconf pathconf "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Pread(fd int, p []byte, offset int64) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := syscall_syscall6(funcPC(libc_pread_trampoline), uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), uintptr(offset>>32), 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_pread_trampoline() - -//go:cgo_import_dynamic libc_pread pread "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := syscall_syscall6(funcPC(libc_pwrite_trampoline), uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), uintptr(offset>>32), 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_pwrite_trampoline() - -//go:cgo_import_dynamic libc_pwrite pwrite "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func read(fd int, p []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := syscall_syscall(funcPC(libc_read_trampoline), uintptr(fd), uintptr(_p0), uintptr(len(p))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_read_trampoline() - -//go:cgo_import_dynamic libc_read read "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Readlink(path string, buf 
[]byte) (n int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(buf) > 0 { - _p1 = unsafe.Pointer(&buf[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := syscall_syscall(funcPC(libc_readlink_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_readlink_trampoline() - -//go:cgo_import_dynamic libc_readlink readlink "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(buf) > 0 { - _p1 = unsafe.Pointer(&buf[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := syscall_syscall6(funcPC(libc_readlinkat_trampoline), uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_readlinkat_trampoline() - -//go:cgo_import_dynamic libc_readlinkat readlinkat "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Rename(from string, to string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(from) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(to) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_rename_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_rename_trampoline() - -//go:cgo_import_dynamic libc_rename rename "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Renameat(fromfd int, from string, tofd int, to string) (err error) { - var _p0 *byte - _p0, err = 
BytePtrFromString(from) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(to) - if err != nil { - return - } - _, _, e1 := syscall_syscall6(funcPC(libc_renameat_trampoline), uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_renameat_trampoline() - -//go:cgo_import_dynamic libc_renameat renameat "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Revoke(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_revoke_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_revoke_trampoline() - -//go:cgo_import_dynamic libc_revoke revoke "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Rmdir(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_rmdir_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_rmdir_trampoline() - -//go:cgo_import_dynamic libc_rmdir rmdir "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { - r0, r1, e1 := syscall_syscall6(funcPC(libc_lseek_trampoline), uintptr(fd), uintptr(offset), uintptr(offset>>32), uintptr(whence), 0, 0) - newoffset = int64(int64(r1)<<32 | int64(r0)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_lseek_trampoline() - -//go:cgo_import_dynamic libc_lseek lseek "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n 
int, err error) { - r0, _, e1 := syscall_syscall6(funcPC(libc_select_trampoline), uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_select_trampoline() - -//go:cgo_import_dynamic libc_select select "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setegid(egid int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_setegid_trampoline), uintptr(egid), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_setegid_trampoline() - -//go:cgo_import_dynamic libc_setegid setegid "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Seteuid(euid int) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_seteuid_trampoline), uintptr(euid), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_seteuid_trampoline() - -//go:cgo_import_dynamic libc_seteuid seteuid "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setgid(gid int) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_setgid_trampoline), uintptr(gid), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_setgid_trampoline() - -//go:cgo_import_dynamic libc_setgid setgid "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setlogin(name string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(name) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_setlogin_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_setlogin_trampoline() - -//go:cgo_import_dynamic libc_setlogin setlogin "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func 
Setpgid(pid int, pgid int) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_setpgid_trampoline), uintptr(pid), uintptr(pgid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_setpgid_trampoline() - -//go:cgo_import_dynamic libc_setpgid setpgid "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setpriority(which int, who int, prio int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_setpriority_trampoline), uintptr(which), uintptr(who), uintptr(prio)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_setpriority_trampoline() - -//go:cgo_import_dynamic libc_setpriority setpriority "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setprivexec(flag int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_setprivexec_trampoline), uintptr(flag), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_setprivexec_trampoline() - -//go:cgo_import_dynamic libc_setprivexec setprivexec "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setregid(rgid int, egid int) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_setregid_trampoline), uintptr(rgid), uintptr(egid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_setregid_trampoline() - -//go:cgo_import_dynamic libc_setregid setregid "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_setreuid_trampoline), uintptr(ruid), uintptr(euid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_setreuid_trampoline() - -//go:cgo_import_dynamic libc_setreuid setreuid "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, 
e1 := syscall_rawSyscall(funcPC(libc_setrlimit_trampoline), uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_setrlimit_trampoline() - -//go:cgo_import_dynamic libc_setrlimit setrlimit "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setsid() (pid int, err error) { - r0, _, e1 := syscall_rawSyscall(funcPC(libc_setsid_trampoline), 0, 0, 0) - pid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_setsid_trampoline() - -//go:cgo_import_dynamic libc_setsid setsid "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Settimeofday(tp *Timeval) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_settimeofday_trampoline), uintptr(unsafe.Pointer(tp)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_settimeofday_trampoline() - -//go:cgo_import_dynamic libc_settimeofday settimeofday "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setuid(uid int) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_setuid_trampoline), uintptr(uid), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_setuid_trampoline() - -//go:cgo_import_dynamic libc_setuid setuid "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Symlink(path string, link string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(link) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_symlink_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_symlink_trampoline() - -//go:cgo_import_dynamic libc_symlink symlink "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED 
BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(oldpath) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(newpath) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_symlinkat_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_symlinkat_trampoline() - -//go:cgo_import_dynamic libc_symlinkat symlinkat "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Sync() (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_sync_trampoline), 0, 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_sync_trampoline() - -//go:cgo_import_dynamic libc_sync sync "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Truncate(path string, length int64) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_truncate_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(length), uintptr(length>>32)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_truncate_trampoline() - -//go:cgo_import_dynamic libc_truncate truncate "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Umask(newmask int) (oldmask int) { - r0, _, _ := syscall_syscall(funcPC(libc_umask_trampoline), uintptr(newmask), 0, 0) - oldmask = int(r0) - return -} - -func libc_umask_trampoline() - -//go:cgo_import_dynamic libc_umask umask "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Undelete(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := 
syscall_syscall(funcPC(libc_undelete_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_undelete_trampoline() - -//go:cgo_import_dynamic libc_undelete undelete "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unlink(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_unlink_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_unlink_trampoline() - -//go:cgo_import_dynamic libc_unlink unlink "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unlinkat(dirfd int, path string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_unlinkat_trampoline), uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_unlinkat_trampoline() - -//go:cgo_import_dynamic libc_unlinkat unlinkat "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unmount(path string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_unmount_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_unmount_trampoline() - -//go:cgo_import_dynamic libc_unmount unmount "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func write(fd int, p []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := syscall_syscall(funcPC(libc_write_trampoline), 
uintptr(fd), uintptr(_p0), uintptr(len(p))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_write_trampoline() - -//go:cgo_import_dynamic libc_write write "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { - r0, _, e1 := syscall_syscall9(funcPC(libc_mmap_trampoline), uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos), uintptr(pos>>32), 0, 0) - ret = uintptr(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_mmap_trampoline() - -//go:cgo_import_dynamic libc_mmap mmap "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func munmap(addr uintptr, length uintptr) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_munmap_trampoline), uintptr(addr), uintptr(length), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_munmap_trampoline() - -//go:cgo_import_dynamic libc_munmap munmap "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_read_trampoline), uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_write_trampoline), uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fstat(fd int, stat *Stat_t) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_fstat64_trampoline), uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) - if 
e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_fstat64_trampoline() - -//go:cgo_import_dynamic libc_fstat64 fstat64 "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall6(funcPC(libc_fstatat64_trampoline), uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_fstatat64_trampoline() - -//go:cgo_import_dynamic libc_fstatat64 fstatat64 "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fstatfs(fd int, stat *Statfs_t) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_fstatfs64_trampoline), uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_fstatfs64_trampoline() - -//go:cgo_import_dynamic libc_fstatfs64 fstatfs64 "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func getfsstat(buf unsafe.Pointer, size uintptr, flags int) (n int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_getfsstat64_trampoline), uintptr(buf), uintptr(size), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_getfsstat64_trampoline() - -//go:cgo_import_dynamic libc_getfsstat64 getfsstat64 "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lstat(path string, stat *Stat_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_lstat64_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_lstat64_trampoline() - 
-//go:cgo_import_dynamic libc_lstat64 lstat64 "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ptrace1(request int, pid int, addr uintptr, data uintptr) (err error) { - _, _, e1 := syscall_syscall6(funcPC(libc_ptrace_trampoline), uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_ptrace_trampoline() - -//go:cgo_import_dynamic libc_ptrace ptrace "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Stat(path string, stat *Stat_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_stat64_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_stat64_trampoline() - -//go:cgo_import_dynamic libc_stat64 stat64 "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Statfs(path string, stat *Statfs_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_statfs64_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_statfs64_trampoline() - -//go:cgo_import_dynamic libc_statfs64 statfs64 "/usr/lib/libSystem.B.dylib" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.s deleted file mode 100644 index 1c53979a1..000000000 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.s +++ /dev/null @@ -1,290 +0,0 @@ -// go run mkasm_darwin.go 386 -// Code generated by the command above; DO NOT EDIT. 
- -// +build go1.12 - -#include "textflag.h" -TEXT ·libc_getgroups_trampoline(SB),NOSPLIT,$0-0 - JMP libc_getgroups(SB) -TEXT ·libc_setgroups_trampoline(SB),NOSPLIT,$0-0 - JMP libc_setgroups(SB) -TEXT ·libc_wait4_trampoline(SB),NOSPLIT,$0-0 - JMP libc_wait4(SB) -TEXT ·libc_accept_trampoline(SB),NOSPLIT,$0-0 - JMP libc_accept(SB) -TEXT ·libc_bind_trampoline(SB),NOSPLIT,$0-0 - JMP libc_bind(SB) -TEXT ·libc_connect_trampoline(SB),NOSPLIT,$0-0 - JMP libc_connect(SB) -TEXT ·libc_socket_trampoline(SB),NOSPLIT,$0-0 - JMP libc_socket(SB) -TEXT ·libc_getsockopt_trampoline(SB),NOSPLIT,$0-0 - JMP libc_getsockopt(SB) -TEXT ·libc_setsockopt_trampoline(SB),NOSPLIT,$0-0 - JMP libc_setsockopt(SB) -TEXT ·libc_getpeername_trampoline(SB),NOSPLIT,$0-0 - JMP libc_getpeername(SB) -TEXT ·libc_getsockname_trampoline(SB),NOSPLIT,$0-0 - JMP libc_getsockname(SB) -TEXT ·libc_shutdown_trampoline(SB),NOSPLIT,$0-0 - JMP libc_shutdown(SB) -TEXT ·libc_socketpair_trampoline(SB),NOSPLIT,$0-0 - JMP libc_socketpair(SB) -TEXT ·libc_recvfrom_trampoline(SB),NOSPLIT,$0-0 - JMP libc_recvfrom(SB) -TEXT ·libc_sendto_trampoline(SB),NOSPLIT,$0-0 - JMP libc_sendto(SB) -TEXT ·libc_recvmsg_trampoline(SB),NOSPLIT,$0-0 - JMP libc_recvmsg(SB) -TEXT ·libc_sendmsg_trampoline(SB),NOSPLIT,$0-0 - JMP libc_sendmsg(SB) -TEXT ·libc_kevent_trampoline(SB),NOSPLIT,$0-0 - JMP libc_kevent(SB) -TEXT ·libc_utimes_trampoline(SB),NOSPLIT,$0-0 - JMP libc_utimes(SB) -TEXT ·libc_futimes_trampoline(SB),NOSPLIT,$0-0 - JMP libc_futimes(SB) -TEXT ·libc_poll_trampoline(SB),NOSPLIT,$0-0 - JMP libc_poll(SB) -TEXT ·libc_madvise_trampoline(SB),NOSPLIT,$0-0 - JMP libc_madvise(SB) -TEXT ·libc_mlock_trampoline(SB),NOSPLIT,$0-0 - JMP libc_mlock(SB) -TEXT ·libc_mlockall_trampoline(SB),NOSPLIT,$0-0 - JMP libc_mlockall(SB) -TEXT ·libc_mprotect_trampoline(SB),NOSPLIT,$0-0 - JMP libc_mprotect(SB) -TEXT ·libc_msync_trampoline(SB),NOSPLIT,$0-0 - JMP libc_msync(SB) -TEXT ·libc_munlock_trampoline(SB),NOSPLIT,$0-0 - JMP libc_munlock(SB) -TEXT 
·libc_munlockall_trampoline(SB),NOSPLIT,$0-0 - JMP libc_munlockall(SB) -TEXT ·libc_pipe_trampoline(SB),NOSPLIT,$0-0 - JMP libc_pipe(SB) -TEXT ·libc_getxattr_trampoline(SB),NOSPLIT,$0-0 - JMP libc_getxattr(SB) -TEXT ·libc_fgetxattr_trampoline(SB),NOSPLIT,$0-0 - JMP libc_fgetxattr(SB) -TEXT ·libc_setxattr_trampoline(SB),NOSPLIT,$0-0 - JMP libc_setxattr(SB) -TEXT ·libc_fsetxattr_trampoline(SB),NOSPLIT,$0-0 - JMP libc_fsetxattr(SB) -TEXT ·libc_removexattr_trampoline(SB),NOSPLIT,$0-0 - JMP libc_removexattr(SB) -TEXT ·libc_fremovexattr_trampoline(SB),NOSPLIT,$0-0 - JMP libc_fremovexattr(SB) -TEXT ·libc_listxattr_trampoline(SB),NOSPLIT,$0-0 - JMP libc_listxattr(SB) -TEXT ·libc_flistxattr_trampoline(SB),NOSPLIT,$0-0 - JMP libc_flistxattr(SB) -TEXT ·libc_setattrlist_trampoline(SB),NOSPLIT,$0-0 - JMP libc_setattrlist(SB) -TEXT ·libc_fcntl_trampoline(SB),NOSPLIT,$0-0 - JMP libc_fcntl(SB) -TEXT ·libc_kill_trampoline(SB),NOSPLIT,$0-0 - JMP libc_kill(SB) -TEXT ·libc_ioctl_trampoline(SB),NOSPLIT,$0-0 - JMP libc_ioctl(SB) -TEXT ·libc_sysctl_trampoline(SB),NOSPLIT,$0-0 - JMP libc_sysctl(SB) -TEXT ·libc_sendfile_trampoline(SB),NOSPLIT,$0-0 - JMP libc_sendfile(SB) -TEXT ·libc_access_trampoline(SB),NOSPLIT,$0-0 - JMP libc_access(SB) -TEXT ·libc_adjtime_trampoline(SB),NOSPLIT,$0-0 - JMP libc_adjtime(SB) -TEXT ·libc_chdir_trampoline(SB),NOSPLIT,$0-0 - JMP libc_chdir(SB) -TEXT ·libc_chflags_trampoline(SB),NOSPLIT,$0-0 - JMP libc_chflags(SB) -TEXT ·libc_chmod_trampoline(SB),NOSPLIT,$0-0 - JMP libc_chmod(SB) -TEXT ·libc_chown_trampoline(SB),NOSPLIT,$0-0 - JMP libc_chown(SB) -TEXT ·libc_chroot_trampoline(SB),NOSPLIT,$0-0 - JMP libc_chroot(SB) -TEXT ·libc_clock_gettime_trampoline(SB),NOSPLIT,$0-0 - JMP libc_clock_gettime(SB) -TEXT ·libc_close_trampoline(SB),NOSPLIT,$0-0 - JMP libc_close(SB) -TEXT ·libc_clonefile_trampoline(SB),NOSPLIT,$0-0 - JMP libc_clonefile(SB) -TEXT ·libc_clonefileat_trampoline(SB),NOSPLIT,$0-0 - JMP libc_clonefileat(SB) -TEXT ·libc_dup_trampoline(SB),NOSPLIT,$0-0 - JMP 
libc_dup(SB) -TEXT ·libc_dup2_trampoline(SB),NOSPLIT,$0-0 - JMP libc_dup2(SB) -TEXT ·libc_exchangedata_trampoline(SB),NOSPLIT,$0-0 - JMP libc_exchangedata(SB) -TEXT ·libc_exit_trampoline(SB),NOSPLIT,$0-0 - JMP libc_exit(SB) -TEXT ·libc_faccessat_trampoline(SB),NOSPLIT,$0-0 - JMP libc_faccessat(SB) -TEXT ·libc_fchdir_trampoline(SB),NOSPLIT,$0-0 - JMP libc_fchdir(SB) -TEXT ·libc_fchflags_trampoline(SB),NOSPLIT,$0-0 - JMP libc_fchflags(SB) -TEXT ·libc_fchmod_trampoline(SB),NOSPLIT,$0-0 - JMP libc_fchmod(SB) -TEXT ·libc_fchmodat_trampoline(SB),NOSPLIT,$0-0 - JMP libc_fchmodat(SB) -TEXT ·libc_fchown_trampoline(SB),NOSPLIT,$0-0 - JMP libc_fchown(SB) -TEXT ·libc_fchownat_trampoline(SB),NOSPLIT,$0-0 - JMP libc_fchownat(SB) -TEXT ·libc_fclonefileat_trampoline(SB),NOSPLIT,$0-0 - JMP libc_fclonefileat(SB) -TEXT ·libc_flock_trampoline(SB),NOSPLIT,$0-0 - JMP libc_flock(SB) -TEXT ·libc_fpathconf_trampoline(SB),NOSPLIT,$0-0 - JMP libc_fpathconf(SB) -TEXT ·libc_fsync_trampoline(SB),NOSPLIT,$0-0 - JMP libc_fsync(SB) -TEXT ·libc_ftruncate_trampoline(SB),NOSPLIT,$0-0 - JMP libc_ftruncate(SB) -TEXT ·libc_getcwd_trampoline(SB),NOSPLIT,$0-0 - JMP libc_getcwd(SB) -TEXT ·libc_getdtablesize_trampoline(SB),NOSPLIT,$0-0 - JMP libc_getdtablesize(SB) -TEXT ·libc_getegid_trampoline(SB),NOSPLIT,$0-0 - JMP libc_getegid(SB) -TEXT ·libc_geteuid_trampoline(SB),NOSPLIT,$0-0 - JMP libc_geteuid(SB) -TEXT ·libc_getgid_trampoline(SB),NOSPLIT,$0-0 - JMP libc_getgid(SB) -TEXT ·libc_getpgid_trampoline(SB),NOSPLIT,$0-0 - JMP libc_getpgid(SB) -TEXT ·libc_getpgrp_trampoline(SB),NOSPLIT,$0-0 - JMP libc_getpgrp(SB) -TEXT ·libc_getpid_trampoline(SB),NOSPLIT,$0-0 - JMP libc_getpid(SB) -TEXT ·libc_getppid_trampoline(SB),NOSPLIT,$0-0 - JMP libc_getppid(SB) -TEXT ·libc_getpriority_trampoline(SB),NOSPLIT,$0-0 - JMP libc_getpriority(SB) -TEXT ·libc_getrlimit_trampoline(SB),NOSPLIT,$0-0 - JMP libc_getrlimit(SB) -TEXT ·libc_getrusage_trampoline(SB),NOSPLIT,$0-0 - JMP libc_getrusage(SB) -TEXT 
·libc_getsid_trampoline(SB),NOSPLIT,$0-0 - JMP libc_getsid(SB) -TEXT ·libc_gettimeofday_trampoline(SB),NOSPLIT,$0-0 - JMP libc_gettimeofday(SB) -TEXT ·libc_getuid_trampoline(SB),NOSPLIT,$0-0 - JMP libc_getuid(SB) -TEXT ·libc_issetugid_trampoline(SB),NOSPLIT,$0-0 - JMP libc_issetugid(SB) -TEXT ·libc_kqueue_trampoline(SB),NOSPLIT,$0-0 - JMP libc_kqueue(SB) -TEXT ·libc_lchown_trampoline(SB),NOSPLIT,$0-0 - JMP libc_lchown(SB) -TEXT ·libc_link_trampoline(SB),NOSPLIT,$0-0 - JMP libc_link(SB) -TEXT ·libc_linkat_trampoline(SB),NOSPLIT,$0-0 - JMP libc_linkat(SB) -TEXT ·libc_listen_trampoline(SB),NOSPLIT,$0-0 - JMP libc_listen(SB) -TEXT ·libc_mkdir_trampoline(SB),NOSPLIT,$0-0 - JMP libc_mkdir(SB) -TEXT ·libc_mkdirat_trampoline(SB),NOSPLIT,$0-0 - JMP libc_mkdirat(SB) -TEXT ·libc_mkfifo_trampoline(SB),NOSPLIT,$0-0 - JMP libc_mkfifo(SB) -TEXT ·libc_mknod_trampoline(SB),NOSPLIT,$0-0 - JMP libc_mknod(SB) -TEXT ·libc_open_trampoline(SB),NOSPLIT,$0-0 - JMP libc_open(SB) -TEXT ·libc_openat_trampoline(SB),NOSPLIT,$0-0 - JMP libc_openat(SB) -TEXT ·libc_pathconf_trampoline(SB),NOSPLIT,$0-0 - JMP libc_pathconf(SB) -TEXT ·libc_pread_trampoline(SB),NOSPLIT,$0-0 - JMP libc_pread(SB) -TEXT ·libc_pwrite_trampoline(SB),NOSPLIT,$0-0 - JMP libc_pwrite(SB) -TEXT ·libc_read_trampoline(SB),NOSPLIT,$0-0 - JMP libc_read(SB) -TEXT ·libc_readlink_trampoline(SB),NOSPLIT,$0-0 - JMP libc_readlink(SB) -TEXT ·libc_readlinkat_trampoline(SB),NOSPLIT,$0-0 - JMP libc_readlinkat(SB) -TEXT ·libc_rename_trampoline(SB),NOSPLIT,$0-0 - JMP libc_rename(SB) -TEXT ·libc_renameat_trampoline(SB),NOSPLIT,$0-0 - JMP libc_renameat(SB) -TEXT ·libc_revoke_trampoline(SB),NOSPLIT,$0-0 - JMP libc_revoke(SB) -TEXT ·libc_rmdir_trampoline(SB),NOSPLIT,$0-0 - JMP libc_rmdir(SB) -TEXT ·libc_lseek_trampoline(SB),NOSPLIT,$0-0 - JMP libc_lseek(SB) -TEXT ·libc_select_trampoline(SB),NOSPLIT,$0-0 - JMP libc_select(SB) -TEXT ·libc_setegid_trampoline(SB),NOSPLIT,$0-0 - JMP libc_setegid(SB) -TEXT ·libc_seteuid_trampoline(SB),NOSPLIT,$0-0 - JMP 
libc_seteuid(SB) -TEXT ·libc_setgid_trampoline(SB),NOSPLIT,$0-0 - JMP libc_setgid(SB) -TEXT ·libc_setlogin_trampoline(SB),NOSPLIT,$0-0 - JMP libc_setlogin(SB) -TEXT ·libc_setpgid_trampoline(SB),NOSPLIT,$0-0 - JMP libc_setpgid(SB) -TEXT ·libc_setpriority_trampoline(SB),NOSPLIT,$0-0 - JMP libc_setpriority(SB) -TEXT ·libc_setprivexec_trampoline(SB),NOSPLIT,$0-0 - JMP libc_setprivexec(SB) -TEXT ·libc_setregid_trampoline(SB),NOSPLIT,$0-0 - JMP libc_setregid(SB) -TEXT ·libc_setreuid_trampoline(SB),NOSPLIT,$0-0 - JMP libc_setreuid(SB) -TEXT ·libc_setrlimit_trampoline(SB),NOSPLIT,$0-0 - JMP libc_setrlimit(SB) -TEXT ·libc_setsid_trampoline(SB),NOSPLIT,$0-0 - JMP libc_setsid(SB) -TEXT ·libc_settimeofday_trampoline(SB),NOSPLIT,$0-0 - JMP libc_settimeofday(SB) -TEXT ·libc_setuid_trampoline(SB),NOSPLIT,$0-0 - JMP libc_setuid(SB) -TEXT ·libc_symlink_trampoline(SB),NOSPLIT,$0-0 - JMP libc_symlink(SB) -TEXT ·libc_symlinkat_trampoline(SB),NOSPLIT,$0-0 - JMP libc_symlinkat(SB) -TEXT ·libc_sync_trampoline(SB),NOSPLIT,$0-0 - JMP libc_sync(SB) -TEXT ·libc_truncate_trampoline(SB),NOSPLIT,$0-0 - JMP libc_truncate(SB) -TEXT ·libc_umask_trampoline(SB),NOSPLIT,$0-0 - JMP libc_umask(SB) -TEXT ·libc_undelete_trampoline(SB),NOSPLIT,$0-0 - JMP libc_undelete(SB) -TEXT ·libc_unlink_trampoline(SB),NOSPLIT,$0-0 - JMP libc_unlink(SB) -TEXT ·libc_unlinkat_trampoline(SB),NOSPLIT,$0-0 - JMP libc_unlinkat(SB) -TEXT ·libc_unmount_trampoline(SB),NOSPLIT,$0-0 - JMP libc_unmount(SB) -TEXT ·libc_write_trampoline(SB),NOSPLIT,$0-0 - JMP libc_write(SB) -TEXT ·libc_mmap_trampoline(SB),NOSPLIT,$0-0 - JMP libc_mmap(SB) -TEXT ·libc_munmap_trampoline(SB),NOSPLIT,$0-0 - JMP libc_munmap(SB) -TEXT ·libc_fstat64_trampoline(SB),NOSPLIT,$0-0 - JMP libc_fstat64(SB) -TEXT ·libc_fstatat64_trampoline(SB),NOSPLIT,$0-0 - JMP libc_fstatat64(SB) -TEXT ·libc_fstatfs64_trampoline(SB),NOSPLIT,$0-0 - JMP libc_fstatfs64(SB) -TEXT ·libc_getfsstat64_trampoline(SB),NOSPLIT,$0-0 - JMP libc_getfsstat64(SB) -TEXT 
·libc_lstat64_trampoline(SB),NOSPLIT,$0-0 - JMP libc_lstat64(SB) -TEXT ·libc_ptrace_trampoline(SB),NOSPLIT,$0-0 - JMP libc_ptrace(SB) -TEXT ·libc_stat64_trampoline(SB),NOSPLIT,$0-0 - JMP libc_stat64(SB) -TEXT ·libc_statfs64_trampoline(SB),NOSPLIT,$0-0 - JMP libc_statfs64(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.go index 888262361..a06eb0932 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.go @@ -1,6 +1,7 @@ // go run mksyscall.go -tags darwin,amd64,go1.13 syscall_darwin.1_13.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build darwin && amd64 && go1.13 // +build darwin,amd64,go1.13 package unix @@ -15,25 +16,25 @@ var _ syscall.Errno // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func closedir(dir uintptr) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_closedir_trampoline), uintptr(dir), 0, 0) + _, _, e1 := syscall_syscall(libc_closedir_trampoline_addr, uintptr(dir), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_closedir_trampoline() +var libc_closedir_trampoline_addr uintptr //go:cgo_import_dynamic libc_closedir closedir "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func readdir_r(dir uintptr, entry *Dirent, result **Dirent) (res Errno) { - r0, _, _ := syscall_syscall(funcPC(libc_readdir_r_trampoline), uintptr(dir), uintptr(unsafe.Pointer(entry)), uintptr(unsafe.Pointer(result))) + r0, _, _ := syscall_syscall(libc_readdir_r_trampoline_addr, uintptr(dir), uintptr(unsafe.Pointer(entry)), uintptr(unsafe.Pointer(result))) res = Errno(r0) return } -func libc_readdir_r_trampoline() +var libc_readdir_r_trampoline_addr uintptr //go:cgo_import_dynamic libc_readdir_r readdir_r "/usr/lib/libSystem.B.dylib" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.s 
b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.s index d671e8311..d6c3e25c0 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.s @@ -1,12 +1,25 @@ // go run mkasm_darwin.go amd64 // Code generated by the command above; DO NOT EDIT. +//go:build go1.13 // +build go1.13 #include "textflag.h" -TEXT ·libc_fdopendir_trampoline(SB),NOSPLIT,$0-0 + +TEXT libc_fdopendir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fdopendir(SB) -TEXT ·libc_closedir_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_fdopendir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fdopendir_trampoline_addr(SB)/8, $libc_fdopendir_trampoline<>(SB) + +TEXT libc_closedir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_closedir(SB) -TEXT ·libc_readdir_r_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_closedir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_closedir_trampoline_addr(SB)/8, $libc_closedir_trampoline<>(SB) + +TEXT libc_readdir_r_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_readdir_r(SB) + +GLOBL ·libc_readdir_r_trampoline_addr(SB), RODATA, $8 +DATA ·libc_readdir_r_trampoline_addr(SB)/8, $libc_readdir_r_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go index 508e5639b..d4efe8d45 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go @@ -1,6 +1,7 @@ // go run mksyscall.go -tags darwin,amd64,go1.12 syscall_bsd.go syscall_darwin.go syscall_darwin_amd64.go // Code generated by the command above; see README.md. DO NOT EDIT. 
+//go:build darwin && amd64 && go1.12 // +build darwin,amd64,go1.12 package unix @@ -15,7 +16,7 @@ var _ syscall.Errno // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getgroups(ngid int, gid *_Gid_t) (n int, err error) { - r0, _, e1 := syscall_rawSyscall(funcPC(libc_getgroups_trampoline), uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + r0, _, e1 := syscall_rawSyscall(libc_getgroups_trampoline_addr, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -23,28 +24,28 @@ func getgroups(ngid int, gid *_Gid_t) (n int, err error) { return } -func libc_getgroups_trampoline() +var libc_getgroups_trampoline_addr uintptr //go:cgo_import_dynamic libc_getgroups getgroups "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func setgroups(ngid int, gid *_Gid_t) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_setgroups_trampoline), uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + _, _, e1 := syscall_rawSyscall(libc_setgroups_trampoline_addr, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_setgroups_trampoline() +var libc_setgroups_trampoline_addr uintptr //go:cgo_import_dynamic libc_setgroups setgroups "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { - r0, _, e1 := syscall_syscall6(funcPC(libc_wait4_trampoline), uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) + r0, _, e1 := syscall_syscall6(libc_wait4_trampoline_addr, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) wpid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -52,14 +53,14 @@ func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err return } -func libc_wait4_trampoline() +var 
libc_wait4_trampoline_addr uintptr //go:cgo_import_dynamic libc_wait4 wait4 "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_accept_trampoline), uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + r0, _, e1 := syscall_syscall(libc_accept_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -67,42 +68,42 @@ func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { return } -func libc_accept_trampoline() +var libc_accept_trampoline_addr uintptr //go:cgo_import_dynamic libc_accept accept "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_bind_trampoline), uintptr(s), uintptr(addr), uintptr(addrlen)) + _, _, e1 := syscall_syscall(libc_bind_trampoline_addr, uintptr(s), uintptr(addr), uintptr(addrlen)) if e1 != 0 { err = errnoErr(e1) } return } -func libc_bind_trampoline() +var libc_bind_trampoline_addr uintptr //go:cgo_import_dynamic libc_bind bind "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_connect_trampoline), uintptr(s), uintptr(addr), uintptr(addrlen)) + _, _, e1 := syscall_syscall(libc_connect_trampoline_addr, uintptr(s), uintptr(addr), uintptr(addrlen)) if e1 != 0 { err = errnoErr(e1) } return } -func libc_connect_trampoline() +var libc_connect_trampoline_addr uintptr //go:cgo_import_dynamic libc_connect connect "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func socket(domain int, typ int, proto int) 
(fd int, err error) { - r0, _, e1 := syscall_rawSyscall(funcPC(libc_socket_trampoline), uintptr(domain), uintptr(typ), uintptr(proto)) + r0, _, e1 := syscall_rawSyscall(libc_socket_trampoline_addr, uintptr(domain), uintptr(typ), uintptr(proto)) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -110,91 +111,91 @@ func socket(domain int, typ int, proto int) (fd int, err error) { return } -func libc_socket_trampoline() +var libc_socket_trampoline_addr uintptr //go:cgo_import_dynamic libc_socket socket "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { - _, _, e1 := syscall_syscall6(funcPC(libc_getsockopt_trampoline), uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + _, _, e1 := syscall_syscall6(libc_getsockopt_trampoline_addr, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_getsockopt_trampoline() +var libc_getsockopt_trampoline_addr uintptr //go:cgo_import_dynamic libc_getsockopt getsockopt "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { - _, _, e1 := syscall_syscall6(funcPC(libc_setsockopt_trampoline), uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + _, _, e1 := syscall_syscall6(libc_setsockopt_trampoline_addr, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_setsockopt_trampoline() +var libc_setsockopt_trampoline_addr uintptr //go:cgo_import_dynamic libc_setsockopt setsockopt "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err 
error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_getpeername_trampoline), uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + _, _, e1 := syscall_rawSyscall(libc_getpeername_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) if e1 != 0 { err = errnoErr(e1) } return } -func libc_getpeername_trampoline() +var libc_getpeername_trampoline_addr uintptr //go:cgo_import_dynamic libc_getpeername getpeername "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_getsockname_trampoline), uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + _, _, e1 := syscall_rawSyscall(libc_getsockname_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) if e1 != 0 { err = errnoErr(e1) } return } -func libc_getsockname_trampoline() +var libc_getsockname_trampoline_addr uintptr //go:cgo_import_dynamic libc_getsockname getsockname "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Shutdown(s int, how int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_shutdown_trampoline), uintptr(s), uintptr(how), 0) + _, _, e1 := syscall_syscall(libc_shutdown_trampoline_addr, uintptr(s), uintptr(how), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_shutdown_trampoline() +var libc_shutdown_trampoline_addr uintptr //go:cgo_import_dynamic libc_shutdown shutdown "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { - _, _, e1 := syscall_rawSyscall6(funcPC(libc_socketpair_trampoline), uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + _, _, e1 := syscall_rawSyscall6(libc_socketpair_trampoline_addr, 
uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_socketpair_trampoline() +var libc_socketpair_trampoline_addr uintptr //go:cgo_import_dynamic libc_socketpair socketpair "/usr/lib/libSystem.B.dylib" @@ -207,7 +208,7 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := syscall_syscall6(funcPC(libc_recvfrom_trampoline), uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + r0, _, e1 := syscall_syscall6(libc_recvfrom_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -215,7 +216,7 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl return } -func libc_recvfrom_trampoline() +var libc_recvfrom_trampoline_addr uintptr //go:cgo_import_dynamic libc_recvfrom recvfrom "/usr/lib/libSystem.B.dylib" @@ -228,21 +229,21 @@ func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) ( } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := syscall_syscall6(funcPC(libc_sendto_trampoline), uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + _, _, e1 := syscall_syscall6(libc_sendto_trampoline_addr, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) if e1 != 0 { err = errnoErr(e1) } return } -func libc_sendto_trampoline() +var libc_sendto_trampoline_addr uintptr //go:cgo_import_dynamic libc_sendto sendto "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_recvmsg_trampoline), uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) 
+ r0, _, e1 := syscall_syscall(libc_recvmsg_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -250,14 +251,14 @@ func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { return } -func libc_recvmsg_trampoline() +var libc_recvmsg_trampoline_addr uintptr //go:cgo_import_dynamic libc_recvmsg recvmsg "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_sendmsg_trampoline), uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + r0, _, e1 := syscall_syscall(libc_sendmsg_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -265,14 +266,14 @@ func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { return } -func libc_sendmsg_trampoline() +var libc_sendmsg_trampoline_addr uintptr //go:cgo_import_dynamic libc_sendmsg sendmsg "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) { - r0, _, e1 := syscall_syscall6(funcPC(libc_kevent_trampoline), uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) + r0, _, e1 := syscall_syscall6(libc_kevent_trampoline_addr, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -280,7 +281,7 @@ func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, ne return } -func libc_kevent_trampoline() +var libc_kevent_trampoline_addr uintptr //go:cgo_import_dynamic libc_kevent kevent "/usr/lib/libSystem.B.dylib" @@ -292,35 +293,35 @@ func utimes(path string, timeval *[2]Timeval) (err error) { if 
err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_utimes_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) + _, _, e1 := syscall_syscall(libc_utimes_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_utimes_trampoline() +var libc_utimes_trampoline_addr uintptr //go:cgo_import_dynamic libc_utimes utimes "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func futimes(fd int, timeval *[2]Timeval) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_futimes_trampoline), uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) + _, _, e1 := syscall_syscall(libc_futimes_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_futimes_trampoline() +var libc_futimes_trampoline_addr uintptr //go:cgo_import_dynamic libc_futimes futimes "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_poll_trampoline), uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) + r0, _, e1 := syscall_syscall(libc_poll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -328,7 +329,7 @@ func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { return } -func libc_poll_trampoline() +var libc_poll_trampoline_addr uintptr //go:cgo_import_dynamic libc_poll poll "/usr/lib/libSystem.B.dylib" @@ -341,14 +342,14 @@ func Madvise(b []byte, behav int) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := syscall_syscall(funcPC(libc_madvise_trampoline), uintptr(_p0), uintptr(len(b)), uintptr(behav)) + _, _, e1 := syscall_syscall(libc_madvise_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(behav)) if e1 != 0 { err = 
errnoErr(e1) } return } -func libc_madvise_trampoline() +var libc_madvise_trampoline_addr uintptr //go:cgo_import_dynamic libc_madvise madvise "/usr/lib/libSystem.B.dylib" @@ -361,28 +362,28 @@ func Mlock(b []byte) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := syscall_syscall(funcPC(libc_mlock_trampoline), uintptr(_p0), uintptr(len(b)), 0) + _, _, e1 := syscall_syscall(libc_mlock_trampoline_addr, uintptr(_p0), uintptr(len(b)), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_mlock_trampoline() +var libc_mlock_trampoline_addr uintptr //go:cgo_import_dynamic libc_mlock mlock "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mlockall(flags int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_mlockall_trampoline), uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall(libc_mlockall_trampoline_addr, uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_mlockall_trampoline() +var libc_mlockall_trampoline_addr uintptr //go:cgo_import_dynamic libc_mlockall mlockall "/usr/lib/libSystem.B.dylib" @@ -395,14 +396,14 @@ func Mprotect(b []byte, prot int) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := syscall_syscall(funcPC(libc_mprotect_trampoline), uintptr(_p0), uintptr(len(b)), uintptr(prot)) + _, _, e1 := syscall_syscall(libc_mprotect_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(prot)) if e1 != 0 { err = errnoErr(e1) } return } -func libc_mprotect_trampoline() +var libc_mprotect_trampoline_addr uintptr //go:cgo_import_dynamic libc_mprotect mprotect "/usr/lib/libSystem.B.dylib" @@ -415,14 +416,14 @@ func Msync(b []byte, flags int) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := syscall_syscall(funcPC(libc_msync_trampoline), uintptr(_p0), uintptr(len(b)), uintptr(flags)) + _, _, e1 := syscall_syscall(libc_msync_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(flags)) if e1 != 0 { err = errnoErr(e1) } return } -func 
libc_msync_trampoline() +var libc_msync_trampoline_addr uintptr //go:cgo_import_dynamic libc_msync msync "/usr/lib/libSystem.B.dylib" @@ -435,42 +436,42 @@ func Munlock(b []byte) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := syscall_syscall(funcPC(libc_munlock_trampoline), uintptr(_p0), uintptr(len(b)), 0) + _, _, e1 := syscall_syscall(libc_munlock_trampoline_addr, uintptr(_p0), uintptr(len(b)), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_munlock_trampoline() +var libc_munlock_trampoline_addr uintptr //go:cgo_import_dynamic libc_munlock munlock "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Munlockall() (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_munlockall_trampoline), 0, 0, 0) + _, _, e1 := syscall_syscall(libc_munlockall_trampoline_addr, 0, 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_munlockall_trampoline() +var libc_munlockall_trampoline_addr uintptr //go:cgo_import_dynamic libc_munlockall munlockall "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func pipe(p *[2]int32) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_pipe_trampoline), uintptr(unsafe.Pointer(p)), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_pipe_trampoline_addr, uintptr(unsafe.Pointer(p)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_pipe_trampoline() +var libc_pipe_trampoline_addr uintptr //go:cgo_import_dynamic libc_pipe pipe "/usr/lib/libSystem.B.dylib" @@ -487,7 +488,7 @@ func getxattr(path string, attr string, dest *byte, size int, position uint32, o if err != nil { return } - r0, _, e1 := syscall_syscall6(funcPC(libc_getxattr_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(position), uintptr(options)) + r0, _, e1 := syscall_syscall6(libc_getxattr_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 
uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(position), uintptr(options)) sz = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -495,7 +496,7 @@ func getxattr(path string, attr string, dest *byte, size int, position uint32, o return } -func libc_getxattr_trampoline() +var libc_getxattr_trampoline_addr uintptr //go:cgo_import_dynamic libc_getxattr getxattr "/usr/lib/libSystem.B.dylib" @@ -507,7 +508,7 @@ func fgetxattr(fd int, attr string, dest *byte, size int, position uint32, optio if err != nil { return } - r0, _, e1 := syscall_syscall6(funcPC(libc_fgetxattr_trampoline), uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(position), uintptr(options)) + r0, _, e1 := syscall_syscall6(libc_fgetxattr_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(position), uintptr(options)) sz = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -515,7 +516,7 @@ func fgetxattr(fd int, attr string, dest *byte, size int, position uint32, optio return } -func libc_fgetxattr_trampoline() +var libc_fgetxattr_trampoline_addr uintptr //go:cgo_import_dynamic libc_fgetxattr fgetxattr "/usr/lib/libSystem.B.dylib" @@ -532,14 +533,14 @@ func setxattr(path string, attr string, data *byte, size int, position uint32, o if err != nil { return } - _, _, e1 := syscall_syscall6(funcPC(libc_setxattr_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(data)), uintptr(size), uintptr(position), uintptr(options)) + _, _, e1 := syscall_syscall6(libc_setxattr_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(data)), uintptr(size), uintptr(position), uintptr(options)) if e1 != 0 { err = errnoErr(e1) } return } -func libc_setxattr_trampoline() +var libc_setxattr_trampoline_addr uintptr //go:cgo_import_dynamic libc_setxattr setxattr "/usr/lib/libSystem.B.dylib" @@ -551,14 +552,14 @@ func fsetxattr(fd int, attr 
string, data *byte, size int, position uint32, optio if err != nil { return } - _, _, e1 := syscall_syscall6(funcPC(libc_fsetxattr_trampoline), uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(data)), uintptr(size), uintptr(position), uintptr(options)) + _, _, e1 := syscall_syscall6(libc_fsetxattr_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(data)), uintptr(size), uintptr(position), uintptr(options)) if e1 != 0 { err = errnoErr(e1) } return } -func libc_fsetxattr_trampoline() +var libc_fsetxattr_trampoline_addr uintptr //go:cgo_import_dynamic libc_fsetxattr fsetxattr "/usr/lib/libSystem.B.dylib" @@ -575,14 +576,14 @@ func removexattr(path string, attr string, options int) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_removexattr_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(options)) + _, _, e1 := syscall_syscall(libc_removexattr_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(options)) if e1 != 0 { err = errnoErr(e1) } return } -func libc_removexattr_trampoline() +var libc_removexattr_trampoline_addr uintptr //go:cgo_import_dynamic libc_removexattr removexattr "/usr/lib/libSystem.B.dylib" @@ -594,14 +595,14 @@ func fremovexattr(fd int, attr string, options int) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_fremovexattr_trampoline), uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(options)) + _, _, e1 := syscall_syscall(libc_fremovexattr_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(options)) if e1 != 0 { err = errnoErr(e1) } return } -func libc_fremovexattr_trampoline() +var libc_fremovexattr_trampoline_addr uintptr //go:cgo_import_dynamic libc_fremovexattr fremovexattr "/usr/lib/libSystem.B.dylib" @@ -613,7 +614,7 @@ func listxattr(path string, dest *byte, size int, options int) (sz int, err erro if err != nil { return } - r0, _, e1 := 
syscall_syscall6(funcPC(libc_listxattr_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(options), 0, 0) + r0, _, e1 := syscall_syscall6(libc_listxattr_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(options), 0, 0) sz = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -621,14 +622,14 @@ func listxattr(path string, dest *byte, size int, options int) (sz int, err erro return } -func libc_listxattr_trampoline() +var libc_listxattr_trampoline_addr uintptr //go:cgo_import_dynamic libc_listxattr listxattr "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func flistxattr(fd int, dest *byte, size int, options int) (sz int, err error) { - r0, _, e1 := syscall_syscall6(funcPC(libc_flistxattr_trampoline), uintptr(fd), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(options), 0, 0) + r0, _, e1 := syscall_syscall6(libc_flistxattr_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(options), 0, 0) sz = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -636,28 +637,28 @@ func flistxattr(fd int, dest *byte, size int, options int) (sz int, err error) { return } -func libc_flistxattr_trampoline() +var libc_flistxattr_trampoline_addr uintptr //go:cgo_import_dynamic libc_flistxattr flistxattr "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func setattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintptr, options int) (err error) { - _, _, e1 := syscall_syscall6(funcPC(libc_setattrlist_trampoline), uintptr(unsafe.Pointer(path)), uintptr(list), uintptr(buf), uintptr(size), uintptr(options), 0) + _, _, e1 := syscall_syscall6(libc_setattrlist_trampoline_addr, uintptr(unsafe.Pointer(path)), uintptr(list), uintptr(buf), uintptr(size), uintptr(options), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_setattrlist_trampoline() +var 
libc_setattrlist_trampoline_addr uintptr //go:cgo_import_dynamic libc_setattrlist setattrlist "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_fcntl_trampoline), uintptr(fd), uintptr(cmd), uintptr(arg)) + r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) val = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -665,35 +666,35 @@ func fcntl(fd int, cmd int, arg int) (val int, err error) { return } -func libc_fcntl_trampoline() +var libc_fcntl_trampoline_addr uintptr //go:cgo_import_dynamic libc_fcntl fcntl "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func kill(pid int, signum int, posix int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_kill_trampoline), uintptr(pid), uintptr(signum), uintptr(posix)) + _, _, e1 := syscall_syscall(libc_kill_trampoline_addr, uintptr(pid), uintptr(signum), uintptr(posix)) if e1 != 0 { err = errnoErr(e1) } return } -func libc_kill_trampoline() +var libc_kill_trampoline_addr uintptr //go:cgo_import_dynamic libc_kill kill "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func ioctl(fd int, req uint, arg uintptr) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_ioctl_trampoline), uintptr(fd), uintptr(req), uintptr(arg)) + _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { err = errnoErr(e1) } return } -func libc_ioctl_trampoline() +var libc_ioctl_trampoline_addr uintptr //go:cgo_import_dynamic libc_ioctl ioctl "/usr/lib/libSystem.B.dylib" @@ -706,28 +707,28 @@ func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := syscall_syscall6(funcPC(libc_sysctl_trampoline), uintptr(_p0), uintptr(len(mib)), 
uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + _, _, e1 := syscall_syscall6(libc_sysctl_trampoline_addr, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) if e1 != 0 { err = errnoErr(e1) } return } -func libc_sysctl_trampoline() +var libc_sysctl_trampoline_addr uintptr //go:cgo_import_dynamic libc_sysctl sysctl "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) { - _, _, e1 := syscall_syscall6(funcPC(libc_sendfile_trampoline), uintptr(infd), uintptr(outfd), uintptr(offset), uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags)) + _, _, e1 := syscall_syscall6(libc_sendfile_trampoline_addr, uintptr(infd), uintptr(outfd), uintptr(offset), uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags)) if e1 != 0 { err = errnoErr(e1) } return } -func libc_sendfile_trampoline() +var libc_sendfile_trampoline_addr uintptr //go:cgo_import_dynamic libc_sendfile sendfile "/usr/lib/libSystem.B.dylib" @@ -739,28 +740,28 @@ func Access(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_access_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_access_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_access_trampoline() +var libc_access_trampoline_addr uintptr //go:cgo_import_dynamic libc_access access "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_adjtime_trampoline), uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) + _, _, e1 := 
syscall_syscall(libc_adjtime_trampoline_addr, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_adjtime_trampoline() +var libc_adjtime_trampoline_addr uintptr //go:cgo_import_dynamic libc_adjtime adjtime "/usr/lib/libSystem.B.dylib" @@ -772,14 +773,14 @@ func Chdir(path string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_chdir_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_chdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_chdir_trampoline() +var libc_chdir_trampoline_addr uintptr //go:cgo_import_dynamic libc_chdir chdir "/usr/lib/libSystem.B.dylib" @@ -791,14 +792,14 @@ func Chflags(path string, flags int) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_chflags_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + _, _, e1 := syscall_syscall(libc_chflags_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_chflags_trampoline() +var libc_chflags_trampoline_addr uintptr //go:cgo_import_dynamic libc_chflags chflags "/usr/lib/libSystem.B.dylib" @@ -810,14 +811,14 @@ func Chmod(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_chmod_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_chmod_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_chmod_trampoline() +var libc_chmod_trampoline_addr uintptr //go:cgo_import_dynamic libc_chmod chmod "/usr/lib/libSystem.B.dylib" @@ -829,14 +830,14 @@ func Chown(path string, uid int, gid int) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_chown_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + _, 
_, e1 := syscall_syscall(libc_chown_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) if e1 != 0 { err = errnoErr(e1) } return } -func libc_chown_trampoline() +var libc_chown_trampoline_addr uintptr //go:cgo_import_dynamic libc_chown chown "/usr/lib/libSystem.B.dylib" @@ -848,42 +849,42 @@ func Chroot(path string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_chroot_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_chroot_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_chroot_trampoline() +var libc_chroot_trampoline_addr uintptr //go:cgo_import_dynamic libc_chroot chroot "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func ClockGettime(clockid int32, time *Timespec) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_clock_gettime_trampoline), uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + _, _, e1 := syscall_syscall(libc_clock_gettime_trampoline_addr, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_clock_gettime_trampoline() +var libc_clock_gettime_trampoline_addr uintptr //go:cgo_import_dynamic libc_clock_gettime clock_gettime "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Close(fd int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_close_trampoline), uintptr(fd), 0, 0) + _, _, e1 := syscall_syscall(libc_close_trampoline_addr, uintptr(fd), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_close_trampoline() +var libc_close_trampoline_addr uintptr //go:cgo_import_dynamic libc_close close "/usr/lib/libSystem.B.dylib" @@ -900,14 +901,14 @@ func Clonefile(src string, dst string, flags int) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_clonefile_trampoline), uintptr(unsafe.Pointer(_p0)), 
uintptr(unsafe.Pointer(_p1)), uintptr(flags)) + _, _, e1 := syscall_syscall(libc_clonefile_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags)) if e1 != 0 { err = errnoErr(e1) } return } -func libc_clonefile_trampoline() +var libc_clonefile_trampoline_addr uintptr //go:cgo_import_dynamic libc_clonefile clonefile "/usr/lib/libSystem.B.dylib" @@ -924,21 +925,21 @@ func Clonefileat(srcDirfd int, src string, dstDirfd int, dst string, flags int) if err != nil { return } - _, _, e1 := syscall_syscall6(funcPC(libc_clonefileat_trampoline), uintptr(srcDirfd), uintptr(unsafe.Pointer(_p0)), uintptr(dstDirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + _, _, e1 := syscall_syscall6(libc_clonefileat_trampoline_addr, uintptr(srcDirfd), uintptr(unsafe.Pointer(_p0)), uintptr(dstDirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_clonefileat_trampoline() +var libc_clonefileat_trampoline_addr uintptr //go:cgo_import_dynamic libc_clonefileat clonefileat "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup(fd int) (nfd int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_dup_trampoline), uintptr(fd), 0, 0) + r0, _, e1 := syscall_syscall(libc_dup_trampoline_addr, uintptr(fd), 0, 0) nfd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -946,21 +947,21 @@ func Dup(fd int) (nfd int, err error) { return } -func libc_dup_trampoline() +var libc_dup_trampoline_addr uintptr //go:cgo_import_dynamic libc_dup dup "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup2(from int, to int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_dup2_trampoline), uintptr(from), uintptr(to), 0) + _, _, e1 := syscall_syscall(libc_dup2_trampoline_addr, uintptr(from), uintptr(to), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_dup2_trampoline() +var libc_dup2_trampoline_addr uintptr 
//go:cgo_import_dynamic libc_dup2 dup2 "/usr/lib/libSystem.B.dylib" @@ -977,25 +978,25 @@ func Exchangedata(path1 string, path2 string, options int) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_exchangedata_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(options)) + _, _, e1 := syscall_syscall(libc_exchangedata_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(options)) if e1 != 0 { err = errnoErr(e1) } return } -func libc_exchangedata_trampoline() +var libc_exchangedata_trampoline_addr uintptr //go:cgo_import_dynamic libc_exchangedata exchangedata "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Exit(code int) { - syscall_syscall(funcPC(libc_exit_trampoline), uintptr(code), 0, 0) + syscall_syscall(libc_exit_trampoline_addr, uintptr(code), 0, 0) return } -func libc_exit_trampoline() +var libc_exit_trampoline_addr uintptr //go:cgo_import_dynamic libc_exit exit "/usr/lib/libSystem.B.dylib" @@ -1007,56 +1008,56 @@ func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall6(funcPC(libc_faccessat_trampoline), uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall6(libc_faccessat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_faccessat_trampoline() +var libc_faccessat_trampoline_addr uintptr //go:cgo_import_dynamic libc_faccessat faccessat "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchdir(fd int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_fchdir_trampoline), uintptr(fd), 0, 0) + _, _, e1 := syscall_syscall(libc_fchdir_trampoline_addr, uintptr(fd), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func 
libc_fchdir_trampoline() +var libc_fchdir_trampoline_addr uintptr //go:cgo_import_dynamic libc_fchdir fchdir "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchflags(fd int, flags int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_fchflags_trampoline), uintptr(fd), uintptr(flags), 0) + _, _, e1 := syscall_syscall(libc_fchflags_trampoline_addr, uintptr(fd), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_fchflags_trampoline() +var libc_fchflags_trampoline_addr uintptr //go:cgo_import_dynamic libc_fchflags fchflags "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchmod(fd int, mode uint32) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_fchmod_trampoline), uintptr(fd), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_fchmod_trampoline_addr, uintptr(fd), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_fchmod_trampoline() +var libc_fchmod_trampoline_addr uintptr //go:cgo_import_dynamic libc_fchmod fchmod "/usr/lib/libSystem.B.dylib" @@ -1068,28 +1069,28 @@ func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall6(funcPC(libc_fchmodat_trampoline), uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall6(libc_fchmodat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_fchmodat_trampoline() +var libc_fchmodat_trampoline_addr uintptr //go:cgo_import_dynamic libc_fchmodat fchmodat "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchown(fd int, uid int, gid int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_fchown_trampoline), uintptr(fd), uintptr(uid), uintptr(gid)) + _, _, e1 := 
syscall_syscall(libc_fchown_trampoline_addr, uintptr(fd), uintptr(uid), uintptr(gid)) if e1 != 0 { err = errnoErr(e1) } return } -func libc_fchown_trampoline() +var libc_fchown_trampoline_addr uintptr //go:cgo_import_dynamic libc_fchown fchown "/usr/lib/libSystem.B.dylib" @@ -1101,14 +1102,14 @@ func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall6(funcPC(libc_fchownat_trampoline), uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) + _, _, e1 := syscall_syscall6(libc_fchownat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_fchownat_trampoline() +var libc_fchownat_trampoline_addr uintptr //go:cgo_import_dynamic libc_fchownat fchownat "/usr/lib/libSystem.B.dylib" @@ -1120,35 +1121,35 @@ func Fclonefileat(srcDirfd int, dstDirfd int, dst string, flags int) (err error) if err != nil { return } - _, _, e1 := syscall_syscall6(funcPC(libc_fclonefileat_trampoline), uintptr(srcDirfd), uintptr(dstDirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall6(libc_fclonefileat_trampoline_addr, uintptr(srcDirfd), uintptr(dstDirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_fclonefileat_trampoline() +var libc_fclonefileat_trampoline_addr uintptr //go:cgo_import_dynamic libc_fclonefileat fclonefileat "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Flock(fd int, how int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_flock_trampoline), uintptr(fd), uintptr(how), 0) + _, _, e1 := syscall_syscall(libc_flock_trampoline_addr, uintptr(fd), uintptr(how), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_flock_trampoline() +var libc_flock_trampoline_addr uintptr //go:cgo_import_dynamic 
libc_flock flock "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fpathconf(fd int, name int) (val int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_fpathconf_trampoline), uintptr(fd), uintptr(name), 0) + r0, _, e1 := syscall_syscall(libc_fpathconf_trampoline_addr, uintptr(fd), uintptr(name), 0) val = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1156,35 +1157,35 @@ func Fpathconf(fd int, name int) (val int, err error) { return } -func libc_fpathconf_trampoline() +var libc_fpathconf_trampoline_addr uintptr //go:cgo_import_dynamic libc_fpathconf fpathconf "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fsync(fd int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_fsync_trampoline), uintptr(fd), 0, 0) + _, _, e1 := syscall_syscall(libc_fsync_trampoline_addr, uintptr(fd), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_fsync_trampoline() +var libc_fsync_trampoline_addr uintptr //go:cgo_import_dynamic libc_fsync fsync "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Ftruncate(fd int, length int64) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_ftruncate_trampoline), uintptr(fd), uintptr(length), 0) + _, _, e1 := syscall_syscall(libc_ftruncate_trampoline_addr, uintptr(fd), uintptr(length), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_ftruncate_trampoline() +var libc_ftruncate_trampoline_addr uintptr //go:cgo_import_dynamic libc_ftruncate ftruncate "/usr/lib/libSystem.B.dylib" @@ -1197,7 +1198,7 @@ func Getcwd(buf []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := syscall_syscall(funcPC(libc_getcwd_trampoline), uintptr(_p0), uintptr(len(buf)), 0) + r0, _, e1 := syscall_syscall(libc_getcwd_trampoline_addr, uintptr(_p0), uintptr(len(buf)), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1205,62 +1206,62 @@ func Getcwd(buf []byte) (n int, 
err error) { return } -func libc_getcwd_trampoline() +var libc_getcwd_trampoline_addr uintptr //go:cgo_import_dynamic libc_getcwd getcwd "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getdtablesize() (size int) { - r0, _, _ := syscall_syscall(funcPC(libc_getdtablesize_trampoline), 0, 0, 0) + r0, _, _ := syscall_syscall(libc_getdtablesize_trampoline_addr, 0, 0, 0) size = int(r0) return } -func libc_getdtablesize_trampoline() +var libc_getdtablesize_trampoline_addr uintptr //go:cgo_import_dynamic libc_getdtablesize getdtablesize "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getegid() (egid int) { - r0, _, _ := syscall_rawSyscall(funcPC(libc_getegid_trampoline), 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getegid_trampoline_addr, 0, 0, 0) egid = int(r0) return } -func libc_getegid_trampoline() +var libc_getegid_trampoline_addr uintptr //go:cgo_import_dynamic libc_getegid getegid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Geteuid() (uid int) { - r0, _, _ := syscall_rawSyscall(funcPC(libc_geteuid_trampoline), 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_geteuid_trampoline_addr, 0, 0, 0) uid = int(r0) return } -func libc_geteuid_trampoline() +var libc_geteuid_trampoline_addr uintptr //go:cgo_import_dynamic libc_geteuid geteuid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getgid() (gid int) { - r0, _, _ := syscall_rawSyscall(funcPC(libc_getgid_trampoline), 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getgid_trampoline_addr, 0, 0, 0) gid = int(r0) return } -func libc_getgid_trampoline() +var libc_getgid_trampoline_addr uintptr //go:cgo_import_dynamic libc_getgid getgid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpgid(pid int) (pgid int, err error) { - r0, _, e1 := 
syscall_rawSyscall(funcPC(libc_getpgid_trampoline), uintptr(pid), 0, 0) + r0, _, e1 := syscall_rawSyscall(libc_getpgid_trampoline_addr, uintptr(pid), 0, 0) pgid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1268,50 +1269,50 @@ func Getpgid(pid int) (pgid int, err error) { return } -func libc_getpgid_trampoline() +var libc_getpgid_trampoline_addr uintptr //go:cgo_import_dynamic libc_getpgid getpgid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpgrp() (pgrp int) { - r0, _, _ := syscall_rawSyscall(funcPC(libc_getpgrp_trampoline), 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getpgrp_trampoline_addr, 0, 0, 0) pgrp = int(r0) return } -func libc_getpgrp_trampoline() +var libc_getpgrp_trampoline_addr uintptr //go:cgo_import_dynamic libc_getpgrp getpgrp "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpid() (pid int) { - r0, _, _ := syscall_rawSyscall(funcPC(libc_getpid_trampoline), 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getpid_trampoline_addr, 0, 0, 0) pid = int(r0) return } -func libc_getpid_trampoline() +var libc_getpid_trampoline_addr uintptr //go:cgo_import_dynamic libc_getpid getpid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getppid() (ppid int) { - r0, _, _ := syscall_rawSyscall(funcPC(libc_getppid_trampoline), 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getppid_trampoline_addr, 0, 0, 0) ppid = int(r0) return } -func libc_getppid_trampoline() +var libc_getppid_trampoline_addr uintptr //go:cgo_import_dynamic libc_getppid getppid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpriority(which int, who int) (prio int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_getpriority_trampoline), uintptr(which), uintptr(who), 0) + r0, _, e1 := syscall_syscall(libc_getpriority_trampoline_addr, uintptr(which), uintptr(who), 0) prio = int(r0) if 
e1 != 0 { err = errnoErr(e1) @@ -1319,42 +1320,42 @@ func Getpriority(which int, who int) (prio int, err error) { return } -func libc_getpriority_trampoline() +var libc_getpriority_trampoline_addr uintptr //go:cgo_import_dynamic libc_getpriority getpriority "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_getrlimit_trampoline), uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + _, _, e1 := syscall_rawSyscall(libc_getrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_getrlimit_trampoline() +var libc_getrlimit_trampoline_addr uintptr //go:cgo_import_dynamic libc_getrlimit getrlimit "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getrusage(who int, rusage *Rusage) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_getrusage_trampoline), uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) + _, _, e1 := syscall_rawSyscall(libc_getrusage_trampoline_addr, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_getrusage_trampoline() +var libc_getrusage_trampoline_addr uintptr //go:cgo_import_dynamic libc_getrusage getrusage "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getsid(pid int) (sid int, err error) { - r0, _, e1 := syscall_rawSyscall(funcPC(libc_getsid_trampoline), uintptr(pid), 0, 0) + r0, _, e1 := syscall_rawSyscall(libc_getsid_trampoline_addr, uintptr(pid), 0, 0) sid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1362,52 +1363,52 @@ func Getsid(pid int) (sid int, err error) { return } -func libc_getsid_trampoline() +var libc_getsid_trampoline_addr uintptr //go:cgo_import_dynamic libc_getsid getsid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT 
func Gettimeofday(tp *Timeval) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_gettimeofday_trampoline), uintptr(unsafe.Pointer(tp)), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_gettimeofday_trampoline_addr, uintptr(unsafe.Pointer(tp)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_gettimeofday_trampoline() +var libc_gettimeofday_trampoline_addr uintptr //go:cgo_import_dynamic libc_gettimeofday gettimeofday "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getuid() (uid int) { - r0, _, _ := syscall_rawSyscall(funcPC(libc_getuid_trampoline), 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getuid_trampoline_addr, 0, 0, 0) uid = int(r0) return } -func libc_getuid_trampoline() +var libc_getuid_trampoline_addr uintptr //go:cgo_import_dynamic libc_getuid getuid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Issetugid() (tainted bool) { - r0, _, _ := syscall_rawSyscall(funcPC(libc_issetugid_trampoline), 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_issetugid_trampoline_addr, 0, 0, 0) tainted = bool(r0 != 0) return } -func libc_issetugid_trampoline() +var libc_issetugid_trampoline_addr uintptr //go:cgo_import_dynamic libc_issetugid issetugid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Kqueue() (fd int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_kqueue_trampoline), 0, 0, 0) + r0, _, e1 := syscall_syscall(libc_kqueue_trampoline_addr, 0, 0, 0) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1415,7 +1416,7 @@ func Kqueue() (fd int, err error) { return } -func libc_kqueue_trampoline() +var libc_kqueue_trampoline_addr uintptr //go:cgo_import_dynamic libc_kqueue kqueue "/usr/lib/libSystem.B.dylib" @@ -1427,14 +1428,14 @@ func Lchown(path string, uid int, gid int) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_lchown_trampoline), 
uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + _, _, e1 := syscall_syscall(libc_lchown_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) if e1 != 0 { err = errnoErr(e1) } return } -func libc_lchown_trampoline() +var libc_lchown_trampoline_addr uintptr //go:cgo_import_dynamic libc_lchown lchown "/usr/lib/libSystem.B.dylib" @@ -1451,14 +1452,14 @@ func Link(path string, link string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_link_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + _, _, e1 := syscall_syscall(libc_link_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_link_trampoline() +var libc_link_trampoline_addr uintptr //go:cgo_import_dynamic libc_link link "/usr/lib/libSystem.B.dylib" @@ -1475,28 +1476,28 @@ func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err er if err != nil { return } - _, _, e1 := syscall_syscall6(funcPC(libc_linkat_trampoline), uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + _, _, e1 := syscall_syscall6(libc_linkat_trampoline_addr, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_linkat_trampoline() +var libc_linkat_trampoline_addr uintptr //go:cgo_import_dynamic libc_linkat linkat "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Listen(s int, backlog int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_listen_trampoline), uintptr(s), uintptr(backlog), 0) + _, _, e1 := syscall_syscall(libc_listen_trampoline_addr, uintptr(s), uintptr(backlog), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_listen_trampoline() +var libc_listen_trampoline_addr uintptr //go:cgo_import_dynamic libc_listen 
listen "/usr/lib/libSystem.B.dylib" @@ -1508,14 +1509,14 @@ func Mkdir(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_mkdir_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_mkdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_mkdir_trampoline() +var libc_mkdir_trampoline_addr uintptr //go:cgo_import_dynamic libc_mkdir mkdir "/usr/lib/libSystem.B.dylib" @@ -1527,14 +1528,14 @@ func Mkdirat(dirfd int, path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_mkdirat_trampoline), uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + _, _, e1 := syscall_syscall(libc_mkdirat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) if e1 != 0 { err = errnoErr(e1) } return } -func libc_mkdirat_trampoline() +var libc_mkdirat_trampoline_addr uintptr //go:cgo_import_dynamic libc_mkdirat mkdirat "/usr/lib/libSystem.B.dylib" @@ -1546,14 +1547,14 @@ func Mkfifo(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_mkfifo_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_mkfifo_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_mkfifo_trampoline() +var libc_mkfifo_trampoline_addr uintptr //go:cgo_import_dynamic libc_mkfifo mkfifo "/usr/lib/libSystem.B.dylib" @@ -1565,14 +1566,14 @@ func Mknod(path string, mode uint32, dev int) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_mknod_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) + _, _, e1 := syscall_syscall(libc_mknod_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) if e1 != 0 { err = errnoErr(e1) } return } -func 
libc_mknod_trampoline() +var libc_mknod_trampoline_addr uintptr //go:cgo_import_dynamic libc_mknod mknod "/usr/lib/libSystem.B.dylib" @@ -1584,7 +1585,7 @@ func Open(path string, mode int, perm uint32) (fd int, err error) { if err != nil { return } - r0, _, e1 := syscall_syscall(funcPC(libc_open_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) + r0, _, e1 := syscall_syscall(libc_open_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1592,7 +1593,7 @@ func Open(path string, mode int, perm uint32) (fd int, err error) { return } -func libc_open_trampoline() +var libc_open_trampoline_addr uintptr //go:cgo_import_dynamic libc_open open "/usr/lib/libSystem.B.dylib" @@ -1604,7 +1605,7 @@ func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { if err != nil { return } - r0, _, e1 := syscall_syscall6(funcPC(libc_openat_trampoline), uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0) + r0, _, e1 := syscall_syscall6(libc_openat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1612,7 +1613,7 @@ func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { return } -func libc_openat_trampoline() +var libc_openat_trampoline_addr uintptr //go:cgo_import_dynamic libc_openat openat "/usr/lib/libSystem.B.dylib" @@ -1624,7 +1625,7 @@ func Pathconf(path string, name int) (val int, err error) { if err != nil { return } - r0, _, e1 := syscall_syscall(funcPC(libc_pathconf_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) + r0, _, e1 := syscall_syscall(libc_pathconf_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) val = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1632,7 +1633,7 @@ func Pathconf(path string, name int) (val int, err error) { return } -func libc_pathconf_trampoline() +var 
libc_pathconf_trampoline_addr uintptr //go:cgo_import_dynamic libc_pathconf pathconf "/usr/lib/libSystem.B.dylib" @@ -1645,7 +1646,7 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := syscall_syscall6(funcPC(libc_pread_trampoline), uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + r0, _, e1 := syscall_syscall6(libc_pread_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1653,7 +1654,7 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { return } -func libc_pread_trampoline() +var libc_pread_trampoline_addr uintptr //go:cgo_import_dynamic libc_pread pread "/usr/lib/libSystem.B.dylib" @@ -1666,7 +1667,7 @@ func Pwrite(fd int, p []byte, offset int64) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := syscall_syscall6(funcPC(libc_pwrite_trampoline), uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + r0, _, e1 := syscall_syscall6(libc_pwrite_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1674,7 +1675,7 @@ func Pwrite(fd int, p []byte, offset int64) (n int, err error) { return } -func libc_pwrite_trampoline() +var libc_pwrite_trampoline_addr uintptr //go:cgo_import_dynamic libc_pwrite pwrite "/usr/lib/libSystem.B.dylib" @@ -1687,7 +1688,7 @@ func read(fd int, p []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := syscall_syscall(funcPC(libc_read_trampoline), uintptr(fd), uintptr(_p0), uintptr(len(p))) + r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1695,7 +1696,7 @@ func read(fd int, p []byte) (n int, err error) { return } -func libc_read_trampoline() +var libc_read_trampoline_addr uintptr //go:cgo_import_dynamic libc_read read 
"/usr/lib/libSystem.B.dylib" @@ -1713,7 +1714,7 @@ func Readlink(path string, buf []byte) (n int, err error) { } else { _p1 = unsafe.Pointer(&_zero) } - r0, _, e1 := syscall_syscall(funcPC(libc_readlink_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) + r0, _, e1 := syscall_syscall(libc_readlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1721,7 +1722,7 @@ func Readlink(path string, buf []byte) (n int, err error) { return } -func libc_readlink_trampoline() +var libc_readlink_trampoline_addr uintptr //go:cgo_import_dynamic libc_readlink readlink "/usr/lib/libSystem.B.dylib" @@ -1739,7 +1740,7 @@ func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { } else { _p1 = unsafe.Pointer(&_zero) } - r0, _, e1 := syscall_syscall6(funcPC(libc_readlinkat_trampoline), uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) + r0, _, e1 := syscall_syscall6(libc_readlinkat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1747,7 +1748,7 @@ func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { return } -func libc_readlinkat_trampoline() +var libc_readlinkat_trampoline_addr uintptr //go:cgo_import_dynamic libc_readlinkat readlinkat "/usr/lib/libSystem.B.dylib" @@ -1764,14 +1765,14 @@ func Rename(from string, to string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_rename_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + _, _, e1 := syscall_syscall(libc_rename_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_rename_trampoline() +var libc_rename_trampoline_addr uintptr //go:cgo_import_dynamic libc_rename rename "/usr/lib/libSystem.B.dylib" @@ -1788,14 +1789,14 @@ func 
Renameat(fromfd int, from string, tofd int, to string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall6(funcPC(libc_renameat_trampoline), uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) + _, _, e1 := syscall_syscall6(libc_renameat_trampoline_addr, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_renameat_trampoline() +var libc_renameat_trampoline_addr uintptr //go:cgo_import_dynamic libc_renameat renameat "/usr/lib/libSystem.B.dylib" @@ -1807,14 +1808,14 @@ func Revoke(path string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_revoke_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_revoke_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_revoke_trampoline() +var libc_revoke_trampoline_addr uintptr //go:cgo_import_dynamic libc_revoke revoke "/usr/lib/libSystem.B.dylib" @@ -1826,21 +1827,21 @@ func Rmdir(path string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_rmdir_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_rmdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_rmdir_trampoline() +var libc_rmdir_trampoline_addr uintptr //go:cgo_import_dynamic libc_rmdir rmdir "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_lseek_trampoline), uintptr(fd), uintptr(offset), uintptr(whence)) + r0, _, e1 := syscall_syscall(libc_lseek_trampoline_addr, uintptr(fd), uintptr(offset), uintptr(whence)) newoffset = int64(r0) if e1 != 0 { err = errnoErr(e1) @@ -1848,14 +1849,14 @@ func Seek(fd int, offset 
int64, whence int) (newoffset int64, err error) { return } -func libc_lseek_trampoline() +var libc_lseek_trampoline_addr uintptr //go:cgo_import_dynamic libc_lseek lseek "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { - r0, _, e1 := syscall_syscall6(funcPC(libc_select_trampoline), uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + r0, _, e1 := syscall_syscall6(libc_select_trampoline_addr, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1863,49 +1864,49 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err return } -func libc_select_trampoline() +var libc_select_trampoline_addr uintptr //go:cgo_import_dynamic libc_select select "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setegid(egid int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_setegid_trampoline), uintptr(egid), 0, 0) + _, _, e1 := syscall_syscall(libc_setegid_trampoline_addr, uintptr(egid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_setegid_trampoline() +var libc_setegid_trampoline_addr uintptr //go:cgo_import_dynamic libc_setegid setegid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Seteuid(euid int) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_seteuid_trampoline), uintptr(euid), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_seteuid_trampoline_addr, uintptr(euid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_seteuid_trampoline() +var libc_seteuid_trampoline_addr uintptr //go:cgo_import_dynamic libc_seteuid seteuid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED 
BY THE COMMAND AT THE TOP; DO NOT EDIT func Setgid(gid int) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_setgid_trampoline), uintptr(gid), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_setgid_trampoline_addr, uintptr(gid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_setgid_trampoline() +var libc_setgid_trampoline_addr uintptr //go:cgo_import_dynamic libc_setgid setgid "/usr/lib/libSystem.B.dylib" @@ -1917,105 +1918,105 @@ func Setlogin(name string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_setlogin_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_setlogin_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_setlogin_trampoline() +var libc_setlogin_trampoline_addr uintptr //go:cgo_import_dynamic libc_setlogin setlogin "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setpgid(pid int, pgid int) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_setpgid_trampoline), uintptr(pid), uintptr(pgid), 0) + _, _, e1 := syscall_rawSyscall(libc_setpgid_trampoline_addr, uintptr(pid), uintptr(pgid), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_setpgid_trampoline() +var libc_setpgid_trampoline_addr uintptr //go:cgo_import_dynamic libc_setpgid setpgid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setpriority(which int, who int, prio int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_setpriority_trampoline), uintptr(which), uintptr(who), uintptr(prio)) + _, _, e1 := syscall_syscall(libc_setpriority_trampoline_addr, uintptr(which), uintptr(who), uintptr(prio)) if e1 != 0 { err = errnoErr(e1) } return } -func libc_setpriority_trampoline() +var libc_setpriority_trampoline_addr uintptr //go:cgo_import_dynamic libc_setpriority setpriority "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE 
COMMAND AT THE TOP; DO NOT EDIT func Setprivexec(flag int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_setprivexec_trampoline), uintptr(flag), 0, 0) + _, _, e1 := syscall_syscall(libc_setprivexec_trampoline_addr, uintptr(flag), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_setprivexec_trampoline() +var libc_setprivexec_trampoline_addr uintptr //go:cgo_import_dynamic libc_setprivexec setprivexec "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setregid(rgid int, egid int) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_setregid_trampoline), uintptr(rgid), uintptr(egid), 0) + _, _, e1 := syscall_rawSyscall(libc_setregid_trampoline_addr, uintptr(rgid), uintptr(egid), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_setregid_trampoline() +var libc_setregid_trampoline_addr uintptr //go:cgo_import_dynamic libc_setregid setregid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_setreuid_trampoline), uintptr(ruid), uintptr(euid), 0) + _, _, e1 := syscall_rawSyscall(libc_setreuid_trampoline_addr, uintptr(ruid), uintptr(euid), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_setreuid_trampoline() +var libc_setreuid_trampoline_addr uintptr //go:cgo_import_dynamic libc_setreuid setreuid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_setrlimit_trampoline), uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + _, _, e1 := syscall_rawSyscall(libc_setrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_setrlimit_trampoline() +var libc_setrlimit_trampoline_addr uintptr //go:cgo_import_dynamic libc_setrlimit setrlimit 
"/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setsid() (pid int, err error) { - r0, _, e1 := syscall_rawSyscall(funcPC(libc_setsid_trampoline), 0, 0, 0) + r0, _, e1 := syscall_rawSyscall(libc_setsid_trampoline_addr, 0, 0, 0) pid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -2023,35 +2024,35 @@ func Setsid() (pid int, err error) { return } -func libc_setsid_trampoline() +var libc_setsid_trampoline_addr uintptr //go:cgo_import_dynamic libc_setsid setsid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Settimeofday(tp *Timeval) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_settimeofday_trampoline), uintptr(unsafe.Pointer(tp)), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_settimeofday_trampoline_addr, uintptr(unsafe.Pointer(tp)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_settimeofday_trampoline() +var libc_settimeofday_trampoline_addr uintptr //go:cgo_import_dynamic libc_settimeofday settimeofday "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setuid(uid int) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_setuid_trampoline), uintptr(uid), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_setuid_trampoline_addr, uintptr(uid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_setuid_trampoline() +var libc_setuid_trampoline_addr uintptr //go:cgo_import_dynamic libc_setuid setuid "/usr/lib/libSystem.B.dylib" @@ -2068,14 +2069,14 @@ func Symlink(path string, link string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_symlink_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + _, _, e1 := syscall_syscall(libc_symlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_symlink_trampoline() +var libc_symlink_trampoline_addr uintptr 
//go:cgo_import_dynamic libc_symlink symlink "/usr/lib/libSystem.B.dylib" @@ -2092,28 +2093,28 @@ func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_symlinkat_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) + _, _, e1 := syscall_syscall(libc_symlinkat_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) if e1 != 0 { err = errnoErr(e1) } return } -func libc_symlinkat_trampoline() +var libc_symlinkat_trampoline_addr uintptr //go:cgo_import_dynamic libc_symlinkat symlinkat "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Sync() (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_sync_trampoline), 0, 0, 0) + _, _, e1 := syscall_syscall(libc_sync_trampoline_addr, 0, 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_sync_trampoline() +var libc_sync_trampoline_addr uintptr //go:cgo_import_dynamic libc_sync sync "/usr/lib/libSystem.B.dylib" @@ -2125,26 +2126,26 @@ func Truncate(path string, length int64) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_truncate_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(length), 0) + _, _, e1 := syscall_syscall(libc_truncate_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_truncate_trampoline() +var libc_truncate_trampoline_addr uintptr //go:cgo_import_dynamic libc_truncate truncate "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Umask(newmask int) (oldmask int) { - r0, _, _ := syscall_syscall(funcPC(libc_umask_trampoline), uintptr(newmask), 0, 0) + r0, _, _ := syscall_syscall(libc_umask_trampoline_addr, uintptr(newmask), 0, 0) oldmask = int(r0) return } -func libc_umask_trampoline() +var libc_umask_trampoline_addr uintptr 
//go:cgo_import_dynamic libc_umask umask "/usr/lib/libSystem.B.dylib" @@ -2156,14 +2157,14 @@ func Undelete(path string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_undelete_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_undelete_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_undelete_trampoline() +var libc_undelete_trampoline_addr uintptr //go:cgo_import_dynamic libc_undelete undelete "/usr/lib/libSystem.B.dylib" @@ -2175,14 +2176,14 @@ func Unlink(path string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_unlink_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_unlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_unlink_trampoline() +var libc_unlink_trampoline_addr uintptr //go:cgo_import_dynamic libc_unlink unlink "/usr/lib/libSystem.B.dylib" @@ -2194,14 +2195,14 @@ func Unlinkat(dirfd int, path string, flags int) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_unlinkat_trampoline), uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + _, _, e1 := syscall_syscall(libc_unlinkat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) if e1 != 0 { err = errnoErr(e1) } return } -func libc_unlinkat_trampoline() +var libc_unlinkat_trampoline_addr uintptr //go:cgo_import_dynamic libc_unlinkat unlinkat "/usr/lib/libSystem.B.dylib" @@ -2213,14 +2214,14 @@ func Unmount(path string, flags int) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_unmount_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + _, _, e1 := syscall_syscall(libc_unmount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_unmount_trampoline() +var 
libc_unmount_trampoline_addr uintptr //go:cgo_import_dynamic libc_unmount unmount "/usr/lib/libSystem.B.dylib" @@ -2233,7 +2234,7 @@ func write(fd int, p []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := syscall_syscall(funcPC(libc_write_trampoline), uintptr(fd), uintptr(_p0), uintptr(len(p))) + r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -2241,14 +2242,14 @@ func write(fd int, p []byte) (n int, err error) { return } -func libc_write_trampoline() +var libc_write_trampoline_addr uintptr //go:cgo_import_dynamic libc_write write "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { - r0, _, e1 := syscall_syscall6(funcPC(libc_mmap_trampoline), uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos)) + r0, _, e1 := syscall_syscall6(libc_mmap_trampoline_addr, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos)) ret = uintptr(r0) if e1 != 0 { err = errnoErr(e1) @@ -2256,28 +2257,28 @@ func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) ( return } -func libc_mmap_trampoline() +var libc_mmap_trampoline_addr uintptr //go:cgo_import_dynamic libc_mmap mmap "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func munmap(addr uintptr, length uintptr) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_munmap_trampoline), uintptr(addr), uintptr(length), 0) + _, _, e1 := syscall_syscall(libc_munmap_trampoline_addr, uintptr(addr), uintptr(length), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_munmap_trampoline() +var libc_munmap_trampoline_addr uintptr //go:cgo_import_dynamic libc_munmap munmap "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY 
THE COMMAND AT THE TOP; DO NOT EDIT func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_read_trampoline), uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -2288,7 +2289,7 @@ func readlen(fd int, buf *byte, nbuf int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_write_trampoline), uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -2299,14 +2300,14 @@ func writelen(fd int, buf *byte, nbuf int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fstat(fd int, stat *Stat_t) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_fstat64_trampoline), uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_fstat64_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_fstat64_trampoline() +var libc_fstat64_trampoline_addr uintptr //go:cgo_import_dynamic libc_fstat64 fstat64 "/usr/lib/libSystem.B.dylib" @@ -2318,35 +2319,35 @@ func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall6(funcPC(libc_fstatat64_trampoline), uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall6(libc_fstatat64_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func 
libc_fstatat64_trampoline() +var libc_fstatat64_trampoline_addr uintptr //go:cgo_import_dynamic libc_fstatat64 fstatat64 "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fstatfs(fd int, stat *Statfs_t) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_fstatfs64_trampoline), uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_fstatfs64_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_fstatfs64_trampoline() +var libc_fstatfs64_trampoline_addr uintptr //go:cgo_import_dynamic libc_fstatfs64 fstatfs64 "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getfsstat(buf unsafe.Pointer, size uintptr, flags int) (n int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_getfsstat64_trampoline), uintptr(buf), uintptr(size), uintptr(flags)) + r0, _, e1 := syscall_syscall(libc_getfsstat64_trampoline_addr, uintptr(buf), uintptr(size), uintptr(flags)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -2354,7 +2355,7 @@ func getfsstat(buf unsafe.Pointer, size uintptr, flags int) (n int, err error) { return } -func libc_getfsstat64_trampoline() +var libc_getfsstat64_trampoline_addr uintptr //go:cgo_import_dynamic libc_getfsstat64 getfsstat64 "/usr/lib/libSystem.B.dylib" @@ -2366,28 +2367,28 @@ func Lstat(path string, stat *Stat_t) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_lstat64_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_lstat64_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_lstat64_trampoline() +var libc_lstat64_trampoline_addr uintptr //go:cgo_import_dynamic libc_lstat64 lstat64 "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func 
ptrace1(request int, pid int, addr uintptr, data uintptr) (err error) { - _, _, e1 := syscall_syscall6(funcPC(libc_ptrace_trampoline), uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + _, _, e1 := syscall_syscall6(libc_ptrace_trampoline_addr, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_ptrace_trampoline() +var libc_ptrace_trampoline_addr uintptr //go:cgo_import_dynamic libc_ptrace ptrace "/usr/lib/libSystem.B.dylib" @@ -2399,14 +2400,14 @@ func Stat(path string, stat *Stat_t) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_stat64_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_stat64_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_stat64_trampoline() +var libc_stat64_trampoline_addr uintptr //go:cgo_import_dynamic libc_stat64 stat64 "/usr/lib/libSystem.B.dylib" @@ -2418,13 +2419,13 @@ func Statfs(path string, stat *Statfs_t) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_statfs64_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_statfs64_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_statfs64_trampoline() +var libc_statfs64_trampoline_addr uintptr //go:cgo_import_dynamic libc_statfs64 statfs64 "/usr/lib/libSystem.B.dylib" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s index c77bd6e20..bc169c2ab 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s @@ -1,290 +1,859 @@ // go run mkasm_darwin.go amd64 // Code generated by the command above; DO NOT EDIT. 
+//go:build go1.12 // +build go1.12 #include "textflag.h" -TEXT ·libc_getgroups_trampoline(SB),NOSPLIT,$0-0 + +TEXT libc_getgroups_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getgroups(SB) -TEXT ·libc_setgroups_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_getgroups_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getgroups_trampoline_addr(SB)/8, $libc_getgroups_trampoline<>(SB) + +TEXT libc_setgroups_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setgroups(SB) -TEXT ·libc_wait4_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_setgroups_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setgroups_trampoline_addr(SB)/8, $libc_setgroups_trampoline<>(SB) + +TEXT libc_wait4_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_wait4(SB) -TEXT ·libc_accept_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_wait4_trampoline_addr(SB), RODATA, $8 +DATA ·libc_wait4_trampoline_addr(SB)/8, $libc_wait4_trampoline<>(SB) + +TEXT libc_accept_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_accept(SB) -TEXT ·libc_bind_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_accept_trampoline_addr(SB), RODATA, $8 +DATA ·libc_accept_trampoline_addr(SB)/8, $libc_accept_trampoline<>(SB) + +TEXT libc_bind_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_bind(SB) -TEXT ·libc_connect_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_bind_trampoline_addr(SB), RODATA, $8 +DATA ·libc_bind_trampoline_addr(SB)/8, $libc_bind_trampoline<>(SB) + +TEXT libc_connect_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_connect(SB) -TEXT ·libc_socket_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_connect_trampoline_addr(SB), RODATA, $8 +DATA ·libc_connect_trampoline_addr(SB)/8, $libc_connect_trampoline<>(SB) + +TEXT libc_socket_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_socket(SB) -TEXT ·libc_getsockopt_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_socket_trampoline_addr(SB), RODATA, $8 +DATA ·libc_socket_trampoline_addr(SB)/8, $libc_socket_trampoline<>(SB) + +TEXT libc_getsockopt_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getsockopt(SB) -TEXT ·libc_setsockopt_trampoline(SB),NOSPLIT,$0-0 + +GLOBL 
·libc_getsockopt_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getsockopt_trampoline_addr(SB)/8, $libc_getsockopt_trampoline<>(SB) + +TEXT libc_setsockopt_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setsockopt(SB) -TEXT ·libc_getpeername_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_setsockopt_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setsockopt_trampoline_addr(SB)/8, $libc_setsockopt_trampoline<>(SB) + +TEXT libc_getpeername_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpeername(SB) -TEXT ·libc_getsockname_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_getpeername_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpeername_trampoline_addr(SB)/8, $libc_getpeername_trampoline<>(SB) + +TEXT libc_getsockname_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getsockname(SB) -TEXT ·libc_shutdown_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_getsockname_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getsockname_trampoline_addr(SB)/8, $libc_getsockname_trampoline<>(SB) + +TEXT libc_shutdown_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_shutdown(SB) -TEXT ·libc_socketpair_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_shutdown_trampoline_addr(SB), RODATA, $8 +DATA ·libc_shutdown_trampoline_addr(SB)/8, $libc_shutdown_trampoline<>(SB) + +TEXT libc_socketpair_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_socketpair(SB) -TEXT ·libc_recvfrom_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_socketpair_trampoline_addr(SB), RODATA, $8 +DATA ·libc_socketpair_trampoline_addr(SB)/8, $libc_socketpair_trampoline<>(SB) + +TEXT libc_recvfrom_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_recvfrom(SB) -TEXT ·libc_sendto_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_recvfrom_trampoline_addr(SB), RODATA, $8 +DATA ·libc_recvfrom_trampoline_addr(SB)/8, $libc_recvfrom_trampoline<>(SB) + +TEXT libc_sendto_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendto(SB) -TEXT ·libc_recvmsg_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_sendto_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sendto_trampoline_addr(SB)/8, $libc_sendto_trampoline<>(SB) + +TEXT 
libc_recvmsg_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_recvmsg(SB) -TEXT ·libc_sendmsg_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_recvmsg_trampoline_addr(SB), RODATA, $8 +DATA ·libc_recvmsg_trampoline_addr(SB)/8, $libc_recvmsg_trampoline<>(SB) + +TEXT libc_sendmsg_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendmsg(SB) -TEXT ·libc_kevent_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_sendmsg_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sendmsg_trampoline_addr(SB)/8, $libc_sendmsg_trampoline<>(SB) + +TEXT libc_kevent_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_kevent(SB) -TEXT ·libc_utimes_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_kevent_trampoline_addr(SB), RODATA, $8 +DATA ·libc_kevent_trampoline_addr(SB)/8, $libc_kevent_trampoline<>(SB) + +TEXT libc_utimes_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_utimes(SB) -TEXT ·libc_futimes_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_utimes_trampoline_addr(SB), RODATA, $8 +DATA ·libc_utimes_trampoline_addr(SB)/8, $libc_utimes_trampoline<>(SB) + +TEXT libc_futimes_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_futimes(SB) -TEXT ·libc_poll_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_futimes_trampoline_addr(SB), RODATA, $8 +DATA ·libc_futimes_trampoline_addr(SB)/8, $libc_futimes_trampoline<>(SB) + +TEXT libc_poll_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_poll(SB) -TEXT ·libc_madvise_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_poll_trampoline_addr(SB), RODATA, $8 +DATA ·libc_poll_trampoline_addr(SB)/8, $libc_poll_trampoline<>(SB) + +TEXT libc_madvise_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_madvise(SB) -TEXT ·libc_mlock_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_madvise_trampoline_addr(SB), RODATA, $8 +DATA ·libc_madvise_trampoline_addr(SB)/8, $libc_madvise_trampoline<>(SB) + +TEXT libc_mlock_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mlock(SB) -TEXT ·libc_mlockall_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_mlock_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mlock_trampoline_addr(SB)/8, $libc_mlock_trampoline<>(SB) + +TEXT libc_mlockall_trampoline<>(SB),NOSPLIT,$0-0 JMP 
libc_mlockall(SB) -TEXT ·libc_mprotect_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_mlockall_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mlockall_trampoline_addr(SB)/8, $libc_mlockall_trampoline<>(SB) + +TEXT libc_mprotect_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mprotect(SB) -TEXT ·libc_msync_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_mprotect_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mprotect_trampoline_addr(SB)/8, $libc_mprotect_trampoline<>(SB) + +TEXT libc_msync_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_msync(SB) -TEXT ·libc_munlock_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_msync_trampoline_addr(SB), RODATA, $8 +DATA ·libc_msync_trampoline_addr(SB)/8, $libc_msync_trampoline<>(SB) + +TEXT libc_munlock_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_munlock(SB) -TEXT ·libc_munlockall_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_munlock_trampoline_addr(SB), RODATA, $8 +DATA ·libc_munlock_trampoline_addr(SB)/8, $libc_munlock_trampoline<>(SB) + +TEXT libc_munlockall_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_munlockall(SB) -TEXT ·libc_pipe_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_munlockall_trampoline_addr(SB), RODATA, $8 +DATA ·libc_munlockall_trampoline_addr(SB)/8, $libc_munlockall_trampoline<>(SB) + +TEXT libc_pipe_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pipe(SB) -TEXT ·libc_getxattr_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_pipe_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pipe_trampoline_addr(SB)/8, $libc_pipe_trampoline<>(SB) + +TEXT libc_getxattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getxattr(SB) -TEXT ·libc_fgetxattr_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_getxattr_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getxattr_trampoline_addr(SB)/8, $libc_getxattr_trampoline<>(SB) + +TEXT libc_fgetxattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fgetxattr(SB) -TEXT ·libc_setxattr_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_fgetxattr_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fgetxattr_trampoline_addr(SB)/8, $libc_fgetxattr_trampoline<>(SB) + +TEXT 
libc_setxattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setxattr(SB) -TEXT ·libc_fsetxattr_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_setxattr_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setxattr_trampoline_addr(SB)/8, $libc_setxattr_trampoline<>(SB) + +TEXT libc_fsetxattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fsetxattr(SB) -TEXT ·libc_removexattr_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_fsetxattr_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fsetxattr_trampoline_addr(SB)/8, $libc_fsetxattr_trampoline<>(SB) + +TEXT libc_removexattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_removexattr(SB) -TEXT ·libc_fremovexattr_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_removexattr_trampoline_addr(SB), RODATA, $8 +DATA ·libc_removexattr_trampoline_addr(SB)/8, $libc_removexattr_trampoline<>(SB) + +TEXT libc_fremovexattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fremovexattr(SB) -TEXT ·libc_listxattr_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_fremovexattr_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fremovexattr_trampoline_addr(SB)/8, $libc_fremovexattr_trampoline<>(SB) + +TEXT libc_listxattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_listxattr(SB) -TEXT ·libc_flistxattr_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_listxattr_trampoline_addr(SB), RODATA, $8 +DATA ·libc_listxattr_trampoline_addr(SB)/8, $libc_listxattr_trampoline<>(SB) + +TEXT libc_flistxattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_flistxattr(SB) -TEXT ·libc_setattrlist_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_flistxattr_trampoline_addr(SB), RODATA, $8 +DATA ·libc_flistxattr_trampoline_addr(SB)/8, $libc_flistxattr_trampoline<>(SB) + +TEXT libc_setattrlist_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setattrlist(SB) -TEXT ·libc_fcntl_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_setattrlist_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setattrlist_trampoline_addr(SB)/8, $libc_setattrlist_trampoline<>(SB) + +TEXT libc_fcntl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fcntl(SB) -TEXT ·libc_kill_trampoline(SB),NOSPLIT,$0-0 + +GLOBL 
·libc_fcntl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fcntl_trampoline_addr(SB)/8, $libc_fcntl_trampoline<>(SB) + +TEXT libc_kill_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_kill(SB) -TEXT ·libc_ioctl_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_kill_trampoline_addr(SB), RODATA, $8 +DATA ·libc_kill_trampoline_addr(SB)/8, $libc_kill_trampoline<>(SB) + +TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ioctl(SB) -TEXT ·libc_sysctl_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_ioctl_trampoline_addr(SB)/8, $libc_ioctl_trampoline<>(SB) + +TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sysctl(SB) -TEXT ·libc_sendfile_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) + +TEXT libc_sendfile_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendfile(SB) -TEXT ·libc_access_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_sendfile_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sendfile_trampoline_addr(SB)/8, $libc_sendfile_trampoline<>(SB) + +TEXT libc_access_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_access(SB) -TEXT ·libc_adjtime_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_access_trampoline_addr(SB), RODATA, $8 +DATA ·libc_access_trampoline_addr(SB)/8, $libc_access_trampoline<>(SB) + +TEXT libc_adjtime_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_adjtime(SB) -TEXT ·libc_chdir_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_adjtime_trampoline_addr(SB), RODATA, $8 +DATA ·libc_adjtime_trampoline_addr(SB)/8, $libc_adjtime_trampoline<>(SB) + +TEXT libc_chdir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chdir(SB) -TEXT ·libc_chflags_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_chdir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chdir_trampoline_addr(SB)/8, $libc_chdir_trampoline<>(SB) + +TEXT libc_chflags_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chflags(SB) -TEXT ·libc_chmod_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_chflags_trampoline_addr(SB), RODATA, $8 +DATA 
·libc_chflags_trampoline_addr(SB)/8, $libc_chflags_trampoline<>(SB) + +TEXT libc_chmod_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chmod(SB) -TEXT ·libc_chown_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_chmod_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chmod_trampoline_addr(SB)/8, $libc_chmod_trampoline<>(SB) + +TEXT libc_chown_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chown(SB) -TEXT ·libc_chroot_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_chown_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chown_trampoline_addr(SB)/8, $libc_chown_trampoline<>(SB) + +TEXT libc_chroot_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chroot(SB) -TEXT ·libc_clock_gettime_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_chroot_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chroot_trampoline_addr(SB)/8, $libc_chroot_trampoline<>(SB) + +TEXT libc_clock_gettime_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_clock_gettime(SB) -TEXT ·libc_close_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_clock_gettime_trampoline_addr(SB), RODATA, $8 +DATA ·libc_clock_gettime_trampoline_addr(SB)/8, $libc_clock_gettime_trampoline<>(SB) + +TEXT libc_close_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_close(SB) -TEXT ·libc_clonefile_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_close_trampoline_addr(SB), RODATA, $8 +DATA ·libc_close_trampoline_addr(SB)/8, $libc_close_trampoline<>(SB) + +TEXT libc_clonefile_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_clonefile(SB) -TEXT ·libc_clonefileat_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_clonefile_trampoline_addr(SB), RODATA, $8 +DATA ·libc_clonefile_trampoline_addr(SB)/8, $libc_clonefile_trampoline<>(SB) + +TEXT libc_clonefileat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_clonefileat(SB) -TEXT ·libc_dup_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_clonefileat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_clonefileat_trampoline_addr(SB)/8, $libc_clonefileat_trampoline<>(SB) + +TEXT libc_dup_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_dup(SB) -TEXT ·libc_dup2_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_dup_trampoline_addr(SB), RODATA, $8 
+DATA ·libc_dup_trampoline_addr(SB)/8, $libc_dup_trampoline<>(SB) + +TEXT libc_dup2_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_dup2(SB) -TEXT ·libc_exchangedata_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_dup2_trampoline_addr(SB), RODATA, $8 +DATA ·libc_dup2_trampoline_addr(SB)/8, $libc_dup2_trampoline<>(SB) + +TEXT libc_exchangedata_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_exchangedata(SB) -TEXT ·libc_exit_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_exchangedata_trampoline_addr(SB), RODATA, $8 +DATA ·libc_exchangedata_trampoline_addr(SB)/8, $libc_exchangedata_trampoline<>(SB) + +TEXT libc_exit_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_exit(SB) -TEXT ·libc_faccessat_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_exit_trampoline_addr(SB), RODATA, $8 +DATA ·libc_exit_trampoline_addr(SB)/8, $libc_exit_trampoline<>(SB) + +TEXT libc_faccessat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_faccessat(SB) -TEXT ·libc_fchdir_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_faccessat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_faccessat_trampoline_addr(SB)/8, $libc_faccessat_trampoline<>(SB) + +TEXT libc_fchdir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchdir(SB) -TEXT ·libc_fchflags_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_fchdir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchdir_trampoline_addr(SB)/8, $libc_fchdir_trampoline<>(SB) + +TEXT libc_fchflags_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchflags(SB) -TEXT ·libc_fchmod_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_fchflags_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchflags_trampoline_addr(SB)/8, $libc_fchflags_trampoline<>(SB) + +TEXT libc_fchmod_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchmod(SB) -TEXT ·libc_fchmodat_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_fchmod_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchmod_trampoline_addr(SB)/8, $libc_fchmod_trampoline<>(SB) + +TEXT libc_fchmodat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchmodat(SB) -TEXT ·libc_fchown_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_fchmodat_trampoline_addr(SB), RODATA, $8 +DATA 
·libc_fchmodat_trampoline_addr(SB)/8, $libc_fchmodat_trampoline<>(SB) + +TEXT libc_fchown_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchown(SB) -TEXT ·libc_fchownat_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_fchown_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchown_trampoline_addr(SB)/8, $libc_fchown_trampoline<>(SB) + +TEXT libc_fchownat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchownat(SB) -TEXT ·libc_fclonefileat_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_fchownat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchownat_trampoline_addr(SB)/8, $libc_fchownat_trampoline<>(SB) + +TEXT libc_fclonefileat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fclonefileat(SB) -TEXT ·libc_flock_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_fclonefileat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fclonefileat_trampoline_addr(SB)/8, $libc_fclonefileat_trampoline<>(SB) + +TEXT libc_flock_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_flock(SB) -TEXT ·libc_fpathconf_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_flock_trampoline_addr(SB), RODATA, $8 +DATA ·libc_flock_trampoline_addr(SB)/8, $libc_flock_trampoline<>(SB) + +TEXT libc_fpathconf_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fpathconf(SB) -TEXT ·libc_fsync_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_fpathconf_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fpathconf_trampoline_addr(SB)/8, $libc_fpathconf_trampoline<>(SB) + +TEXT libc_fsync_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fsync(SB) -TEXT ·libc_ftruncate_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_fsync_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fsync_trampoline_addr(SB)/8, $libc_fsync_trampoline<>(SB) + +TEXT libc_ftruncate_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ftruncate(SB) -TEXT ·libc_getcwd_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_ftruncate_trampoline_addr(SB), RODATA, $8 +DATA ·libc_ftruncate_trampoline_addr(SB)/8, $libc_ftruncate_trampoline<>(SB) + +TEXT libc_getcwd_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getcwd(SB) -TEXT ·libc_getdtablesize_trampoline(SB),NOSPLIT,$0-0 + +GLOBL 
·libc_getcwd_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getcwd_trampoline_addr(SB)/8, $libc_getcwd_trampoline<>(SB) + +TEXT libc_getdtablesize_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getdtablesize(SB) -TEXT ·libc_getegid_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_getdtablesize_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getdtablesize_trampoline_addr(SB)/8, $libc_getdtablesize_trampoline<>(SB) + +TEXT libc_getegid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getegid(SB) -TEXT ·libc_geteuid_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_getegid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getegid_trampoline_addr(SB)/8, $libc_getegid_trampoline<>(SB) + +TEXT libc_geteuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_geteuid(SB) -TEXT ·libc_getgid_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_geteuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_geteuid_trampoline_addr(SB)/8, $libc_geteuid_trampoline<>(SB) + +TEXT libc_getgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getgid(SB) -TEXT ·libc_getpgid_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_getgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getgid_trampoline_addr(SB)/8, $libc_getgid_trampoline<>(SB) + +TEXT libc_getpgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpgid(SB) -TEXT ·libc_getpgrp_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_getpgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpgid_trampoline_addr(SB)/8, $libc_getpgid_trampoline<>(SB) + +TEXT libc_getpgrp_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpgrp(SB) -TEXT ·libc_getpid_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_getpgrp_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpgrp_trampoline_addr(SB)/8, $libc_getpgrp_trampoline<>(SB) + +TEXT libc_getpid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpid(SB) -TEXT ·libc_getppid_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_getpid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpid_trampoline_addr(SB)/8, $libc_getpid_trampoline<>(SB) + +TEXT libc_getppid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getppid(SB) -TEXT 
·libc_getpriority_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_getppid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getppid_trampoline_addr(SB)/8, $libc_getppid_trampoline<>(SB) + +TEXT libc_getpriority_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpriority(SB) -TEXT ·libc_getrlimit_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_getpriority_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpriority_trampoline_addr(SB)/8, $libc_getpriority_trampoline<>(SB) + +TEXT libc_getrlimit_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getrlimit(SB) -TEXT ·libc_getrusage_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_getrlimit_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getrlimit_trampoline_addr(SB)/8, $libc_getrlimit_trampoline<>(SB) + +TEXT libc_getrusage_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getrusage(SB) -TEXT ·libc_getsid_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_getrusage_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getrusage_trampoline_addr(SB)/8, $libc_getrusage_trampoline<>(SB) + +TEXT libc_getsid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getsid(SB) -TEXT ·libc_gettimeofday_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_getsid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getsid_trampoline_addr(SB)/8, $libc_getsid_trampoline<>(SB) + +TEXT libc_gettimeofday_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_gettimeofday(SB) -TEXT ·libc_getuid_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_gettimeofday_trampoline_addr(SB), RODATA, $8 +DATA ·libc_gettimeofday_trampoline_addr(SB)/8, $libc_gettimeofday_trampoline<>(SB) + +TEXT libc_getuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getuid(SB) -TEXT ·libc_issetugid_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_getuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getuid_trampoline_addr(SB)/8, $libc_getuid_trampoline<>(SB) + +TEXT libc_issetugid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_issetugid(SB) -TEXT ·libc_kqueue_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_issetugid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_issetugid_trampoline_addr(SB)/8, $libc_issetugid_trampoline<>(SB) + +TEXT 
libc_kqueue_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_kqueue(SB) -TEXT ·libc_lchown_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_kqueue_trampoline_addr(SB), RODATA, $8 +DATA ·libc_kqueue_trampoline_addr(SB)/8, $libc_kqueue_trampoline<>(SB) + +TEXT libc_lchown_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_lchown(SB) -TEXT ·libc_link_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_lchown_trampoline_addr(SB), RODATA, $8 +DATA ·libc_lchown_trampoline_addr(SB)/8, $libc_lchown_trampoline<>(SB) + +TEXT libc_link_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_link(SB) -TEXT ·libc_linkat_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_link_trampoline_addr(SB), RODATA, $8 +DATA ·libc_link_trampoline_addr(SB)/8, $libc_link_trampoline<>(SB) + +TEXT libc_linkat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_linkat(SB) -TEXT ·libc_listen_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_linkat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_linkat_trampoline_addr(SB)/8, $libc_linkat_trampoline<>(SB) + +TEXT libc_listen_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_listen(SB) -TEXT ·libc_mkdir_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_listen_trampoline_addr(SB), RODATA, $8 +DATA ·libc_listen_trampoline_addr(SB)/8, $libc_listen_trampoline<>(SB) + +TEXT libc_mkdir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mkdir(SB) -TEXT ·libc_mkdirat_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_mkdir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mkdir_trampoline_addr(SB)/8, $libc_mkdir_trampoline<>(SB) + +TEXT libc_mkdirat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mkdirat(SB) -TEXT ·libc_mkfifo_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_mkdirat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mkdirat_trampoline_addr(SB)/8, $libc_mkdirat_trampoline<>(SB) + +TEXT libc_mkfifo_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mkfifo(SB) -TEXT ·libc_mknod_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_mkfifo_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mkfifo_trampoline_addr(SB)/8, $libc_mkfifo_trampoline<>(SB) + +TEXT libc_mknod_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mknod(SB) -TEXT 
·libc_open_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_mknod_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mknod_trampoline_addr(SB)/8, $libc_mknod_trampoline<>(SB) + +TEXT libc_open_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_open(SB) -TEXT ·libc_openat_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_open_trampoline_addr(SB), RODATA, $8 +DATA ·libc_open_trampoline_addr(SB)/8, $libc_open_trampoline<>(SB) + +TEXT libc_openat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_openat(SB) -TEXT ·libc_pathconf_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_openat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_openat_trampoline_addr(SB)/8, $libc_openat_trampoline<>(SB) + +TEXT libc_pathconf_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pathconf(SB) -TEXT ·libc_pread_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_pathconf_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pathconf_trampoline_addr(SB)/8, $libc_pathconf_trampoline<>(SB) + +TEXT libc_pread_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pread(SB) -TEXT ·libc_pwrite_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_pread_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pread_trampoline_addr(SB)/8, $libc_pread_trampoline<>(SB) + +TEXT libc_pwrite_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pwrite(SB) -TEXT ·libc_read_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_pwrite_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pwrite_trampoline_addr(SB)/8, $libc_pwrite_trampoline<>(SB) + +TEXT libc_read_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_read(SB) -TEXT ·libc_readlink_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_read_trampoline_addr(SB), RODATA, $8 +DATA ·libc_read_trampoline_addr(SB)/8, $libc_read_trampoline<>(SB) + +TEXT libc_readlink_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_readlink(SB) -TEXT ·libc_readlinkat_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_readlink_trampoline_addr(SB), RODATA, $8 +DATA ·libc_readlink_trampoline_addr(SB)/8, $libc_readlink_trampoline<>(SB) + +TEXT libc_readlinkat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_readlinkat(SB) -TEXT ·libc_rename_trampoline(SB),NOSPLIT,$0-0 + +GLOBL 
·libc_readlinkat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_readlinkat_trampoline_addr(SB)/8, $libc_readlinkat_trampoline<>(SB) + +TEXT libc_rename_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_rename(SB) -TEXT ·libc_renameat_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_rename_trampoline_addr(SB), RODATA, $8 +DATA ·libc_rename_trampoline_addr(SB)/8, $libc_rename_trampoline<>(SB) + +TEXT libc_renameat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_renameat(SB) -TEXT ·libc_revoke_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_renameat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_renameat_trampoline_addr(SB)/8, $libc_renameat_trampoline<>(SB) + +TEXT libc_revoke_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_revoke(SB) -TEXT ·libc_rmdir_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_revoke_trampoline_addr(SB), RODATA, $8 +DATA ·libc_revoke_trampoline_addr(SB)/8, $libc_revoke_trampoline<>(SB) + +TEXT libc_rmdir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_rmdir(SB) -TEXT ·libc_lseek_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_rmdir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_rmdir_trampoline_addr(SB)/8, $libc_rmdir_trampoline<>(SB) + +TEXT libc_lseek_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_lseek(SB) -TEXT ·libc_select_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_lseek_trampoline_addr(SB), RODATA, $8 +DATA ·libc_lseek_trampoline_addr(SB)/8, $libc_lseek_trampoline<>(SB) + +TEXT libc_select_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_select(SB) -TEXT ·libc_setegid_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_select_trampoline_addr(SB), RODATA, $8 +DATA ·libc_select_trampoline_addr(SB)/8, $libc_select_trampoline<>(SB) + +TEXT libc_setegid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setegid(SB) -TEXT ·libc_seteuid_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_setegid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setegid_trampoline_addr(SB)/8, $libc_setegid_trampoline<>(SB) + +TEXT libc_seteuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_seteuid(SB) -TEXT ·libc_setgid_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_seteuid_trampoline_addr(SB), 
RODATA, $8 +DATA ·libc_seteuid_trampoline_addr(SB)/8, $libc_seteuid_trampoline<>(SB) + +TEXT libc_setgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setgid(SB) -TEXT ·libc_setlogin_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_setgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setgid_trampoline_addr(SB)/8, $libc_setgid_trampoline<>(SB) + +TEXT libc_setlogin_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setlogin(SB) -TEXT ·libc_setpgid_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_setlogin_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setlogin_trampoline_addr(SB)/8, $libc_setlogin_trampoline<>(SB) + +TEXT libc_setpgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setpgid(SB) -TEXT ·libc_setpriority_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_setpgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setpgid_trampoline_addr(SB)/8, $libc_setpgid_trampoline<>(SB) + +TEXT libc_setpriority_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setpriority(SB) -TEXT ·libc_setprivexec_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_setpriority_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setpriority_trampoline_addr(SB)/8, $libc_setpriority_trampoline<>(SB) + +TEXT libc_setprivexec_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setprivexec(SB) -TEXT ·libc_setregid_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_setprivexec_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setprivexec_trampoline_addr(SB)/8, $libc_setprivexec_trampoline<>(SB) + +TEXT libc_setregid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setregid(SB) -TEXT ·libc_setreuid_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_setregid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setregid_trampoline_addr(SB)/8, $libc_setregid_trampoline<>(SB) + +TEXT libc_setreuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setreuid(SB) -TEXT ·libc_setrlimit_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_setreuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setreuid_trampoline_addr(SB)/8, $libc_setreuid_trampoline<>(SB) + +TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setrlimit(SB) -TEXT 
·libc_setsid_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setrlimit_trampoline_addr(SB)/8, $libc_setrlimit_trampoline<>(SB) + +TEXT libc_setsid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setsid(SB) -TEXT ·libc_settimeofday_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_setsid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setsid_trampoline_addr(SB)/8, $libc_setsid_trampoline<>(SB) + +TEXT libc_settimeofday_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_settimeofday(SB) -TEXT ·libc_setuid_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_settimeofday_trampoline_addr(SB), RODATA, $8 +DATA ·libc_settimeofday_trampoline_addr(SB)/8, $libc_settimeofday_trampoline<>(SB) + +TEXT libc_setuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setuid(SB) -TEXT ·libc_symlink_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_setuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setuid_trampoline_addr(SB)/8, $libc_setuid_trampoline<>(SB) + +TEXT libc_symlink_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_symlink(SB) -TEXT ·libc_symlinkat_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_symlink_trampoline_addr(SB), RODATA, $8 +DATA ·libc_symlink_trampoline_addr(SB)/8, $libc_symlink_trampoline<>(SB) + +TEXT libc_symlinkat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_symlinkat(SB) -TEXT ·libc_sync_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_symlinkat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_symlinkat_trampoline_addr(SB)/8, $libc_symlinkat_trampoline<>(SB) + +TEXT libc_sync_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sync(SB) -TEXT ·libc_truncate_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_sync_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sync_trampoline_addr(SB)/8, $libc_sync_trampoline<>(SB) + +TEXT libc_truncate_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_truncate(SB) -TEXT ·libc_umask_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_truncate_trampoline_addr(SB), RODATA, $8 +DATA ·libc_truncate_trampoline_addr(SB)/8, $libc_truncate_trampoline<>(SB) + +TEXT libc_umask_trampoline<>(SB),NOSPLIT,$0-0 JMP 
libc_umask(SB) -TEXT ·libc_undelete_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_umask_trampoline_addr(SB), RODATA, $8 +DATA ·libc_umask_trampoline_addr(SB)/8, $libc_umask_trampoline<>(SB) + +TEXT libc_undelete_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_undelete(SB) -TEXT ·libc_unlink_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_undelete_trampoline_addr(SB), RODATA, $8 +DATA ·libc_undelete_trampoline_addr(SB)/8, $libc_undelete_trampoline<>(SB) + +TEXT libc_unlink_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_unlink(SB) -TEXT ·libc_unlinkat_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_unlink_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unlink_trampoline_addr(SB)/8, $libc_unlink_trampoline<>(SB) + +TEXT libc_unlinkat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_unlinkat(SB) -TEXT ·libc_unmount_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_unlinkat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unlinkat_trampoline_addr(SB)/8, $libc_unlinkat_trampoline<>(SB) + +TEXT libc_unmount_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_unmount(SB) -TEXT ·libc_write_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_unmount_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unmount_trampoline_addr(SB)/8, $libc_unmount_trampoline<>(SB) + +TEXT libc_write_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_write(SB) -TEXT ·libc_mmap_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_write_trampoline_addr(SB), RODATA, $8 +DATA ·libc_write_trampoline_addr(SB)/8, $libc_write_trampoline<>(SB) + +TEXT libc_mmap_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mmap(SB) -TEXT ·libc_munmap_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_mmap_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mmap_trampoline_addr(SB)/8, $libc_mmap_trampoline<>(SB) + +TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_munmap(SB) -TEXT ·libc_fstat64_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 +DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) + +TEXT libc_fstat64_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fstat64(SB) -TEXT 
·libc_fstatat64_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_fstat64_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fstat64_trampoline_addr(SB)/8, $libc_fstat64_trampoline<>(SB) + +TEXT libc_fstatat64_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fstatat64(SB) -TEXT ·libc_fstatfs64_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_fstatat64_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fstatat64_trampoline_addr(SB)/8, $libc_fstatat64_trampoline<>(SB) + +TEXT libc_fstatfs64_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fstatfs64(SB) -TEXT ·libc_getfsstat64_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_fstatfs64_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fstatfs64_trampoline_addr(SB)/8, $libc_fstatfs64_trampoline<>(SB) + +TEXT libc_getfsstat64_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getfsstat64(SB) -TEXT ·libc_lstat64_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_getfsstat64_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getfsstat64_trampoline_addr(SB)/8, $libc_getfsstat64_trampoline<>(SB) + +TEXT libc_lstat64_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_lstat64(SB) -TEXT ·libc_ptrace_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_lstat64_trampoline_addr(SB), RODATA, $8 +DATA ·libc_lstat64_trampoline_addr(SB)/8, $libc_lstat64_trampoline<>(SB) + +TEXT libc_ptrace_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ptrace(SB) -TEXT ·libc_stat64_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_ptrace_trampoline_addr(SB), RODATA, $8 +DATA ·libc_ptrace_trampoline_addr(SB)/8, $libc_ptrace_trampoline<>(SB) + +TEXT libc_stat64_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_stat64(SB) -TEXT ·libc_statfs64_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_stat64_trampoline_addr(SB), RODATA, $8 +DATA ·libc_stat64_trampoline_addr(SB)/8, $libc_stat64_trampoline<>(SB) + +TEXT libc_statfs64_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_statfs64(SB) + +GLOBL ·libc_statfs64_trampoline_addr(SB), RODATA, $8 +DATA ·libc_statfs64_trampoline_addr(SB)/8, $libc_statfs64_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_13.go 
b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_13.go deleted file mode 100644 index de4738fff..000000000 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_13.go +++ /dev/null @@ -1,39 +0,0 @@ -// go run mksyscall.go -l32 -tags darwin,arm,go1.13 syscall_darwin.1_13.go -// Code generated by the command above; see README.md. DO NOT EDIT. - -// +build darwin,arm,go1.13 - -package unix - -import ( - "syscall" - "unsafe" -) - -var _ syscall.Errno - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func closedir(dir uintptr) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_closedir_trampoline), uintptr(dir), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_closedir_trampoline() - -//go:cgo_import_dynamic libc_closedir closedir "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func readdir_r(dir uintptr, entry *Dirent, result **Dirent) (res Errno) { - r0, _, _ := syscall_syscall(funcPC(libc_readdir_r_trampoline), uintptr(dir), uintptr(unsafe.Pointer(entry)), uintptr(unsafe.Pointer(result))) - res = Errno(r0) - return -} - -func libc_readdir_r_trampoline() - -//go:cgo_import_dynamic libc_readdir_r readdir_r "/usr/lib/libSystem.B.dylib" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_13.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_13.s deleted file mode 100644 index 488e55707..000000000 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_13.s +++ /dev/null @@ -1,12 +0,0 @@ -// go run mkasm_darwin.go arm -// Code generated by the command above; DO NOT EDIT. 
- -// +build go1.13 - -#include "textflag.h" -TEXT ·libc_fdopendir_trampoline(SB),NOSPLIT,$0-0 - JMP libc_fdopendir(SB) -TEXT ·libc_closedir_trampoline(SB),NOSPLIT,$0-0 - JMP libc_closedir(SB) -TEXT ·libc_readdir_r_trampoline(SB),NOSPLIT,$0-0 - JMP libc_readdir_r(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go deleted file mode 100644 index c0c771f40..000000000 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go +++ /dev/null @@ -1,2416 +0,0 @@ -// go run mksyscall.go -l32 -tags darwin,arm,go1.12 syscall_bsd.go syscall_darwin.go syscall_darwin_arm.go -// Code generated by the command above; see README.md. DO NOT EDIT. - -// +build darwin,arm,go1.12 - -package unix - -import ( - "syscall" - "unsafe" -) - -var _ syscall.Errno - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func getgroups(ngid int, gid *_Gid_t) (n int, err error) { - r0, _, e1 := syscall_rawSyscall(funcPC(libc_getgroups_trampoline), uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_getgroups_trampoline() - -//go:cgo_import_dynamic libc_getgroups getgroups "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func setgroups(ngid int, gid *_Gid_t) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_setgroups_trampoline), uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_setgroups_trampoline() - -//go:cgo_import_dynamic libc_setgroups setgroups "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { - r0, _, e1 := syscall_syscall6(funcPC(libc_wait4_trampoline), uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) - wpid = int(r0) 
- if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_wait4_trampoline() - -//go:cgo_import_dynamic libc_wait4 wait4 "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_accept_trampoline), uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_accept_trampoline() - -//go:cgo_import_dynamic libc_accept accept "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_bind_trampoline), uintptr(s), uintptr(addr), uintptr(addrlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_bind_trampoline() - -//go:cgo_import_dynamic libc_bind bind "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_connect_trampoline), uintptr(s), uintptr(addr), uintptr(addrlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_connect_trampoline() - -//go:cgo_import_dynamic libc_connect connect "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func socket(domain int, typ int, proto int) (fd int, err error) { - r0, _, e1 := syscall_rawSyscall(funcPC(libc_socket_trampoline), uintptr(domain), uintptr(typ), uintptr(proto)) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_socket_trampoline() - -//go:cgo_import_dynamic libc_socket socket "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func getsockopt(s int, level int, name int, val unsafe.Pointer, 
vallen *_Socklen) (err error) { - _, _, e1 := syscall_syscall6(funcPC(libc_getsockopt_trampoline), uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_getsockopt_trampoline() - -//go:cgo_import_dynamic libc_getsockopt getsockopt "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { - _, _, e1 := syscall_syscall6(funcPC(libc_setsockopt_trampoline), uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_setsockopt_trampoline() - -//go:cgo_import_dynamic libc_setsockopt setsockopt "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_getpeername_trampoline), uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_getpeername_trampoline() - -//go:cgo_import_dynamic libc_getpeername getpeername "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_getsockname_trampoline), uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_getsockname_trampoline() - -//go:cgo_import_dynamic libc_getsockname getsockname "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Shutdown(s int, how int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_shutdown_trampoline), uintptr(s), uintptr(how), 0) - 
if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_shutdown_trampoline() - -//go:cgo_import_dynamic libc_shutdown shutdown "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { - _, _, e1 := syscall_rawSyscall6(funcPC(libc_socketpair_trampoline), uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_socketpair_trampoline() - -//go:cgo_import_dynamic libc_socketpair socketpair "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := syscall_syscall6(funcPC(libc_recvfrom_trampoline), uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_recvfrom_trampoline() - -//go:cgo_import_dynamic libc_recvfrom recvfrom "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := syscall_syscall6(funcPC(libc_sendto_trampoline), uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_sendto_trampoline() - -//go:cgo_import_dynamic libc_sendto sendto "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func recvmsg(s int, msg 
*Msghdr, flags int) (n int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_recvmsg_trampoline), uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_recvmsg_trampoline() - -//go:cgo_import_dynamic libc_recvmsg recvmsg "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_sendmsg_trampoline), uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_sendmsg_trampoline() - -//go:cgo_import_dynamic libc_sendmsg sendmsg "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) { - r0, _, e1 := syscall_syscall6(funcPC(libc_kevent_trampoline), uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_kevent_trampoline() - -//go:cgo_import_dynamic libc_kevent kevent "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func utimes(path string, timeval *[2]Timeval) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_utimes_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_utimes_trampoline() - -//go:cgo_import_dynamic libc_utimes utimes "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func futimes(fd int, timeval *[2]Timeval) (err error) { - _, _, e1 := 
syscall_syscall(funcPC(libc_futimes_trampoline), uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_futimes_trampoline() - -//go:cgo_import_dynamic libc_futimes futimes "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_poll_trampoline), uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_poll_trampoline() - -//go:cgo_import_dynamic libc_poll poll "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Madvise(b []byte, behav int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := syscall_syscall(funcPC(libc_madvise_trampoline), uintptr(_p0), uintptr(len(b)), uintptr(behav)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_madvise_trampoline() - -//go:cgo_import_dynamic libc_madvise madvise "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := syscall_syscall(funcPC(libc_mlock_trampoline), uintptr(_p0), uintptr(len(b)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_mlock_trampoline() - -//go:cgo_import_dynamic libc_mlock mlock "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mlockall(flags int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_mlockall_trampoline), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_mlockall_trampoline() - -//go:cgo_import_dynamic 
libc_mlockall mlockall "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mprotect(b []byte, prot int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := syscall_syscall(funcPC(libc_mprotect_trampoline), uintptr(_p0), uintptr(len(b)), uintptr(prot)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_mprotect_trampoline() - -//go:cgo_import_dynamic libc_mprotect mprotect "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Msync(b []byte, flags int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := syscall_syscall(funcPC(libc_msync_trampoline), uintptr(_p0), uintptr(len(b)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_msync_trampoline() - -//go:cgo_import_dynamic libc_msync msync "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Munlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := syscall_syscall(funcPC(libc_munlock_trampoline), uintptr(_p0), uintptr(len(b)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_munlock_trampoline() - -//go:cgo_import_dynamic libc_munlock munlock "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Munlockall() (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_munlockall_trampoline), 0, 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_munlockall_trampoline() - -//go:cgo_import_dynamic libc_munlockall munlockall "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func pipe(p *[2]int32) 
(err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_pipe_trampoline), uintptr(unsafe.Pointer(p)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_pipe_trampoline() - -//go:cgo_import_dynamic libc_pipe pipe "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func getxattr(path string, attr string, dest *byte, size int, position uint32, options int) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - r0, _, e1 := syscall_syscall6(funcPC(libc_getxattr_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(position), uintptr(options)) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_getxattr_trampoline() - -//go:cgo_import_dynamic libc_getxattr getxattr "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fgetxattr(fd int, attr string, dest *byte, size int, position uint32, options int) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - r0, _, e1 := syscall_syscall6(funcPC(libc_fgetxattr_trampoline), uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(position), uintptr(options)) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_fgetxattr_trampoline() - -//go:cgo_import_dynamic libc_fgetxattr fgetxattr "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func setxattr(path string, attr string, data *byte, size int, position uint32, options int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - _, 
_, e1 := syscall_syscall6(funcPC(libc_setxattr_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(data)), uintptr(size), uintptr(position), uintptr(options)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_setxattr_trampoline() - -//go:cgo_import_dynamic libc_setxattr setxattr "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fsetxattr(fd int, attr string, data *byte, size int, position uint32, options int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := syscall_syscall6(funcPC(libc_fsetxattr_trampoline), uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(data)), uintptr(size), uintptr(position), uintptr(options)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_fsetxattr_trampoline() - -//go:cgo_import_dynamic libc_fsetxattr fsetxattr "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func removexattr(path string, attr string, options int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_removexattr_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(options)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_removexattr_trampoline() - -//go:cgo_import_dynamic libc_removexattr removexattr "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fremovexattr(fd int, attr string, options int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_fremovexattr_trampoline), uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(options)) - if e1 != 0 { - err = 
errnoErr(e1) - } - return -} - -func libc_fremovexattr_trampoline() - -//go:cgo_import_dynamic libc_fremovexattr fremovexattr "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func listxattr(path string, dest *byte, size int, options int) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - r0, _, e1 := syscall_syscall6(funcPC(libc_listxattr_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(options), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_listxattr_trampoline() - -//go:cgo_import_dynamic libc_listxattr listxattr "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func flistxattr(fd int, dest *byte, size int, options int) (sz int, err error) { - r0, _, e1 := syscall_syscall6(funcPC(libc_flistxattr_trampoline), uintptr(fd), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(options), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_flistxattr_trampoline() - -//go:cgo_import_dynamic libc_flistxattr flistxattr "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func setattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintptr, options int) (err error) { - _, _, e1 := syscall_syscall6(funcPC(libc_setattrlist_trampoline), uintptr(unsafe.Pointer(path)), uintptr(list), uintptr(buf), uintptr(size), uintptr(options), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_setattrlist_trampoline() - -//go:cgo_import_dynamic libc_setattrlist setattrlist "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_fcntl_trampoline), uintptr(fd), uintptr(cmd), uintptr(arg)) - val = 
int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_fcntl_trampoline() - -//go:cgo_import_dynamic libc_fcntl fcntl "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func kill(pid int, signum int, posix int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_kill_trampoline), uintptr(pid), uintptr(signum), uintptr(posix)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_kill_trampoline() - -//go:cgo_import_dynamic libc_kill kill "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ioctl(fd int, req uint, arg uintptr) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_ioctl_trampoline), uintptr(fd), uintptr(req), uintptr(arg)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_ioctl_trampoline() - -//go:cgo_import_dynamic libc_ioctl ioctl "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := syscall_syscall6(funcPC(libc_sysctl_trampoline), uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_sysctl_trampoline() - -//go:cgo_import_dynamic libc_sysctl sysctl "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) { - _, _, e1 := syscall_syscall9(funcPC(libc_sendfile_trampoline), uintptr(infd), uintptr(outfd), uintptr(offset), uintptr(offset>>32), uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags), 0, 0) - if e1 != 0 { - err = 
errnoErr(e1) - } - return -} - -func libc_sendfile_trampoline() - -//go:cgo_import_dynamic libc_sendfile sendfile "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Access(path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_access_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_access_trampoline() - -//go:cgo_import_dynamic libc_access access "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_adjtime_trampoline), uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_adjtime_trampoline() - -//go:cgo_import_dynamic libc_adjtime adjtime "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chdir(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_chdir_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_chdir_trampoline() - -//go:cgo_import_dynamic libc_chdir chdir "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chflags(path string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_chflags_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_chflags_trampoline() - -//go:cgo_import_dynamic libc_chflags chflags "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS 
GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chmod(path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_chmod_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_chmod_trampoline() - -//go:cgo_import_dynamic libc_chmod chmod "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chown(path string, uid int, gid int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_chown_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_chown_trampoline() - -//go:cgo_import_dynamic libc_chown chown "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chroot(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_chroot_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_chroot_trampoline() - -//go:cgo_import_dynamic libc_chroot chroot "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ClockGettime(clockid int32, time *Timespec) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_clock_gettime_trampoline), uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_clock_gettime_trampoline() - -//go:cgo_import_dynamic libc_clock_gettime clock_gettime "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Close(fd int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_close_trampoline), 
uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_close_trampoline() - -//go:cgo_import_dynamic libc_close close "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Clonefile(src string, dst string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(src) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(dst) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_clonefile_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_clonefile_trampoline() - -//go:cgo_import_dynamic libc_clonefile clonefile "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Clonefileat(srcDirfd int, src string, dstDirfd int, dst string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(src) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(dst) - if err != nil { - return - } - _, _, e1 := syscall_syscall6(funcPC(libc_clonefileat_trampoline), uintptr(srcDirfd), uintptr(unsafe.Pointer(_p0)), uintptr(dstDirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_clonefileat_trampoline() - -//go:cgo_import_dynamic libc_clonefileat clonefileat "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Dup(fd int) (nfd int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_dup_trampoline), uintptr(fd), 0, 0) - nfd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_dup_trampoline() - -//go:cgo_import_dynamic libc_dup dup "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Dup2(from int, to int) (err error) { - _, _, e1 := 
syscall_syscall(funcPC(libc_dup2_trampoline), uintptr(from), uintptr(to), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_dup2_trampoline() - -//go:cgo_import_dynamic libc_dup2 dup2 "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Exchangedata(path1 string, path2 string, options int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path1) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(path2) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_exchangedata_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(options)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_exchangedata_trampoline() - -//go:cgo_import_dynamic libc_exchangedata exchangedata "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Exit(code int) { - syscall_syscall(funcPC(libc_exit_trampoline), uintptr(code), 0, 0) - return -} - -func libc_exit_trampoline() - -//go:cgo_import_dynamic libc_exit exit "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall6(funcPC(libc_faccessat_trampoline), uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_faccessat_trampoline() - -//go:cgo_import_dynamic libc_faccessat faccessat "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchdir(fd int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_fchdir_trampoline), uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_fchdir_trampoline() - -//go:cgo_import_dynamic 
libc_fchdir fchdir "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchflags(fd int, flags int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_fchflags_trampoline), uintptr(fd), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_fchflags_trampoline() - -//go:cgo_import_dynamic libc_fchflags fchflags "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchmod(fd int, mode uint32) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_fchmod_trampoline), uintptr(fd), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_fchmod_trampoline() - -//go:cgo_import_dynamic libc_fchmod fchmod "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall6(funcPC(libc_fchmodat_trampoline), uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_fchmodat_trampoline() - -//go:cgo_import_dynamic libc_fchmodat fchmodat "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchown(fd int, uid int, gid int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_fchown_trampoline), uintptr(fd), uintptr(uid), uintptr(gid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_fchown_trampoline() - -//go:cgo_import_dynamic libc_fchown fchown "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := 
syscall_syscall6(funcPC(libc_fchownat_trampoline), uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_fchownat_trampoline() - -//go:cgo_import_dynamic libc_fchownat fchownat "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fclonefileat(srcDirfd int, dstDirfd int, dst string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(dst) - if err != nil { - return - } - _, _, e1 := syscall_syscall6(funcPC(libc_fclonefileat_trampoline), uintptr(srcDirfd), uintptr(dstDirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_fclonefileat_trampoline() - -//go:cgo_import_dynamic libc_fclonefileat fclonefileat "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Flock(fd int, how int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_flock_trampoline), uintptr(fd), uintptr(how), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_flock_trampoline() - -//go:cgo_import_dynamic libc_flock flock "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fpathconf(fd int, name int) (val int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_fpathconf_trampoline), uintptr(fd), uintptr(name), 0) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_fpathconf_trampoline() - -//go:cgo_import_dynamic libc_fpathconf fpathconf "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fsync(fd int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_fsync_trampoline), uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_fsync_trampoline() - -//go:cgo_import_dynamic libc_fsync fsync "/usr/lib/libSystem.B.dylib" - 
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Ftruncate(fd int, length int64) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_ftruncate_trampoline), uintptr(fd), uintptr(length), uintptr(length>>32)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_ftruncate_trampoline() - -//go:cgo_import_dynamic libc_ftruncate ftruncate "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getcwd(buf []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := syscall_syscall(funcPC(libc_getcwd_trampoline), uintptr(_p0), uintptr(len(buf)), 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_getcwd_trampoline() - -//go:cgo_import_dynamic libc_getcwd getcwd "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getdtablesize() (size int) { - r0, _, _ := syscall_syscall(funcPC(libc_getdtablesize_trampoline), 0, 0, 0) - size = int(r0) - return -} - -func libc_getdtablesize_trampoline() - -//go:cgo_import_dynamic libc_getdtablesize getdtablesize "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getegid() (egid int) { - r0, _, _ := syscall_rawSyscall(funcPC(libc_getegid_trampoline), 0, 0, 0) - egid = int(r0) - return -} - -func libc_getegid_trampoline() - -//go:cgo_import_dynamic libc_getegid getegid "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Geteuid() (uid int) { - r0, _, _ := syscall_rawSyscall(funcPC(libc_geteuid_trampoline), 0, 0, 0) - uid = int(r0) - return -} - -func libc_geteuid_trampoline() - -//go:cgo_import_dynamic libc_geteuid geteuid "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getgid() (gid int) { - r0, _, _ := 
syscall_rawSyscall(funcPC(libc_getgid_trampoline), 0, 0, 0) - gid = int(r0) - return -} - -func libc_getgid_trampoline() - -//go:cgo_import_dynamic libc_getgid getgid "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpgid(pid int) (pgid int, err error) { - r0, _, e1 := syscall_rawSyscall(funcPC(libc_getpgid_trampoline), uintptr(pid), 0, 0) - pgid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_getpgid_trampoline() - -//go:cgo_import_dynamic libc_getpgid getpgid "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpgrp() (pgrp int) { - r0, _, _ := syscall_rawSyscall(funcPC(libc_getpgrp_trampoline), 0, 0, 0) - pgrp = int(r0) - return -} - -func libc_getpgrp_trampoline() - -//go:cgo_import_dynamic libc_getpgrp getpgrp "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpid() (pid int) { - r0, _, _ := syscall_rawSyscall(funcPC(libc_getpid_trampoline), 0, 0, 0) - pid = int(r0) - return -} - -func libc_getpid_trampoline() - -//go:cgo_import_dynamic libc_getpid getpid "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getppid() (ppid int) { - r0, _, _ := syscall_rawSyscall(funcPC(libc_getppid_trampoline), 0, 0, 0) - ppid = int(r0) - return -} - -func libc_getppid_trampoline() - -//go:cgo_import_dynamic libc_getppid getppid "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpriority(which int, who int) (prio int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_getpriority_trampoline), uintptr(which), uintptr(who), 0) - prio = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_getpriority_trampoline() - -//go:cgo_import_dynamic libc_getpriority getpriority "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO 
NOT EDIT - -func Getrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_getrlimit_trampoline), uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_getrlimit_trampoline() - -//go:cgo_import_dynamic libc_getrlimit getrlimit "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getrusage(who int, rusage *Rusage) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_getrusage_trampoline), uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_getrusage_trampoline() - -//go:cgo_import_dynamic libc_getrusage getrusage "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getsid(pid int) (sid int, err error) { - r0, _, e1 := syscall_rawSyscall(funcPC(libc_getsid_trampoline), uintptr(pid), 0, 0) - sid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_getsid_trampoline() - -//go:cgo_import_dynamic libc_getsid getsid "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Gettimeofday(tp *Timeval) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_gettimeofday_trampoline), uintptr(unsafe.Pointer(tp)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_gettimeofday_trampoline() - -//go:cgo_import_dynamic libc_gettimeofday gettimeofday "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getuid() (uid int) { - r0, _, _ := syscall_rawSyscall(funcPC(libc_getuid_trampoline), 0, 0, 0) - uid = int(r0) - return -} - -func libc_getuid_trampoline() - -//go:cgo_import_dynamic libc_getuid getuid "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Issetugid() (tainted bool) { - r0, _, _ := 
syscall_rawSyscall(funcPC(libc_issetugid_trampoline), 0, 0, 0) - tainted = bool(r0 != 0) - return -} - -func libc_issetugid_trampoline() - -//go:cgo_import_dynamic libc_issetugid issetugid "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Kqueue() (fd int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_kqueue_trampoline), 0, 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_kqueue_trampoline() - -//go:cgo_import_dynamic libc_kqueue kqueue "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lchown(path string, uid int, gid int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_lchown_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_lchown_trampoline() - -//go:cgo_import_dynamic libc_lchown lchown "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Link(path string, link string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(link) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_link_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_link_trampoline() - -//go:cgo_import_dynamic libc_link link "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(link) - if err != nil { - return - } - _, _, e1 := 
syscall_syscall6(funcPC(libc_linkat_trampoline), uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_linkat_trampoline() - -//go:cgo_import_dynamic libc_linkat linkat "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Listen(s int, backlog int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_listen_trampoline), uintptr(s), uintptr(backlog), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_listen_trampoline() - -//go:cgo_import_dynamic libc_listen listen "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mkdir(path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_mkdir_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_mkdir_trampoline() - -//go:cgo_import_dynamic libc_mkdir mkdir "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mkdirat(dirfd int, path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_mkdirat_trampoline), uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_mkdirat_trampoline() - -//go:cgo_import_dynamic libc_mkdirat mkdirat "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mkfifo(path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_mkfifo_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) - if e1 != 
0 { - err = errnoErr(e1) - } - return -} - -func libc_mkfifo_trampoline() - -//go:cgo_import_dynamic libc_mkfifo mkfifo "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mknod(path string, mode uint32, dev int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_mknod_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_mknod_trampoline() - -//go:cgo_import_dynamic libc_mknod mknod "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Open(path string, mode int, perm uint32) (fd int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - r0, _, e1 := syscall_syscall(funcPC(libc_open_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_open_trampoline() - -//go:cgo_import_dynamic libc_open open "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - r0, _, e1 := syscall_syscall6(funcPC(libc_openat_trampoline), uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_openat_trampoline() - -//go:cgo_import_dynamic libc_openat openat "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Pathconf(path string, name int) (val int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - r0, _, e1 := syscall_syscall(funcPC(libc_pathconf_trampoline), 
uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_pathconf_trampoline() - -//go:cgo_import_dynamic libc_pathconf pathconf "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Pread(fd int, p []byte, offset int64) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := syscall_syscall6(funcPC(libc_pread_trampoline), uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), uintptr(offset>>32), 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_pread_trampoline() - -//go:cgo_import_dynamic libc_pread pread "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := syscall_syscall6(funcPC(libc_pwrite_trampoline), uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), uintptr(offset>>32), 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_pwrite_trampoline() - -//go:cgo_import_dynamic libc_pwrite pwrite "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func read(fd int, p []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := syscall_syscall(funcPC(libc_read_trampoline), uintptr(fd), uintptr(_p0), uintptr(len(p))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_read_trampoline() - -//go:cgo_import_dynamic libc_read read "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Readlink(path string, buf 
[]byte) (n int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(buf) > 0 { - _p1 = unsafe.Pointer(&buf[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := syscall_syscall(funcPC(libc_readlink_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_readlink_trampoline() - -//go:cgo_import_dynamic libc_readlink readlink "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(buf) > 0 { - _p1 = unsafe.Pointer(&buf[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := syscall_syscall6(funcPC(libc_readlinkat_trampoline), uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_readlinkat_trampoline() - -//go:cgo_import_dynamic libc_readlinkat readlinkat "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Rename(from string, to string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(from) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(to) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_rename_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_rename_trampoline() - -//go:cgo_import_dynamic libc_rename rename "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Renameat(fromfd int, from string, tofd int, to string) (err error) { - var _p0 *byte - _p0, err = 
BytePtrFromString(from) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(to) - if err != nil { - return - } - _, _, e1 := syscall_syscall6(funcPC(libc_renameat_trampoline), uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_renameat_trampoline() - -//go:cgo_import_dynamic libc_renameat renameat "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Revoke(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_revoke_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_revoke_trampoline() - -//go:cgo_import_dynamic libc_revoke revoke "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Rmdir(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_rmdir_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_rmdir_trampoline() - -//go:cgo_import_dynamic libc_rmdir rmdir "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { - r0, r1, e1 := syscall_syscall6(funcPC(libc_lseek_trampoline), uintptr(fd), uintptr(offset), uintptr(offset>>32), uintptr(whence), 0, 0) - newoffset = int64(int64(r1)<<32 | int64(r0)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_lseek_trampoline() - -//go:cgo_import_dynamic libc_lseek lseek "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n 
int, err error) { - r0, _, e1 := syscall_syscall6(funcPC(libc_select_trampoline), uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_select_trampoline() - -//go:cgo_import_dynamic libc_select select "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setegid(egid int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_setegid_trampoline), uintptr(egid), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_setegid_trampoline() - -//go:cgo_import_dynamic libc_setegid setegid "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Seteuid(euid int) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_seteuid_trampoline), uintptr(euid), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_seteuid_trampoline() - -//go:cgo_import_dynamic libc_seteuid seteuid "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setgid(gid int) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_setgid_trampoline), uintptr(gid), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_setgid_trampoline() - -//go:cgo_import_dynamic libc_setgid setgid "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setlogin(name string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(name) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_setlogin_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_setlogin_trampoline() - -//go:cgo_import_dynamic libc_setlogin setlogin "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func 
Setpgid(pid int, pgid int) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_setpgid_trampoline), uintptr(pid), uintptr(pgid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_setpgid_trampoline() - -//go:cgo_import_dynamic libc_setpgid setpgid "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setpriority(which int, who int, prio int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_setpriority_trampoline), uintptr(which), uintptr(who), uintptr(prio)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_setpriority_trampoline() - -//go:cgo_import_dynamic libc_setpriority setpriority "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setprivexec(flag int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_setprivexec_trampoline), uintptr(flag), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_setprivexec_trampoline() - -//go:cgo_import_dynamic libc_setprivexec setprivexec "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setregid(rgid int, egid int) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_setregid_trampoline), uintptr(rgid), uintptr(egid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_setregid_trampoline() - -//go:cgo_import_dynamic libc_setregid setregid "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_setreuid_trampoline), uintptr(ruid), uintptr(euid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_setreuid_trampoline() - -//go:cgo_import_dynamic libc_setreuid setreuid "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, 
e1 := syscall_rawSyscall(funcPC(libc_setrlimit_trampoline), uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_setrlimit_trampoline() - -//go:cgo_import_dynamic libc_setrlimit setrlimit "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setsid() (pid int, err error) { - r0, _, e1 := syscall_rawSyscall(funcPC(libc_setsid_trampoline), 0, 0, 0) - pid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_setsid_trampoline() - -//go:cgo_import_dynamic libc_setsid setsid "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Settimeofday(tp *Timeval) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_settimeofday_trampoline), uintptr(unsafe.Pointer(tp)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_settimeofday_trampoline() - -//go:cgo_import_dynamic libc_settimeofday settimeofday "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setuid(uid int) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_setuid_trampoline), uintptr(uid), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_setuid_trampoline() - -//go:cgo_import_dynamic libc_setuid setuid "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Symlink(path string, link string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(link) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_symlink_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_symlink_trampoline() - -//go:cgo_import_dynamic libc_symlink symlink "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED 
BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(oldpath) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(newpath) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_symlinkat_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_symlinkat_trampoline() - -//go:cgo_import_dynamic libc_symlinkat symlinkat "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Sync() (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_sync_trampoline), 0, 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_sync_trampoline() - -//go:cgo_import_dynamic libc_sync sync "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Truncate(path string, length int64) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_truncate_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(length), uintptr(length>>32)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_truncate_trampoline() - -//go:cgo_import_dynamic libc_truncate truncate "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Umask(newmask int) (oldmask int) { - r0, _, _ := syscall_syscall(funcPC(libc_umask_trampoline), uintptr(newmask), 0, 0) - oldmask = int(r0) - return -} - -func libc_umask_trampoline() - -//go:cgo_import_dynamic libc_umask umask "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Undelete(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := 
syscall_syscall(funcPC(libc_undelete_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_undelete_trampoline() - -//go:cgo_import_dynamic libc_undelete undelete "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unlink(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_unlink_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_unlink_trampoline() - -//go:cgo_import_dynamic libc_unlink unlink "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unlinkat(dirfd int, path string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_unlinkat_trampoline), uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_unlinkat_trampoline() - -//go:cgo_import_dynamic libc_unlinkat unlinkat "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unmount(path string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_unmount_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_unmount_trampoline() - -//go:cgo_import_dynamic libc_unmount unmount "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func write(fd int, p []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := syscall_syscall(funcPC(libc_write_trampoline), 
uintptr(fd), uintptr(_p0), uintptr(len(p))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_write_trampoline() - -//go:cgo_import_dynamic libc_write write "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { - r0, _, e1 := syscall_syscall9(funcPC(libc_mmap_trampoline), uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos), uintptr(pos>>32), 0, 0) - ret = uintptr(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_mmap_trampoline() - -//go:cgo_import_dynamic libc_mmap mmap "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func munmap(addr uintptr, length uintptr) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_munmap_trampoline), uintptr(addr), uintptr(length), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_munmap_trampoline() - -//go:cgo_import_dynamic libc_munmap munmap "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_read_trampoline), uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_write_trampoline), uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fstat(fd int, stat *Stat_t) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_fstat_trampoline), uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) - if e1 
!= 0 { - err = errnoErr(e1) - } - return -} - -func libc_fstat_trampoline() - -//go:cgo_import_dynamic libc_fstat fstat "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall6(funcPC(libc_fstatat_trampoline), uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_fstatat_trampoline() - -//go:cgo_import_dynamic libc_fstatat fstatat "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fstatfs(fd int, stat *Statfs_t) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_fstatfs_trampoline), uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_fstatfs_trampoline() - -//go:cgo_import_dynamic libc_fstatfs fstatfs "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func getfsstat(buf unsafe.Pointer, size uintptr, flags int) (n int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_getfsstat_trampoline), uintptr(buf), uintptr(size), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_getfsstat_trampoline() - -//go:cgo_import_dynamic libc_getfsstat getfsstat "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lstat(path string, stat *Stat_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_lstat_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_lstat_trampoline() - -//go:cgo_import_dynamic libc_lstat 
lstat "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Stat(path string, stat *Stat_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_stat_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_stat_trampoline() - -//go:cgo_import_dynamic libc_stat stat "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Statfs(path string, stat *Statfs_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(funcPC(libc_statfs_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_statfs_trampoline() - -//go:cgo_import_dynamic libc_statfs statfs "/usr/lib/libSystem.B.dylib" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.s deleted file mode 100644 index 5eec5f1d9..000000000 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.s +++ /dev/null @@ -1,288 +0,0 @@ -// go run mkasm_darwin.go arm -// Code generated by the command above; DO NOT EDIT. 
- -// +build go1.12 - -#include "textflag.h" -TEXT ·libc_getgroups_trampoline(SB),NOSPLIT,$0-0 - JMP libc_getgroups(SB) -TEXT ·libc_setgroups_trampoline(SB),NOSPLIT,$0-0 - JMP libc_setgroups(SB) -TEXT ·libc_wait4_trampoline(SB),NOSPLIT,$0-0 - JMP libc_wait4(SB) -TEXT ·libc_accept_trampoline(SB),NOSPLIT,$0-0 - JMP libc_accept(SB) -TEXT ·libc_bind_trampoline(SB),NOSPLIT,$0-0 - JMP libc_bind(SB) -TEXT ·libc_connect_trampoline(SB),NOSPLIT,$0-0 - JMP libc_connect(SB) -TEXT ·libc_socket_trampoline(SB),NOSPLIT,$0-0 - JMP libc_socket(SB) -TEXT ·libc_getsockopt_trampoline(SB),NOSPLIT,$0-0 - JMP libc_getsockopt(SB) -TEXT ·libc_setsockopt_trampoline(SB),NOSPLIT,$0-0 - JMP libc_setsockopt(SB) -TEXT ·libc_getpeername_trampoline(SB),NOSPLIT,$0-0 - JMP libc_getpeername(SB) -TEXT ·libc_getsockname_trampoline(SB),NOSPLIT,$0-0 - JMP libc_getsockname(SB) -TEXT ·libc_shutdown_trampoline(SB),NOSPLIT,$0-0 - JMP libc_shutdown(SB) -TEXT ·libc_socketpair_trampoline(SB),NOSPLIT,$0-0 - JMP libc_socketpair(SB) -TEXT ·libc_recvfrom_trampoline(SB),NOSPLIT,$0-0 - JMP libc_recvfrom(SB) -TEXT ·libc_sendto_trampoline(SB),NOSPLIT,$0-0 - JMP libc_sendto(SB) -TEXT ·libc_recvmsg_trampoline(SB),NOSPLIT,$0-0 - JMP libc_recvmsg(SB) -TEXT ·libc_sendmsg_trampoline(SB),NOSPLIT,$0-0 - JMP libc_sendmsg(SB) -TEXT ·libc_kevent_trampoline(SB),NOSPLIT,$0-0 - JMP libc_kevent(SB) -TEXT ·libc_utimes_trampoline(SB),NOSPLIT,$0-0 - JMP libc_utimes(SB) -TEXT ·libc_futimes_trampoline(SB),NOSPLIT,$0-0 - JMP libc_futimes(SB) -TEXT ·libc_poll_trampoline(SB),NOSPLIT,$0-0 - JMP libc_poll(SB) -TEXT ·libc_madvise_trampoline(SB),NOSPLIT,$0-0 - JMP libc_madvise(SB) -TEXT ·libc_mlock_trampoline(SB),NOSPLIT,$0-0 - JMP libc_mlock(SB) -TEXT ·libc_mlockall_trampoline(SB),NOSPLIT,$0-0 - JMP libc_mlockall(SB) -TEXT ·libc_mprotect_trampoline(SB),NOSPLIT,$0-0 - JMP libc_mprotect(SB) -TEXT ·libc_msync_trampoline(SB),NOSPLIT,$0-0 - JMP libc_msync(SB) -TEXT ·libc_munlock_trampoline(SB),NOSPLIT,$0-0 - JMP libc_munlock(SB) -TEXT 
·libc_munlockall_trampoline(SB),NOSPLIT,$0-0 - JMP libc_munlockall(SB) -TEXT ·libc_pipe_trampoline(SB),NOSPLIT,$0-0 - JMP libc_pipe(SB) -TEXT ·libc_getxattr_trampoline(SB),NOSPLIT,$0-0 - JMP libc_getxattr(SB) -TEXT ·libc_fgetxattr_trampoline(SB),NOSPLIT,$0-0 - JMP libc_fgetxattr(SB) -TEXT ·libc_setxattr_trampoline(SB),NOSPLIT,$0-0 - JMP libc_setxattr(SB) -TEXT ·libc_fsetxattr_trampoline(SB),NOSPLIT,$0-0 - JMP libc_fsetxattr(SB) -TEXT ·libc_removexattr_trampoline(SB),NOSPLIT,$0-0 - JMP libc_removexattr(SB) -TEXT ·libc_fremovexattr_trampoline(SB),NOSPLIT,$0-0 - JMP libc_fremovexattr(SB) -TEXT ·libc_listxattr_trampoline(SB),NOSPLIT,$0-0 - JMP libc_listxattr(SB) -TEXT ·libc_flistxattr_trampoline(SB),NOSPLIT,$0-0 - JMP libc_flistxattr(SB) -TEXT ·libc_setattrlist_trampoline(SB),NOSPLIT,$0-0 - JMP libc_setattrlist(SB) -TEXT ·libc_fcntl_trampoline(SB),NOSPLIT,$0-0 - JMP libc_fcntl(SB) -TEXT ·libc_kill_trampoline(SB),NOSPLIT,$0-0 - JMP libc_kill(SB) -TEXT ·libc_ioctl_trampoline(SB),NOSPLIT,$0-0 - JMP libc_ioctl(SB) -TEXT ·libc_sysctl_trampoline(SB),NOSPLIT,$0-0 - JMP libc_sysctl(SB) -TEXT ·libc_sendfile_trampoline(SB),NOSPLIT,$0-0 - JMP libc_sendfile(SB) -TEXT ·libc_access_trampoline(SB),NOSPLIT,$0-0 - JMP libc_access(SB) -TEXT ·libc_adjtime_trampoline(SB),NOSPLIT,$0-0 - JMP libc_adjtime(SB) -TEXT ·libc_chdir_trampoline(SB),NOSPLIT,$0-0 - JMP libc_chdir(SB) -TEXT ·libc_chflags_trampoline(SB),NOSPLIT,$0-0 - JMP libc_chflags(SB) -TEXT ·libc_chmod_trampoline(SB),NOSPLIT,$0-0 - JMP libc_chmod(SB) -TEXT ·libc_chown_trampoline(SB),NOSPLIT,$0-0 - JMP libc_chown(SB) -TEXT ·libc_chroot_trampoline(SB),NOSPLIT,$0-0 - JMP libc_chroot(SB) -TEXT ·libc_clock_gettime_trampoline(SB),NOSPLIT,$0-0 - JMP libc_clock_gettime(SB) -TEXT ·libc_close_trampoline(SB),NOSPLIT,$0-0 - JMP libc_close(SB) -TEXT ·libc_clonefile_trampoline(SB),NOSPLIT,$0-0 - JMP libc_clonefile(SB) -TEXT ·libc_clonefileat_trampoline(SB),NOSPLIT,$0-0 - JMP libc_clonefileat(SB) -TEXT ·libc_dup_trampoline(SB),NOSPLIT,$0-0 - JMP 
libc_dup(SB) -TEXT ·libc_dup2_trampoline(SB),NOSPLIT,$0-0 - JMP libc_dup2(SB) -TEXT ·libc_exchangedata_trampoline(SB),NOSPLIT,$0-0 - JMP libc_exchangedata(SB) -TEXT ·libc_exit_trampoline(SB),NOSPLIT,$0-0 - JMP libc_exit(SB) -TEXT ·libc_faccessat_trampoline(SB),NOSPLIT,$0-0 - JMP libc_faccessat(SB) -TEXT ·libc_fchdir_trampoline(SB),NOSPLIT,$0-0 - JMP libc_fchdir(SB) -TEXT ·libc_fchflags_trampoline(SB),NOSPLIT,$0-0 - JMP libc_fchflags(SB) -TEXT ·libc_fchmod_trampoline(SB),NOSPLIT,$0-0 - JMP libc_fchmod(SB) -TEXT ·libc_fchmodat_trampoline(SB),NOSPLIT,$0-0 - JMP libc_fchmodat(SB) -TEXT ·libc_fchown_trampoline(SB),NOSPLIT,$0-0 - JMP libc_fchown(SB) -TEXT ·libc_fchownat_trampoline(SB),NOSPLIT,$0-0 - JMP libc_fchownat(SB) -TEXT ·libc_fclonefileat_trampoline(SB),NOSPLIT,$0-0 - JMP libc_fclonefileat(SB) -TEXT ·libc_flock_trampoline(SB),NOSPLIT,$0-0 - JMP libc_flock(SB) -TEXT ·libc_fpathconf_trampoline(SB),NOSPLIT,$0-0 - JMP libc_fpathconf(SB) -TEXT ·libc_fsync_trampoline(SB),NOSPLIT,$0-0 - JMP libc_fsync(SB) -TEXT ·libc_ftruncate_trampoline(SB),NOSPLIT,$0-0 - JMP libc_ftruncate(SB) -TEXT ·libc_getcwd_trampoline(SB),NOSPLIT,$0-0 - JMP libc_getcwd(SB) -TEXT ·libc_getdtablesize_trampoline(SB),NOSPLIT,$0-0 - JMP libc_getdtablesize(SB) -TEXT ·libc_getegid_trampoline(SB),NOSPLIT,$0-0 - JMP libc_getegid(SB) -TEXT ·libc_geteuid_trampoline(SB),NOSPLIT,$0-0 - JMP libc_geteuid(SB) -TEXT ·libc_getgid_trampoline(SB),NOSPLIT,$0-0 - JMP libc_getgid(SB) -TEXT ·libc_getpgid_trampoline(SB),NOSPLIT,$0-0 - JMP libc_getpgid(SB) -TEXT ·libc_getpgrp_trampoline(SB),NOSPLIT,$0-0 - JMP libc_getpgrp(SB) -TEXT ·libc_getpid_trampoline(SB),NOSPLIT,$0-0 - JMP libc_getpid(SB) -TEXT ·libc_getppid_trampoline(SB),NOSPLIT,$0-0 - JMP libc_getppid(SB) -TEXT ·libc_getpriority_trampoline(SB),NOSPLIT,$0-0 - JMP libc_getpriority(SB) -TEXT ·libc_getrlimit_trampoline(SB),NOSPLIT,$0-0 - JMP libc_getrlimit(SB) -TEXT ·libc_getrusage_trampoline(SB),NOSPLIT,$0-0 - JMP libc_getrusage(SB) -TEXT 
·libc_getsid_trampoline(SB),NOSPLIT,$0-0 - JMP libc_getsid(SB) -TEXT ·libc_gettimeofday_trampoline(SB),NOSPLIT,$0-0 - JMP libc_gettimeofday(SB) -TEXT ·libc_getuid_trampoline(SB),NOSPLIT,$0-0 - JMP libc_getuid(SB) -TEXT ·libc_issetugid_trampoline(SB),NOSPLIT,$0-0 - JMP libc_issetugid(SB) -TEXT ·libc_kqueue_trampoline(SB),NOSPLIT,$0-0 - JMP libc_kqueue(SB) -TEXT ·libc_lchown_trampoline(SB),NOSPLIT,$0-0 - JMP libc_lchown(SB) -TEXT ·libc_link_trampoline(SB),NOSPLIT,$0-0 - JMP libc_link(SB) -TEXT ·libc_linkat_trampoline(SB),NOSPLIT,$0-0 - JMP libc_linkat(SB) -TEXT ·libc_listen_trampoline(SB),NOSPLIT,$0-0 - JMP libc_listen(SB) -TEXT ·libc_mkdir_trampoline(SB),NOSPLIT,$0-0 - JMP libc_mkdir(SB) -TEXT ·libc_mkdirat_trampoline(SB),NOSPLIT,$0-0 - JMP libc_mkdirat(SB) -TEXT ·libc_mkfifo_trampoline(SB),NOSPLIT,$0-0 - JMP libc_mkfifo(SB) -TEXT ·libc_mknod_trampoline(SB),NOSPLIT,$0-0 - JMP libc_mknod(SB) -TEXT ·libc_open_trampoline(SB),NOSPLIT,$0-0 - JMP libc_open(SB) -TEXT ·libc_openat_trampoline(SB),NOSPLIT,$0-0 - JMP libc_openat(SB) -TEXT ·libc_pathconf_trampoline(SB),NOSPLIT,$0-0 - JMP libc_pathconf(SB) -TEXT ·libc_pread_trampoline(SB),NOSPLIT,$0-0 - JMP libc_pread(SB) -TEXT ·libc_pwrite_trampoline(SB),NOSPLIT,$0-0 - JMP libc_pwrite(SB) -TEXT ·libc_read_trampoline(SB),NOSPLIT,$0-0 - JMP libc_read(SB) -TEXT ·libc_readlink_trampoline(SB),NOSPLIT,$0-0 - JMP libc_readlink(SB) -TEXT ·libc_readlinkat_trampoline(SB),NOSPLIT,$0-0 - JMP libc_readlinkat(SB) -TEXT ·libc_rename_trampoline(SB),NOSPLIT,$0-0 - JMP libc_rename(SB) -TEXT ·libc_renameat_trampoline(SB),NOSPLIT,$0-0 - JMP libc_renameat(SB) -TEXT ·libc_revoke_trampoline(SB),NOSPLIT,$0-0 - JMP libc_revoke(SB) -TEXT ·libc_rmdir_trampoline(SB),NOSPLIT,$0-0 - JMP libc_rmdir(SB) -TEXT ·libc_lseek_trampoline(SB),NOSPLIT,$0-0 - JMP libc_lseek(SB) -TEXT ·libc_select_trampoline(SB),NOSPLIT,$0-0 - JMP libc_select(SB) -TEXT ·libc_setegid_trampoline(SB),NOSPLIT,$0-0 - JMP libc_setegid(SB) -TEXT ·libc_seteuid_trampoline(SB),NOSPLIT,$0-0 - JMP 
libc_seteuid(SB) -TEXT ·libc_setgid_trampoline(SB),NOSPLIT,$0-0 - JMP libc_setgid(SB) -TEXT ·libc_setlogin_trampoline(SB),NOSPLIT,$0-0 - JMP libc_setlogin(SB) -TEXT ·libc_setpgid_trampoline(SB),NOSPLIT,$0-0 - JMP libc_setpgid(SB) -TEXT ·libc_setpriority_trampoline(SB),NOSPLIT,$0-0 - JMP libc_setpriority(SB) -TEXT ·libc_setprivexec_trampoline(SB),NOSPLIT,$0-0 - JMP libc_setprivexec(SB) -TEXT ·libc_setregid_trampoline(SB),NOSPLIT,$0-0 - JMP libc_setregid(SB) -TEXT ·libc_setreuid_trampoline(SB),NOSPLIT,$0-0 - JMP libc_setreuid(SB) -TEXT ·libc_setrlimit_trampoline(SB),NOSPLIT,$0-0 - JMP libc_setrlimit(SB) -TEXT ·libc_setsid_trampoline(SB),NOSPLIT,$0-0 - JMP libc_setsid(SB) -TEXT ·libc_settimeofday_trampoline(SB),NOSPLIT,$0-0 - JMP libc_settimeofday(SB) -TEXT ·libc_setuid_trampoline(SB),NOSPLIT,$0-0 - JMP libc_setuid(SB) -TEXT ·libc_symlink_trampoline(SB),NOSPLIT,$0-0 - JMP libc_symlink(SB) -TEXT ·libc_symlinkat_trampoline(SB),NOSPLIT,$0-0 - JMP libc_symlinkat(SB) -TEXT ·libc_sync_trampoline(SB),NOSPLIT,$0-0 - JMP libc_sync(SB) -TEXT ·libc_truncate_trampoline(SB),NOSPLIT,$0-0 - JMP libc_truncate(SB) -TEXT ·libc_umask_trampoline(SB),NOSPLIT,$0-0 - JMP libc_umask(SB) -TEXT ·libc_undelete_trampoline(SB),NOSPLIT,$0-0 - JMP libc_undelete(SB) -TEXT ·libc_unlink_trampoline(SB),NOSPLIT,$0-0 - JMP libc_unlink(SB) -TEXT ·libc_unlinkat_trampoline(SB),NOSPLIT,$0-0 - JMP libc_unlinkat(SB) -TEXT ·libc_unmount_trampoline(SB),NOSPLIT,$0-0 - JMP libc_unmount(SB) -TEXT ·libc_write_trampoline(SB),NOSPLIT,$0-0 - JMP libc_write(SB) -TEXT ·libc_mmap_trampoline(SB),NOSPLIT,$0-0 - JMP libc_mmap(SB) -TEXT ·libc_munmap_trampoline(SB),NOSPLIT,$0-0 - JMP libc_munmap(SB) -TEXT ·libc_fstat_trampoline(SB),NOSPLIT,$0-0 - JMP libc_fstat(SB) -TEXT ·libc_fstatat_trampoline(SB),NOSPLIT,$0-0 - JMP libc_fstatat(SB) -TEXT ·libc_fstatfs_trampoline(SB),NOSPLIT,$0-0 - JMP libc_fstatfs(SB) -TEXT ·libc_getfsstat_trampoline(SB),NOSPLIT,$0-0 - JMP libc_getfsstat(SB) -TEXT ·libc_lstat_trampoline(SB),NOSPLIT,$0-0 - 
JMP libc_lstat(SB) -TEXT ·libc_stat_trampoline(SB),NOSPLIT,$0-0 - JMP libc_stat(SB) -TEXT ·libc_statfs_trampoline(SB),NOSPLIT,$0-0 - JMP libc_statfs(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.go index 870eb37ab..cec595d55 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.go @@ -1,6 +1,7 @@ // go run mksyscall.go -tags darwin,arm64,go1.13 syscall_darwin.1_13.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build darwin && arm64 && go1.13 // +build darwin,arm64,go1.13 package unix @@ -15,25 +16,25 @@ var _ syscall.Errno // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func closedir(dir uintptr) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_closedir_trampoline), uintptr(dir), 0, 0) + _, _, e1 := syscall_syscall(libc_closedir_trampoline_addr, uintptr(dir), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_closedir_trampoline() +var libc_closedir_trampoline_addr uintptr //go:cgo_import_dynamic libc_closedir closedir "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func readdir_r(dir uintptr, entry *Dirent, result **Dirent) (res Errno) { - r0, _, _ := syscall_syscall(funcPC(libc_readdir_r_trampoline), uintptr(dir), uintptr(unsafe.Pointer(entry)), uintptr(unsafe.Pointer(result))) + r0, _, _ := syscall_syscall(libc_readdir_r_trampoline_addr, uintptr(dir), uintptr(unsafe.Pointer(entry)), uintptr(unsafe.Pointer(result))) res = Errno(r0) return } -func libc_readdir_r_trampoline() +var libc_readdir_r_trampoline_addr uintptr //go:cgo_import_dynamic libc_readdir_r readdir_r "/usr/lib/libSystem.B.dylib" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.s index b29dabb0f..357989722 100644 --- 
a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.s @@ -1,12 +1,25 @@ // go run mkasm_darwin.go arm64 // Code generated by the command above; DO NOT EDIT. +//go:build go1.13 // +build go1.13 #include "textflag.h" -TEXT ·libc_fdopendir_trampoline(SB),NOSPLIT,$0-0 + +TEXT libc_fdopendir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fdopendir(SB) -TEXT ·libc_closedir_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_fdopendir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fdopendir_trampoline_addr(SB)/8, $libc_fdopendir_trampoline<>(SB) + +TEXT libc_closedir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_closedir(SB) -TEXT ·libc_readdir_r_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_closedir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_closedir_trampoline_addr(SB)/8, $libc_closedir_trampoline<>(SB) + +TEXT libc_readdir_r_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_readdir_r(SB) + +GLOBL ·libc_readdir_r_trampoline_addr(SB), RODATA, $8 +DATA ·libc_readdir_r_trampoline_addr(SB)/8, $libc_readdir_r_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go index 9b01a79c4..f2ee2bd33 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go @@ -1,6 +1,7 @@ // go run mksyscall.go -tags darwin,arm64,go1.12 syscall_bsd.go syscall_darwin.go syscall_darwin_arm64.go // Code generated by the command above; see README.md. DO NOT EDIT. 
+//go:build darwin && arm64 && go1.12 // +build darwin,arm64,go1.12 package unix @@ -15,7 +16,7 @@ var _ syscall.Errno // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getgroups(ngid int, gid *_Gid_t) (n int, err error) { - r0, _, e1 := syscall_rawSyscall(funcPC(libc_getgroups_trampoline), uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + r0, _, e1 := syscall_rawSyscall(libc_getgroups_trampoline_addr, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -23,28 +24,28 @@ func getgroups(ngid int, gid *_Gid_t) (n int, err error) { return } -func libc_getgroups_trampoline() +var libc_getgroups_trampoline_addr uintptr //go:cgo_import_dynamic libc_getgroups getgroups "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func setgroups(ngid int, gid *_Gid_t) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_setgroups_trampoline), uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + _, _, e1 := syscall_rawSyscall(libc_setgroups_trampoline_addr, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_setgroups_trampoline() +var libc_setgroups_trampoline_addr uintptr //go:cgo_import_dynamic libc_setgroups setgroups "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { - r0, _, e1 := syscall_syscall6(funcPC(libc_wait4_trampoline), uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) + r0, _, e1 := syscall_syscall6(libc_wait4_trampoline_addr, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) wpid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -52,14 +53,14 @@ func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err return } -func libc_wait4_trampoline() +var 
libc_wait4_trampoline_addr uintptr //go:cgo_import_dynamic libc_wait4 wait4 "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_accept_trampoline), uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + r0, _, e1 := syscall_syscall(libc_accept_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -67,42 +68,42 @@ func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { return } -func libc_accept_trampoline() +var libc_accept_trampoline_addr uintptr //go:cgo_import_dynamic libc_accept accept "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_bind_trampoline), uintptr(s), uintptr(addr), uintptr(addrlen)) + _, _, e1 := syscall_syscall(libc_bind_trampoline_addr, uintptr(s), uintptr(addr), uintptr(addrlen)) if e1 != 0 { err = errnoErr(e1) } return } -func libc_bind_trampoline() +var libc_bind_trampoline_addr uintptr //go:cgo_import_dynamic libc_bind bind "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_connect_trampoline), uintptr(s), uintptr(addr), uintptr(addrlen)) + _, _, e1 := syscall_syscall(libc_connect_trampoline_addr, uintptr(s), uintptr(addr), uintptr(addrlen)) if e1 != 0 { err = errnoErr(e1) } return } -func libc_connect_trampoline() +var libc_connect_trampoline_addr uintptr //go:cgo_import_dynamic libc_connect connect "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func socket(domain int, typ int, proto int) 
(fd int, err error) { - r0, _, e1 := syscall_rawSyscall(funcPC(libc_socket_trampoline), uintptr(domain), uintptr(typ), uintptr(proto)) + r0, _, e1 := syscall_rawSyscall(libc_socket_trampoline_addr, uintptr(domain), uintptr(typ), uintptr(proto)) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -110,91 +111,91 @@ func socket(domain int, typ int, proto int) (fd int, err error) { return } -func libc_socket_trampoline() +var libc_socket_trampoline_addr uintptr //go:cgo_import_dynamic libc_socket socket "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { - _, _, e1 := syscall_syscall6(funcPC(libc_getsockopt_trampoline), uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + _, _, e1 := syscall_syscall6(libc_getsockopt_trampoline_addr, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_getsockopt_trampoline() +var libc_getsockopt_trampoline_addr uintptr //go:cgo_import_dynamic libc_getsockopt getsockopt "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { - _, _, e1 := syscall_syscall6(funcPC(libc_setsockopt_trampoline), uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + _, _, e1 := syscall_syscall6(libc_setsockopt_trampoline_addr, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_setsockopt_trampoline() +var libc_setsockopt_trampoline_addr uintptr //go:cgo_import_dynamic libc_setsockopt setsockopt "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err 
error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_getpeername_trampoline), uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + _, _, e1 := syscall_rawSyscall(libc_getpeername_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) if e1 != 0 { err = errnoErr(e1) } return } -func libc_getpeername_trampoline() +var libc_getpeername_trampoline_addr uintptr //go:cgo_import_dynamic libc_getpeername getpeername "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_getsockname_trampoline), uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + _, _, e1 := syscall_rawSyscall(libc_getsockname_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) if e1 != 0 { err = errnoErr(e1) } return } -func libc_getsockname_trampoline() +var libc_getsockname_trampoline_addr uintptr //go:cgo_import_dynamic libc_getsockname getsockname "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Shutdown(s int, how int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_shutdown_trampoline), uintptr(s), uintptr(how), 0) + _, _, e1 := syscall_syscall(libc_shutdown_trampoline_addr, uintptr(s), uintptr(how), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_shutdown_trampoline() +var libc_shutdown_trampoline_addr uintptr //go:cgo_import_dynamic libc_shutdown shutdown "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { - _, _, e1 := syscall_rawSyscall6(funcPC(libc_socketpair_trampoline), uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + _, _, e1 := syscall_rawSyscall6(libc_socketpair_trampoline_addr, 
uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_socketpair_trampoline() +var libc_socketpair_trampoline_addr uintptr //go:cgo_import_dynamic libc_socketpair socketpair "/usr/lib/libSystem.B.dylib" @@ -207,7 +208,7 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := syscall_syscall6(funcPC(libc_recvfrom_trampoline), uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + r0, _, e1 := syscall_syscall6(libc_recvfrom_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -215,7 +216,7 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl return } -func libc_recvfrom_trampoline() +var libc_recvfrom_trampoline_addr uintptr //go:cgo_import_dynamic libc_recvfrom recvfrom "/usr/lib/libSystem.B.dylib" @@ -228,21 +229,21 @@ func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) ( } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := syscall_syscall6(funcPC(libc_sendto_trampoline), uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + _, _, e1 := syscall_syscall6(libc_sendto_trampoline_addr, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) if e1 != 0 { err = errnoErr(e1) } return } -func libc_sendto_trampoline() +var libc_sendto_trampoline_addr uintptr //go:cgo_import_dynamic libc_sendto sendto "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_recvmsg_trampoline), uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) 
+ r0, _, e1 := syscall_syscall(libc_recvmsg_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -250,14 +251,14 @@ func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { return } -func libc_recvmsg_trampoline() +var libc_recvmsg_trampoline_addr uintptr //go:cgo_import_dynamic libc_recvmsg recvmsg "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_sendmsg_trampoline), uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + r0, _, e1 := syscall_syscall(libc_sendmsg_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -265,14 +266,14 @@ func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { return } -func libc_sendmsg_trampoline() +var libc_sendmsg_trampoline_addr uintptr //go:cgo_import_dynamic libc_sendmsg sendmsg "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) { - r0, _, e1 := syscall_syscall6(funcPC(libc_kevent_trampoline), uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) + r0, _, e1 := syscall_syscall6(libc_kevent_trampoline_addr, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -280,7 +281,7 @@ func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, ne return } -func libc_kevent_trampoline() +var libc_kevent_trampoline_addr uintptr //go:cgo_import_dynamic libc_kevent kevent "/usr/lib/libSystem.B.dylib" @@ -292,35 +293,35 @@ func utimes(path string, timeval *[2]Timeval) (err error) { if 
err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_utimes_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) + _, _, e1 := syscall_syscall(libc_utimes_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_utimes_trampoline() +var libc_utimes_trampoline_addr uintptr //go:cgo_import_dynamic libc_utimes utimes "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func futimes(fd int, timeval *[2]Timeval) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_futimes_trampoline), uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) + _, _, e1 := syscall_syscall(libc_futimes_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_futimes_trampoline() +var libc_futimes_trampoline_addr uintptr //go:cgo_import_dynamic libc_futimes futimes "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_poll_trampoline), uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) + r0, _, e1 := syscall_syscall(libc_poll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -328,7 +329,7 @@ func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { return } -func libc_poll_trampoline() +var libc_poll_trampoline_addr uintptr //go:cgo_import_dynamic libc_poll poll "/usr/lib/libSystem.B.dylib" @@ -341,14 +342,14 @@ func Madvise(b []byte, behav int) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := syscall_syscall(funcPC(libc_madvise_trampoline), uintptr(_p0), uintptr(len(b)), uintptr(behav)) + _, _, e1 := syscall_syscall(libc_madvise_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(behav)) if e1 != 0 { err = 
errnoErr(e1) } return } -func libc_madvise_trampoline() +var libc_madvise_trampoline_addr uintptr //go:cgo_import_dynamic libc_madvise madvise "/usr/lib/libSystem.B.dylib" @@ -361,28 +362,28 @@ func Mlock(b []byte) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := syscall_syscall(funcPC(libc_mlock_trampoline), uintptr(_p0), uintptr(len(b)), 0) + _, _, e1 := syscall_syscall(libc_mlock_trampoline_addr, uintptr(_p0), uintptr(len(b)), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_mlock_trampoline() +var libc_mlock_trampoline_addr uintptr //go:cgo_import_dynamic libc_mlock mlock "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mlockall(flags int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_mlockall_trampoline), uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall(libc_mlockall_trampoline_addr, uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_mlockall_trampoline() +var libc_mlockall_trampoline_addr uintptr //go:cgo_import_dynamic libc_mlockall mlockall "/usr/lib/libSystem.B.dylib" @@ -395,14 +396,14 @@ func Mprotect(b []byte, prot int) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := syscall_syscall(funcPC(libc_mprotect_trampoline), uintptr(_p0), uintptr(len(b)), uintptr(prot)) + _, _, e1 := syscall_syscall(libc_mprotect_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(prot)) if e1 != 0 { err = errnoErr(e1) } return } -func libc_mprotect_trampoline() +var libc_mprotect_trampoline_addr uintptr //go:cgo_import_dynamic libc_mprotect mprotect "/usr/lib/libSystem.B.dylib" @@ -415,14 +416,14 @@ func Msync(b []byte, flags int) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := syscall_syscall(funcPC(libc_msync_trampoline), uintptr(_p0), uintptr(len(b)), uintptr(flags)) + _, _, e1 := syscall_syscall(libc_msync_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(flags)) if e1 != 0 { err = errnoErr(e1) } return } -func 
libc_msync_trampoline() +var libc_msync_trampoline_addr uintptr //go:cgo_import_dynamic libc_msync msync "/usr/lib/libSystem.B.dylib" @@ -435,42 +436,42 @@ func Munlock(b []byte) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := syscall_syscall(funcPC(libc_munlock_trampoline), uintptr(_p0), uintptr(len(b)), 0) + _, _, e1 := syscall_syscall(libc_munlock_trampoline_addr, uintptr(_p0), uintptr(len(b)), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_munlock_trampoline() +var libc_munlock_trampoline_addr uintptr //go:cgo_import_dynamic libc_munlock munlock "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Munlockall() (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_munlockall_trampoline), 0, 0, 0) + _, _, e1 := syscall_syscall(libc_munlockall_trampoline_addr, 0, 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_munlockall_trampoline() +var libc_munlockall_trampoline_addr uintptr //go:cgo_import_dynamic libc_munlockall munlockall "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func pipe(p *[2]int32) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_pipe_trampoline), uintptr(unsafe.Pointer(p)), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_pipe_trampoline_addr, uintptr(unsafe.Pointer(p)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_pipe_trampoline() +var libc_pipe_trampoline_addr uintptr //go:cgo_import_dynamic libc_pipe pipe "/usr/lib/libSystem.B.dylib" @@ -487,7 +488,7 @@ func getxattr(path string, attr string, dest *byte, size int, position uint32, o if err != nil { return } - r0, _, e1 := syscall_syscall6(funcPC(libc_getxattr_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(position), uintptr(options)) + r0, _, e1 := syscall_syscall6(libc_getxattr_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 
uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(position), uintptr(options)) sz = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -495,7 +496,7 @@ func getxattr(path string, attr string, dest *byte, size int, position uint32, o return } -func libc_getxattr_trampoline() +var libc_getxattr_trampoline_addr uintptr //go:cgo_import_dynamic libc_getxattr getxattr "/usr/lib/libSystem.B.dylib" @@ -507,7 +508,7 @@ func fgetxattr(fd int, attr string, dest *byte, size int, position uint32, optio if err != nil { return } - r0, _, e1 := syscall_syscall6(funcPC(libc_fgetxattr_trampoline), uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(position), uintptr(options)) + r0, _, e1 := syscall_syscall6(libc_fgetxattr_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(position), uintptr(options)) sz = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -515,7 +516,7 @@ func fgetxattr(fd int, attr string, dest *byte, size int, position uint32, optio return } -func libc_fgetxattr_trampoline() +var libc_fgetxattr_trampoline_addr uintptr //go:cgo_import_dynamic libc_fgetxattr fgetxattr "/usr/lib/libSystem.B.dylib" @@ -532,14 +533,14 @@ func setxattr(path string, attr string, data *byte, size int, position uint32, o if err != nil { return } - _, _, e1 := syscall_syscall6(funcPC(libc_setxattr_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(data)), uintptr(size), uintptr(position), uintptr(options)) + _, _, e1 := syscall_syscall6(libc_setxattr_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(data)), uintptr(size), uintptr(position), uintptr(options)) if e1 != 0 { err = errnoErr(e1) } return } -func libc_setxattr_trampoline() +var libc_setxattr_trampoline_addr uintptr //go:cgo_import_dynamic libc_setxattr setxattr "/usr/lib/libSystem.B.dylib" @@ -551,14 +552,14 @@ func fsetxattr(fd int, attr 
string, data *byte, size int, position uint32, optio if err != nil { return } - _, _, e1 := syscall_syscall6(funcPC(libc_fsetxattr_trampoline), uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(data)), uintptr(size), uintptr(position), uintptr(options)) + _, _, e1 := syscall_syscall6(libc_fsetxattr_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(data)), uintptr(size), uintptr(position), uintptr(options)) if e1 != 0 { err = errnoErr(e1) } return } -func libc_fsetxattr_trampoline() +var libc_fsetxattr_trampoline_addr uintptr //go:cgo_import_dynamic libc_fsetxattr fsetxattr "/usr/lib/libSystem.B.dylib" @@ -575,14 +576,14 @@ func removexattr(path string, attr string, options int) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_removexattr_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(options)) + _, _, e1 := syscall_syscall(libc_removexattr_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(options)) if e1 != 0 { err = errnoErr(e1) } return } -func libc_removexattr_trampoline() +var libc_removexattr_trampoline_addr uintptr //go:cgo_import_dynamic libc_removexattr removexattr "/usr/lib/libSystem.B.dylib" @@ -594,14 +595,14 @@ func fremovexattr(fd int, attr string, options int) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_fremovexattr_trampoline), uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(options)) + _, _, e1 := syscall_syscall(libc_fremovexattr_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(options)) if e1 != 0 { err = errnoErr(e1) } return } -func libc_fremovexattr_trampoline() +var libc_fremovexattr_trampoline_addr uintptr //go:cgo_import_dynamic libc_fremovexattr fremovexattr "/usr/lib/libSystem.B.dylib" @@ -613,7 +614,7 @@ func listxattr(path string, dest *byte, size int, options int) (sz int, err erro if err != nil { return } - r0, _, e1 := 
syscall_syscall6(funcPC(libc_listxattr_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(options), 0, 0) + r0, _, e1 := syscall_syscall6(libc_listxattr_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(options), 0, 0) sz = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -621,14 +622,14 @@ func listxattr(path string, dest *byte, size int, options int) (sz int, err erro return } -func libc_listxattr_trampoline() +var libc_listxattr_trampoline_addr uintptr //go:cgo_import_dynamic libc_listxattr listxattr "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func flistxattr(fd int, dest *byte, size int, options int) (sz int, err error) { - r0, _, e1 := syscall_syscall6(funcPC(libc_flistxattr_trampoline), uintptr(fd), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(options), 0, 0) + r0, _, e1 := syscall_syscall6(libc_flistxattr_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(options), 0, 0) sz = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -636,28 +637,28 @@ func flistxattr(fd int, dest *byte, size int, options int) (sz int, err error) { return } -func libc_flistxattr_trampoline() +var libc_flistxattr_trampoline_addr uintptr //go:cgo_import_dynamic libc_flistxattr flistxattr "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func setattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintptr, options int) (err error) { - _, _, e1 := syscall_syscall6(funcPC(libc_setattrlist_trampoline), uintptr(unsafe.Pointer(path)), uintptr(list), uintptr(buf), uintptr(size), uintptr(options), 0) + _, _, e1 := syscall_syscall6(libc_setattrlist_trampoline_addr, uintptr(unsafe.Pointer(path)), uintptr(list), uintptr(buf), uintptr(size), uintptr(options), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_setattrlist_trampoline() +var 
libc_setattrlist_trampoline_addr uintptr //go:cgo_import_dynamic libc_setattrlist setattrlist "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_fcntl_trampoline), uintptr(fd), uintptr(cmd), uintptr(arg)) + r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) val = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -665,35 +666,35 @@ func fcntl(fd int, cmd int, arg int) (val int, err error) { return } -func libc_fcntl_trampoline() +var libc_fcntl_trampoline_addr uintptr //go:cgo_import_dynamic libc_fcntl fcntl "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func kill(pid int, signum int, posix int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_kill_trampoline), uintptr(pid), uintptr(signum), uintptr(posix)) + _, _, e1 := syscall_syscall(libc_kill_trampoline_addr, uintptr(pid), uintptr(signum), uintptr(posix)) if e1 != 0 { err = errnoErr(e1) } return } -func libc_kill_trampoline() +var libc_kill_trampoline_addr uintptr //go:cgo_import_dynamic libc_kill kill "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func ioctl(fd int, req uint, arg uintptr) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_ioctl_trampoline), uintptr(fd), uintptr(req), uintptr(arg)) + _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { err = errnoErr(e1) } return } -func libc_ioctl_trampoline() +var libc_ioctl_trampoline_addr uintptr //go:cgo_import_dynamic libc_ioctl ioctl "/usr/lib/libSystem.B.dylib" @@ -706,28 +707,28 @@ func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := syscall_syscall6(funcPC(libc_sysctl_trampoline), uintptr(_p0), uintptr(len(mib)), 
uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + _, _, e1 := syscall_syscall6(libc_sysctl_trampoline_addr, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) if e1 != 0 { err = errnoErr(e1) } return } -func libc_sysctl_trampoline() +var libc_sysctl_trampoline_addr uintptr //go:cgo_import_dynamic libc_sysctl sysctl "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) { - _, _, e1 := syscall_syscall6(funcPC(libc_sendfile_trampoline), uintptr(infd), uintptr(outfd), uintptr(offset), uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags)) + _, _, e1 := syscall_syscall6(libc_sendfile_trampoline_addr, uintptr(infd), uintptr(outfd), uintptr(offset), uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags)) if e1 != 0 { err = errnoErr(e1) } return } -func libc_sendfile_trampoline() +var libc_sendfile_trampoline_addr uintptr //go:cgo_import_dynamic libc_sendfile sendfile "/usr/lib/libSystem.B.dylib" @@ -739,28 +740,28 @@ func Access(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_access_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_access_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_access_trampoline() +var libc_access_trampoline_addr uintptr //go:cgo_import_dynamic libc_access access "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_adjtime_trampoline), uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) + _, _, e1 := 
syscall_syscall(libc_adjtime_trampoline_addr, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_adjtime_trampoline() +var libc_adjtime_trampoline_addr uintptr //go:cgo_import_dynamic libc_adjtime adjtime "/usr/lib/libSystem.B.dylib" @@ -772,14 +773,14 @@ func Chdir(path string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_chdir_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_chdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_chdir_trampoline() +var libc_chdir_trampoline_addr uintptr //go:cgo_import_dynamic libc_chdir chdir "/usr/lib/libSystem.B.dylib" @@ -791,14 +792,14 @@ func Chflags(path string, flags int) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_chflags_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + _, _, e1 := syscall_syscall(libc_chflags_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_chflags_trampoline() +var libc_chflags_trampoline_addr uintptr //go:cgo_import_dynamic libc_chflags chflags "/usr/lib/libSystem.B.dylib" @@ -810,14 +811,14 @@ func Chmod(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_chmod_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_chmod_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_chmod_trampoline() +var libc_chmod_trampoline_addr uintptr //go:cgo_import_dynamic libc_chmod chmod "/usr/lib/libSystem.B.dylib" @@ -829,14 +830,14 @@ func Chown(path string, uid int, gid int) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_chown_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + _, 
_, e1 := syscall_syscall(libc_chown_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) if e1 != 0 { err = errnoErr(e1) } return } -func libc_chown_trampoline() +var libc_chown_trampoline_addr uintptr //go:cgo_import_dynamic libc_chown chown "/usr/lib/libSystem.B.dylib" @@ -848,42 +849,42 @@ func Chroot(path string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_chroot_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_chroot_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_chroot_trampoline() +var libc_chroot_trampoline_addr uintptr //go:cgo_import_dynamic libc_chroot chroot "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func ClockGettime(clockid int32, time *Timespec) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_clock_gettime_trampoline), uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + _, _, e1 := syscall_syscall(libc_clock_gettime_trampoline_addr, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_clock_gettime_trampoline() +var libc_clock_gettime_trampoline_addr uintptr //go:cgo_import_dynamic libc_clock_gettime clock_gettime "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Close(fd int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_close_trampoline), uintptr(fd), 0, 0) + _, _, e1 := syscall_syscall(libc_close_trampoline_addr, uintptr(fd), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_close_trampoline() +var libc_close_trampoline_addr uintptr //go:cgo_import_dynamic libc_close close "/usr/lib/libSystem.B.dylib" @@ -900,14 +901,14 @@ func Clonefile(src string, dst string, flags int) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_clonefile_trampoline), uintptr(unsafe.Pointer(_p0)), 
uintptr(unsafe.Pointer(_p1)), uintptr(flags)) + _, _, e1 := syscall_syscall(libc_clonefile_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags)) if e1 != 0 { err = errnoErr(e1) } return } -func libc_clonefile_trampoline() +var libc_clonefile_trampoline_addr uintptr //go:cgo_import_dynamic libc_clonefile clonefile "/usr/lib/libSystem.B.dylib" @@ -924,21 +925,21 @@ func Clonefileat(srcDirfd int, src string, dstDirfd int, dst string, flags int) if err != nil { return } - _, _, e1 := syscall_syscall6(funcPC(libc_clonefileat_trampoline), uintptr(srcDirfd), uintptr(unsafe.Pointer(_p0)), uintptr(dstDirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + _, _, e1 := syscall_syscall6(libc_clonefileat_trampoline_addr, uintptr(srcDirfd), uintptr(unsafe.Pointer(_p0)), uintptr(dstDirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_clonefileat_trampoline() +var libc_clonefileat_trampoline_addr uintptr //go:cgo_import_dynamic libc_clonefileat clonefileat "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup(fd int) (nfd int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_dup_trampoline), uintptr(fd), 0, 0) + r0, _, e1 := syscall_syscall(libc_dup_trampoline_addr, uintptr(fd), 0, 0) nfd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -946,21 +947,21 @@ func Dup(fd int) (nfd int, err error) { return } -func libc_dup_trampoline() +var libc_dup_trampoline_addr uintptr //go:cgo_import_dynamic libc_dup dup "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup2(from int, to int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_dup2_trampoline), uintptr(from), uintptr(to), 0) + _, _, e1 := syscall_syscall(libc_dup2_trampoline_addr, uintptr(from), uintptr(to), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_dup2_trampoline() +var libc_dup2_trampoline_addr uintptr 
//go:cgo_import_dynamic libc_dup2 dup2 "/usr/lib/libSystem.B.dylib" @@ -977,25 +978,25 @@ func Exchangedata(path1 string, path2 string, options int) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_exchangedata_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(options)) + _, _, e1 := syscall_syscall(libc_exchangedata_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(options)) if e1 != 0 { err = errnoErr(e1) } return } -func libc_exchangedata_trampoline() +var libc_exchangedata_trampoline_addr uintptr //go:cgo_import_dynamic libc_exchangedata exchangedata "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Exit(code int) { - syscall_syscall(funcPC(libc_exit_trampoline), uintptr(code), 0, 0) + syscall_syscall(libc_exit_trampoline_addr, uintptr(code), 0, 0) return } -func libc_exit_trampoline() +var libc_exit_trampoline_addr uintptr //go:cgo_import_dynamic libc_exit exit "/usr/lib/libSystem.B.dylib" @@ -1007,56 +1008,56 @@ func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall6(funcPC(libc_faccessat_trampoline), uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall6(libc_faccessat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_faccessat_trampoline() +var libc_faccessat_trampoline_addr uintptr //go:cgo_import_dynamic libc_faccessat faccessat "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchdir(fd int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_fchdir_trampoline), uintptr(fd), 0, 0) + _, _, e1 := syscall_syscall(libc_fchdir_trampoline_addr, uintptr(fd), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func 
libc_fchdir_trampoline() +var libc_fchdir_trampoline_addr uintptr //go:cgo_import_dynamic libc_fchdir fchdir "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchflags(fd int, flags int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_fchflags_trampoline), uintptr(fd), uintptr(flags), 0) + _, _, e1 := syscall_syscall(libc_fchflags_trampoline_addr, uintptr(fd), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_fchflags_trampoline() +var libc_fchflags_trampoline_addr uintptr //go:cgo_import_dynamic libc_fchflags fchflags "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchmod(fd int, mode uint32) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_fchmod_trampoline), uintptr(fd), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_fchmod_trampoline_addr, uintptr(fd), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_fchmod_trampoline() +var libc_fchmod_trampoline_addr uintptr //go:cgo_import_dynamic libc_fchmod fchmod "/usr/lib/libSystem.B.dylib" @@ -1068,28 +1069,28 @@ func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall6(funcPC(libc_fchmodat_trampoline), uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall6(libc_fchmodat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_fchmodat_trampoline() +var libc_fchmodat_trampoline_addr uintptr //go:cgo_import_dynamic libc_fchmodat fchmodat "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchown(fd int, uid int, gid int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_fchown_trampoline), uintptr(fd), uintptr(uid), uintptr(gid)) + _, _, e1 := 
syscall_syscall(libc_fchown_trampoline_addr, uintptr(fd), uintptr(uid), uintptr(gid)) if e1 != 0 { err = errnoErr(e1) } return } -func libc_fchown_trampoline() +var libc_fchown_trampoline_addr uintptr //go:cgo_import_dynamic libc_fchown fchown "/usr/lib/libSystem.B.dylib" @@ -1101,14 +1102,14 @@ func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall6(funcPC(libc_fchownat_trampoline), uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) + _, _, e1 := syscall_syscall6(libc_fchownat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_fchownat_trampoline() +var libc_fchownat_trampoline_addr uintptr //go:cgo_import_dynamic libc_fchownat fchownat "/usr/lib/libSystem.B.dylib" @@ -1120,35 +1121,35 @@ func Fclonefileat(srcDirfd int, dstDirfd int, dst string, flags int) (err error) if err != nil { return } - _, _, e1 := syscall_syscall6(funcPC(libc_fclonefileat_trampoline), uintptr(srcDirfd), uintptr(dstDirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall6(libc_fclonefileat_trampoline_addr, uintptr(srcDirfd), uintptr(dstDirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_fclonefileat_trampoline() +var libc_fclonefileat_trampoline_addr uintptr //go:cgo_import_dynamic libc_fclonefileat fclonefileat "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Flock(fd int, how int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_flock_trampoline), uintptr(fd), uintptr(how), 0) + _, _, e1 := syscall_syscall(libc_flock_trampoline_addr, uintptr(fd), uintptr(how), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_flock_trampoline() +var libc_flock_trampoline_addr uintptr //go:cgo_import_dynamic 
libc_flock flock "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fpathconf(fd int, name int) (val int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_fpathconf_trampoline), uintptr(fd), uintptr(name), 0) + r0, _, e1 := syscall_syscall(libc_fpathconf_trampoline_addr, uintptr(fd), uintptr(name), 0) val = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1156,35 +1157,35 @@ func Fpathconf(fd int, name int) (val int, err error) { return } -func libc_fpathconf_trampoline() +var libc_fpathconf_trampoline_addr uintptr //go:cgo_import_dynamic libc_fpathconf fpathconf "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fsync(fd int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_fsync_trampoline), uintptr(fd), 0, 0) + _, _, e1 := syscall_syscall(libc_fsync_trampoline_addr, uintptr(fd), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_fsync_trampoline() +var libc_fsync_trampoline_addr uintptr //go:cgo_import_dynamic libc_fsync fsync "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Ftruncate(fd int, length int64) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_ftruncate_trampoline), uintptr(fd), uintptr(length), 0) + _, _, e1 := syscall_syscall(libc_ftruncate_trampoline_addr, uintptr(fd), uintptr(length), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_ftruncate_trampoline() +var libc_ftruncate_trampoline_addr uintptr //go:cgo_import_dynamic libc_ftruncate ftruncate "/usr/lib/libSystem.B.dylib" @@ -1197,7 +1198,7 @@ func Getcwd(buf []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := syscall_syscall(funcPC(libc_getcwd_trampoline), uintptr(_p0), uintptr(len(buf)), 0) + r0, _, e1 := syscall_syscall(libc_getcwd_trampoline_addr, uintptr(_p0), uintptr(len(buf)), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1205,62 +1206,62 @@ func Getcwd(buf []byte) (n int, 
err error) { return } -func libc_getcwd_trampoline() +var libc_getcwd_trampoline_addr uintptr //go:cgo_import_dynamic libc_getcwd getcwd "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getdtablesize() (size int) { - r0, _, _ := syscall_syscall(funcPC(libc_getdtablesize_trampoline), 0, 0, 0) + r0, _, _ := syscall_syscall(libc_getdtablesize_trampoline_addr, 0, 0, 0) size = int(r0) return } -func libc_getdtablesize_trampoline() +var libc_getdtablesize_trampoline_addr uintptr //go:cgo_import_dynamic libc_getdtablesize getdtablesize "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getegid() (egid int) { - r0, _, _ := syscall_rawSyscall(funcPC(libc_getegid_trampoline), 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getegid_trampoline_addr, 0, 0, 0) egid = int(r0) return } -func libc_getegid_trampoline() +var libc_getegid_trampoline_addr uintptr //go:cgo_import_dynamic libc_getegid getegid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Geteuid() (uid int) { - r0, _, _ := syscall_rawSyscall(funcPC(libc_geteuid_trampoline), 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_geteuid_trampoline_addr, 0, 0, 0) uid = int(r0) return } -func libc_geteuid_trampoline() +var libc_geteuid_trampoline_addr uintptr //go:cgo_import_dynamic libc_geteuid geteuid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getgid() (gid int) { - r0, _, _ := syscall_rawSyscall(funcPC(libc_getgid_trampoline), 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getgid_trampoline_addr, 0, 0, 0) gid = int(r0) return } -func libc_getgid_trampoline() +var libc_getgid_trampoline_addr uintptr //go:cgo_import_dynamic libc_getgid getgid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpgid(pid int) (pgid int, err error) { - r0, _, e1 := 
syscall_rawSyscall(funcPC(libc_getpgid_trampoline), uintptr(pid), 0, 0) + r0, _, e1 := syscall_rawSyscall(libc_getpgid_trampoline_addr, uintptr(pid), 0, 0) pgid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1268,50 +1269,50 @@ func Getpgid(pid int) (pgid int, err error) { return } -func libc_getpgid_trampoline() +var libc_getpgid_trampoline_addr uintptr //go:cgo_import_dynamic libc_getpgid getpgid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpgrp() (pgrp int) { - r0, _, _ := syscall_rawSyscall(funcPC(libc_getpgrp_trampoline), 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getpgrp_trampoline_addr, 0, 0, 0) pgrp = int(r0) return } -func libc_getpgrp_trampoline() +var libc_getpgrp_trampoline_addr uintptr //go:cgo_import_dynamic libc_getpgrp getpgrp "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpid() (pid int) { - r0, _, _ := syscall_rawSyscall(funcPC(libc_getpid_trampoline), 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getpid_trampoline_addr, 0, 0, 0) pid = int(r0) return } -func libc_getpid_trampoline() +var libc_getpid_trampoline_addr uintptr //go:cgo_import_dynamic libc_getpid getpid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getppid() (ppid int) { - r0, _, _ := syscall_rawSyscall(funcPC(libc_getppid_trampoline), 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getppid_trampoline_addr, 0, 0, 0) ppid = int(r0) return } -func libc_getppid_trampoline() +var libc_getppid_trampoline_addr uintptr //go:cgo_import_dynamic libc_getppid getppid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpriority(which int, who int) (prio int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_getpriority_trampoline), uintptr(which), uintptr(who), 0) + r0, _, e1 := syscall_syscall(libc_getpriority_trampoline_addr, uintptr(which), uintptr(who), 0) prio = int(r0) if 
e1 != 0 { err = errnoErr(e1) @@ -1319,42 +1320,42 @@ func Getpriority(which int, who int) (prio int, err error) { return } -func libc_getpriority_trampoline() +var libc_getpriority_trampoline_addr uintptr //go:cgo_import_dynamic libc_getpriority getpriority "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_getrlimit_trampoline), uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + _, _, e1 := syscall_rawSyscall(libc_getrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_getrlimit_trampoline() +var libc_getrlimit_trampoline_addr uintptr //go:cgo_import_dynamic libc_getrlimit getrlimit "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getrusage(who int, rusage *Rusage) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_getrusage_trampoline), uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) + _, _, e1 := syscall_rawSyscall(libc_getrusage_trampoline_addr, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_getrusage_trampoline() +var libc_getrusage_trampoline_addr uintptr //go:cgo_import_dynamic libc_getrusage getrusage "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getsid(pid int) (sid int, err error) { - r0, _, e1 := syscall_rawSyscall(funcPC(libc_getsid_trampoline), uintptr(pid), 0, 0) + r0, _, e1 := syscall_rawSyscall(libc_getsid_trampoline_addr, uintptr(pid), 0, 0) sid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1362,52 +1363,52 @@ func Getsid(pid int) (sid int, err error) { return } -func libc_getsid_trampoline() +var libc_getsid_trampoline_addr uintptr //go:cgo_import_dynamic libc_getsid getsid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT 
func Gettimeofday(tp *Timeval) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_gettimeofday_trampoline), uintptr(unsafe.Pointer(tp)), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_gettimeofday_trampoline_addr, uintptr(unsafe.Pointer(tp)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_gettimeofday_trampoline() +var libc_gettimeofday_trampoline_addr uintptr //go:cgo_import_dynamic libc_gettimeofday gettimeofday "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getuid() (uid int) { - r0, _, _ := syscall_rawSyscall(funcPC(libc_getuid_trampoline), 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getuid_trampoline_addr, 0, 0, 0) uid = int(r0) return } -func libc_getuid_trampoline() +var libc_getuid_trampoline_addr uintptr //go:cgo_import_dynamic libc_getuid getuid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Issetugid() (tainted bool) { - r0, _, _ := syscall_rawSyscall(funcPC(libc_issetugid_trampoline), 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_issetugid_trampoline_addr, 0, 0, 0) tainted = bool(r0 != 0) return } -func libc_issetugid_trampoline() +var libc_issetugid_trampoline_addr uintptr //go:cgo_import_dynamic libc_issetugid issetugid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Kqueue() (fd int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_kqueue_trampoline), 0, 0, 0) + r0, _, e1 := syscall_syscall(libc_kqueue_trampoline_addr, 0, 0, 0) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1415,7 +1416,7 @@ func Kqueue() (fd int, err error) { return } -func libc_kqueue_trampoline() +var libc_kqueue_trampoline_addr uintptr //go:cgo_import_dynamic libc_kqueue kqueue "/usr/lib/libSystem.B.dylib" @@ -1427,14 +1428,14 @@ func Lchown(path string, uid int, gid int) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_lchown_trampoline), 
uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + _, _, e1 := syscall_syscall(libc_lchown_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) if e1 != 0 { err = errnoErr(e1) } return } -func libc_lchown_trampoline() +var libc_lchown_trampoline_addr uintptr //go:cgo_import_dynamic libc_lchown lchown "/usr/lib/libSystem.B.dylib" @@ -1451,14 +1452,14 @@ func Link(path string, link string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_link_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + _, _, e1 := syscall_syscall(libc_link_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_link_trampoline() +var libc_link_trampoline_addr uintptr //go:cgo_import_dynamic libc_link link "/usr/lib/libSystem.B.dylib" @@ -1475,28 +1476,28 @@ func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err er if err != nil { return } - _, _, e1 := syscall_syscall6(funcPC(libc_linkat_trampoline), uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + _, _, e1 := syscall_syscall6(libc_linkat_trampoline_addr, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_linkat_trampoline() +var libc_linkat_trampoline_addr uintptr //go:cgo_import_dynamic libc_linkat linkat "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Listen(s int, backlog int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_listen_trampoline), uintptr(s), uintptr(backlog), 0) + _, _, e1 := syscall_syscall(libc_listen_trampoline_addr, uintptr(s), uintptr(backlog), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_listen_trampoline() +var libc_listen_trampoline_addr uintptr //go:cgo_import_dynamic libc_listen 
listen "/usr/lib/libSystem.B.dylib" @@ -1508,14 +1509,14 @@ func Mkdir(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_mkdir_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_mkdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_mkdir_trampoline() +var libc_mkdir_trampoline_addr uintptr //go:cgo_import_dynamic libc_mkdir mkdir "/usr/lib/libSystem.B.dylib" @@ -1527,14 +1528,14 @@ func Mkdirat(dirfd int, path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_mkdirat_trampoline), uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + _, _, e1 := syscall_syscall(libc_mkdirat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) if e1 != 0 { err = errnoErr(e1) } return } -func libc_mkdirat_trampoline() +var libc_mkdirat_trampoline_addr uintptr //go:cgo_import_dynamic libc_mkdirat mkdirat "/usr/lib/libSystem.B.dylib" @@ -1546,14 +1547,14 @@ func Mkfifo(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_mkfifo_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_mkfifo_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_mkfifo_trampoline() +var libc_mkfifo_trampoline_addr uintptr //go:cgo_import_dynamic libc_mkfifo mkfifo "/usr/lib/libSystem.B.dylib" @@ -1565,14 +1566,14 @@ func Mknod(path string, mode uint32, dev int) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_mknod_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) + _, _, e1 := syscall_syscall(libc_mknod_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) if e1 != 0 { err = errnoErr(e1) } return } -func 
libc_mknod_trampoline() +var libc_mknod_trampoline_addr uintptr //go:cgo_import_dynamic libc_mknod mknod "/usr/lib/libSystem.B.dylib" @@ -1584,7 +1585,7 @@ func Open(path string, mode int, perm uint32) (fd int, err error) { if err != nil { return } - r0, _, e1 := syscall_syscall(funcPC(libc_open_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) + r0, _, e1 := syscall_syscall(libc_open_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1592,7 +1593,7 @@ func Open(path string, mode int, perm uint32) (fd int, err error) { return } -func libc_open_trampoline() +var libc_open_trampoline_addr uintptr //go:cgo_import_dynamic libc_open open "/usr/lib/libSystem.B.dylib" @@ -1604,7 +1605,7 @@ func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { if err != nil { return } - r0, _, e1 := syscall_syscall6(funcPC(libc_openat_trampoline), uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0) + r0, _, e1 := syscall_syscall6(libc_openat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1612,7 +1613,7 @@ func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { return } -func libc_openat_trampoline() +var libc_openat_trampoline_addr uintptr //go:cgo_import_dynamic libc_openat openat "/usr/lib/libSystem.B.dylib" @@ -1624,7 +1625,7 @@ func Pathconf(path string, name int) (val int, err error) { if err != nil { return } - r0, _, e1 := syscall_syscall(funcPC(libc_pathconf_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) + r0, _, e1 := syscall_syscall(libc_pathconf_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) val = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1632,7 +1633,7 @@ func Pathconf(path string, name int) (val int, err error) { return } -func libc_pathconf_trampoline() +var 
libc_pathconf_trampoline_addr uintptr //go:cgo_import_dynamic libc_pathconf pathconf "/usr/lib/libSystem.B.dylib" @@ -1645,7 +1646,7 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := syscall_syscall6(funcPC(libc_pread_trampoline), uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + r0, _, e1 := syscall_syscall6(libc_pread_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1653,7 +1654,7 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { return } -func libc_pread_trampoline() +var libc_pread_trampoline_addr uintptr //go:cgo_import_dynamic libc_pread pread "/usr/lib/libSystem.B.dylib" @@ -1666,7 +1667,7 @@ func Pwrite(fd int, p []byte, offset int64) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := syscall_syscall6(funcPC(libc_pwrite_trampoline), uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + r0, _, e1 := syscall_syscall6(libc_pwrite_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1674,7 +1675,7 @@ func Pwrite(fd int, p []byte, offset int64) (n int, err error) { return } -func libc_pwrite_trampoline() +var libc_pwrite_trampoline_addr uintptr //go:cgo_import_dynamic libc_pwrite pwrite "/usr/lib/libSystem.B.dylib" @@ -1687,7 +1688,7 @@ func read(fd int, p []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := syscall_syscall(funcPC(libc_read_trampoline), uintptr(fd), uintptr(_p0), uintptr(len(p))) + r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1695,7 +1696,7 @@ func read(fd int, p []byte) (n int, err error) { return } -func libc_read_trampoline() +var libc_read_trampoline_addr uintptr //go:cgo_import_dynamic libc_read read 
"/usr/lib/libSystem.B.dylib" @@ -1713,7 +1714,7 @@ func Readlink(path string, buf []byte) (n int, err error) { } else { _p1 = unsafe.Pointer(&_zero) } - r0, _, e1 := syscall_syscall(funcPC(libc_readlink_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) + r0, _, e1 := syscall_syscall(libc_readlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1721,7 +1722,7 @@ func Readlink(path string, buf []byte) (n int, err error) { return } -func libc_readlink_trampoline() +var libc_readlink_trampoline_addr uintptr //go:cgo_import_dynamic libc_readlink readlink "/usr/lib/libSystem.B.dylib" @@ -1739,7 +1740,7 @@ func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { } else { _p1 = unsafe.Pointer(&_zero) } - r0, _, e1 := syscall_syscall6(funcPC(libc_readlinkat_trampoline), uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) + r0, _, e1 := syscall_syscall6(libc_readlinkat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1747,7 +1748,7 @@ func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { return } -func libc_readlinkat_trampoline() +var libc_readlinkat_trampoline_addr uintptr //go:cgo_import_dynamic libc_readlinkat readlinkat "/usr/lib/libSystem.B.dylib" @@ -1764,14 +1765,14 @@ func Rename(from string, to string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_rename_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + _, _, e1 := syscall_syscall(libc_rename_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_rename_trampoline() +var libc_rename_trampoline_addr uintptr //go:cgo_import_dynamic libc_rename rename "/usr/lib/libSystem.B.dylib" @@ -1788,14 +1789,14 @@ func 
Renameat(fromfd int, from string, tofd int, to string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall6(funcPC(libc_renameat_trampoline), uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) + _, _, e1 := syscall_syscall6(libc_renameat_trampoline_addr, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_renameat_trampoline() +var libc_renameat_trampoline_addr uintptr //go:cgo_import_dynamic libc_renameat renameat "/usr/lib/libSystem.B.dylib" @@ -1807,14 +1808,14 @@ func Revoke(path string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_revoke_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_revoke_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_revoke_trampoline() +var libc_revoke_trampoline_addr uintptr //go:cgo_import_dynamic libc_revoke revoke "/usr/lib/libSystem.B.dylib" @@ -1826,21 +1827,21 @@ func Rmdir(path string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_rmdir_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_rmdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_rmdir_trampoline() +var libc_rmdir_trampoline_addr uintptr //go:cgo_import_dynamic libc_rmdir rmdir "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_lseek_trampoline), uintptr(fd), uintptr(offset), uintptr(whence)) + r0, _, e1 := syscall_syscall(libc_lseek_trampoline_addr, uintptr(fd), uintptr(offset), uintptr(whence)) newoffset = int64(r0) if e1 != 0 { err = errnoErr(e1) @@ -1848,14 +1849,14 @@ func Seek(fd int, offset 
int64, whence int) (newoffset int64, err error) { return } -func libc_lseek_trampoline() +var libc_lseek_trampoline_addr uintptr //go:cgo_import_dynamic libc_lseek lseek "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { - r0, _, e1 := syscall_syscall6(funcPC(libc_select_trampoline), uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + r0, _, e1 := syscall_syscall6(libc_select_trampoline_addr, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1863,49 +1864,49 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err return } -func libc_select_trampoline() +var libc_select_trampoline_addr uintptr //go:cgo_import_dynamic libc_select select "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setegid(egid int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_setegid_trampoline), uintptr(egid), 0, 0) + _, _, e1 := syscall_syscall(libc_setegid_trampoline_addr, uintptr(egid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_setegid_trampoline() +var libc_setegid_trampoline_addr uintptr //go:cgo_import_dynamic libc_setegid setegid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Seteuid(euid int) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_seteuid_trampoline), uintptr(euid), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_seteuid_trampoline_addr, uintptr(euid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_seteuid_trampoline() +var libc_seteuid_trampoline_addr uintptr //go:cgo_import_dynamic libc_seteuid seteuid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED 
BY THE COMMAND AT THE TOP; DO NOT EDIT func Setgid(gid int) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_setgid_trampoline), uintptr(gid), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_setgid_trampoline_addr, uintptr(gid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_setgid_trampoline() +var libc_setgid_trampoline_addr uintptr //go:cgo_import_dynamic libc_setgid setgid "/usr/lib/libSystem.B.dylib" @@ -1917,105 +1918,105 @@ func Setlogin(name string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_setlogin_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_setlogin_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_setlogin_trampoline() +var libc_setlogin_trampoline_addr uintptr //go:cgo_import_dynamic libc_setlogin setlogin "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setpgid(pid int, pgid int) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_setpgid_trampoline), uintptr(pid), uintptr(pgid), 0) + _, _, e1 := syscall_rawSyscall(libc_setpgid_trampoline_addr, uintptr(pid), uintptr(pgid), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_setpgid_trampoline() +var libc_setpgid_trampoline_addr uintptr //go:cgo_import_dynamic libc_setpgid setpgid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setpriority(which int, who int, prio int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_setpriority_trampoline), uintptr(which), uintptr(who), uintptr(prio)) + _, _, e1 := syscall_syscall(libc_setpriority_trampoline_addr, uintptr(which), uintptr(who), uintptr(prio)) if e1 != 0 { err = errnoErr(e1) } return } -func libc_setpriority_trampoline() +var libc_setpriority_trampoline_addr uintptr //go:cgo_import_dynamic libc_setpriority setpriority "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE 
COMMAND AT THE TOP; DO NOT EDIT func Setprivexec(flag int) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_setprivexec_trampoline), uintptr(flag), 0, 0) + _, _, e1 := syscall_syscall(libc_setprivexec_trampoline_addr, uintptr(flag), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_setprivexec_trampoline() +var libc_setprivexec_trampoline_addr uintptr //go:cgo_import_dynamic libc_setprivexec setprivexec "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setregid(rgid int, egid int) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_setregid_trampoline), uintptr(rgid), uintptr(egid), 0) + _, _, e1 := syscall_rawSyscall(libc_setregid_trampoline_addr, uintptr(rgid), uintptr(egid), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_setregid_trampoline() +var libc_setregid_trampoline_addr uintptr //go:cgo_import_dynamic libc_setregid setregid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_setreuid_trampoline), uintptr(ruid), uintptr(euid), 0) + _, _, e1 := syscall_rawSyscall(libc_setreuid_trampoline_addr, uintptr(ruid), uintptr(euid), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_setreuid_trampoline() +var libc_setreuid_trampoline_addr uintptr //go:cgo_import_dynamic libc_setreuid setreuid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_setrlimit_trampoline), uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + _, _, e1 := syscall_rawSyscall(libc_setrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_setrlimit_trampoline() +var libc_setrlimit_trampoline_addr uintptr //go:cgo_import_dynamic libc_setrlimit setrlimit 
"/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setsid() (pid int, err error) { - r0, _, e1 := syscall_rawSyscall(funcPC(libc_setsid_trampoline), 0, 0, 0) + r0, _, e1 := syscall_rawSyscall(libc_setsid_trampoline_addr, 0, 0, 0) pid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -2023,35 +2024,35 @@ func Setsid() (pid int, err error) { return } -func libc_setsid_trampoline() +var libc_setsid_trampoline_addr uintptr //go:cgo_import_dynamic libc_setsid setsid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Settimeofday(tp *Timeval) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_settimeofday_trampoline), uintptr(unsafe.Pointer(tp)), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_settimeofday_trampoline_addr, uintptr(unsafe.Pointer(tp)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_settimeofday_trampoline() +var libc_settimeofday_trampoline_addr uintptr //go:cgo_import_dynamic libc_settimeofday settimeofday "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setuid(uid int) (err error) { - _, _, e1 := syscall_rawSyscall(funcPC(libc_setuid_trampoline), uintptr(uid), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_setuid_trampoline_addr, uintptr(uid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_setuid_trampoline() +var libc_setuid_trampoline_addr uintptr //go:cgo_import_dynamic libc_setuid setuid "/usr/lib/libSystem.B.dylib" @@ -2068,14 +2069,14 @@ func Symlink(path string, link string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_symlink_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + _, _, e1 := syscall_syscall(libc_symlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_symlink_trampoline() +var libc_symlink_trampoline_addr uintptr 
//go:cgo_import_dynamic libc_symlink symlink "/usr/lib/libSystem.B.dylib" @@ -2092,28 +2093,28 @@ func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_symlinkat_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) + _, _, e1 := syscall_syscall(libc_symlinkat_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) if e1 != 0 { err = errnoErr(e1) } return } -func libc_symlinkat_trampoline() +var libc_symlinkat_trampoline_addr uintptr //go:cgo_import_dynamic libc_symlinkat symlinkat "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Sync() (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_sync_trampoline), 0, 0, 0) + _, _, e1 := syscall_syscall(libc_sync_trampoline_addr, 0, 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_sync_trampoline() +var libc_sync_trampoline_addr uintptr //go:cgo_import_dynamic libc_sync sync "/usr/lib/libSystem.B.dylib" @@ -2125,26 +2126,26 @@ func Truncate(path string, length int64) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_truncate_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(length), 0) + _, _, e1 := syscall_syscall(libc_truncate_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_truncate_trampoline() +var libc_truncate_trampoline_addr uintptr //go:cgo_import_dynamic libc_truncate truncate "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Umask(newmask int) (oldmask int) { - r0, _, _ := syscall_syscall(funcPC(libc_umask_trampoline), uintptr(newmask), 0, 0) + r0, _, _ := syscall_syscall(libc_umask_trampoline_addr, uintptr(newmask), 0, 0) oldmask = int(r0) return } -func libc_umask_trampoline() +var libc_umask_trampoline_addr uintptr 
//go:cgo_import_dynamic libc_umask umask "/usr/lib/libSystem.B.dylib" @@ -2156,14 +2157,14 @@ func Undelete(path string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_undelete_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_undelete_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_undelete_trampoline() +var libc_undelete_trampoline_addr uintptr //go:cgo_import_dynamic libc_undelete undelete "/usr/lib/libSystem.B.dylib" @@ -2175,14 +2176,14 @@ func Unlink(path string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_unlink_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_unlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_unlink_trampoline() +var libc_unlink_trampoline_addr uintptr //go:cgo_import_dynamic libc_unlink unlink "/usr/lib/libSystem.B.dylib" @@ -2194,14 +2195,14 @@ func Unlinkat(dirfd int, path string, flags int) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_unlinkat_trampoline), uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + _, _, e1 := syscall_syscall(libc_unlinkat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) if e1 != 0 { err = errnoErr(e1) } return } -func libc_unlinkat_trampoline() +var libc_unlinkat_trampoline_addr uintptr //go:cgo_import_dynamic libc_unlinkat unlinkat "/usr/lib/libSystem.B.dylib" @@ -2213,14 +2214,14 @@ func Unmount(path string, flags int) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_unmount_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + _, _, e1 := syscall_syscall(libc_unmount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_unmount_trampoline() +var 
libc_unmount_trampoline_addr uintptr //go:cgo_import_dynamic libc_unmount unmount "/usr/lib/libSystem.B.dylib" @@ -2233,7 +2234,7 @@ func write(fd int, p []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := syscall_syscall(funcPC(libc_write_trampoline), uintptr(fd), uintptr(_p0), uintptr(len(p))) + r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -2241,14 +2242,14 @@ func write(fd int, p []byte) (n int, err error) { return } -func libc_write_trampoline() +var libc_write_trampoline_addr uintptr //go:cgo_import_dynamic libc_write write "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { - r0, _, e1 := syscall_syscall6(funcPC(libc_mmap_trampoline), uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos)) + r0, _, e1 := syscall_syscall6(libc_mmap_trampoline_addr, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos)) ret = uintptr(r0) if e1 != 0 { err = errnoErr(e1) @@ -2256,28 +2257,28 @@ func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) ( return } -func libc_mmap_trampoline() +var libc_mmap_trampoline_addr uintptr //go:cgo_import_dynamic libc_mmap mmap "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func munmap(addr uintptr, length uintptr) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_munmap_trampoline), uintptr(addr), uintptr(length), 0) + _, _, e1 := syscall_syscall(libc_munmap_trampoline_addr, uintptr(addr), uintptr(length), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_munmap_trampoline() +var libc_munmap_trampoline_addr uintptr //go:cgo_import_dynamic libc_munmap munmap "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY 
THE COMMAND AT THE TOP; DO NOT EDIT func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_read_trampoline), uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -2288,7 +2289,7 @@ func readlen(fd int, buf *byte, nbuf int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_write_trampoline), uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -2299,14 +2300,14 @@ func writelen(fd int, buf *byte, nbuf int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fstat(fd int, stat *Stat_t) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_fstat_trampoline), uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_fstat_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_fstat_trampoline() +var libc_fstat_trampoline_addr uintptr //go:cgo_import_dynamic libc_fstat fstat "/usr/lib/libSystem.B.dylib" @@ -2318,35 +2319,35 @@ func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall6(funcPC(libc_fstatat_trampoline), uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall6(libc_fstatat_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_fstatat_trampoline() +var 
libc_fstatat_trampoline_addr uintptr //go:cgo_import_dynamic libc_fstatat fstatat "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fstatfs(fd int, stat *Statfs_t) (err error) { - _, _, e1 := syscall_syscall(funcPC(libc_fstatfs_trampoline), uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_fstatfs_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_fstatfs_trampoline() +var libc_fstatfs_trampoline_addr uintptr //go:cgo_import_dynamic libc_fstatfs fstatfs "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getfsstat(buf unsafe.Pointer, size uintptr, flags int) (n int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_getfsstat_trampoline), uintptr(buf), uintptr(size), uintptr(flags)) + r0, _, e1 := syscall_syscall(libc_getfsstat_trampoline_addr, uintptr(buf), uintptr(size), uintptr(flags)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -2354,7 +2355,7 @@ func getfsstat(buf unsafe.Pointer, size uintptr, flags int) (n int, err error) { return } -func libc_getfsstat_trampoline() +var libc_getfsstat_trampoline_addr uintptr //go:cgo_import_dynamic libc_getfsstat getfsstat "/usr/lib/libSystem.B.dylib" @@ -2366,28 +2367,28 @@ func Lstat(path string, stat *Stat_t) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_lstat_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_lstat_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_lstat_trampoline() +var libc_lstat_trampoline_addr uintptr //go:cgo_import_dynamic libc_lstat lstat "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func ptrace1(request int, pid int, addr uintptr, data uintptr) (err error) { - _, _, e1 := 
syscall_syscall6(funcPC(libc_ptrace_trampoline), uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + _, _, e1 := syscall_syscall6(libc_ptrace_trampoline_addr, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_ptrace_trampoline() +var libc_ptrace_trampoline_addr uintptr //go:cgo_import_dynamic libc_ptrace ptrace "/usr/lib/libSystem.B.dylib" @@ -2399,14 +2400,14 @@ func Stat(path string, stat *Stat_t) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_stat_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_stat_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_stat_trampoline() +var libc_stat_trampoline_addr uintptr //go:cgo_import_dynamic libc_stat stat "/usr/lib/libSystem.B.dylib" @@ -2418,13 +2419,13 @@ func Statfs(path string, stat *Statfs_t) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(funcPC(libc_statfs_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_statfs_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_statfs_trampoline() +var libc_statfs_trampoline_addr uintptr //go:cgo_import_dynamic libc_statfs statfs "/usr/lib/libSystem.B.dylib" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s index 53c402bf6..33e19776d 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s @@ -1,290 +1,859 @@ // go run mkasm_darwin.go arm64 // Code generated by the command above; DO NOT EDIT. 
+//go:build go1.12 // +build go1.12 #include "textflag.h" -TEXT ·libc_getgroups_trampoline(SB),NOSPLIT,$0-0 + +TEXT libc_getgroups_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getgroups(SB) -TEXT ·libc_setgroups_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_getgroups_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getgroups_trampoline_addr(SB)/8, $libc_getgroups_trampoline<>(SB) + +TEXT libc_setgroups_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setgroups(SB) -TEXT ·libc_wait4_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_setgroups_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setgroups_trampoline_addr(SB)/8, $libc_setgroups_trampoline<>(SB) + +TEXT libc_wait4_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_wait4(SB) -TEXT ·libc_accept_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_wait4_trampoline_addr(SB), RODATA, $8 +DATA ·libc_wait4_trampoline_addr(SB)/8, $libc_wait4_trampoline<>(SB) + +TEXT libc_accept_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_accept(SB) -TEXT ·libc_bind_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_accept_trampoline_addr(SB), RODATA, $8 +DATA ·libc_accept_trampoline_addr(SB)/8, $libc_accept_trampoline<>(SB) + +TEXT libc_bind_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_bind(SB) -TEXT ·libc_connect_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_bind_trampoline_addr(SB), RODATA, $8 +DATA ·libc_bind_trampoline_addr(SB)/8, $libc_bind_trampoline<>(SB) + +TEXT libc_connect_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_connect(SB) -TEXT ·libc_socket_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_connect_trampoline_addr(SB), RODATA, $8 +DATA ·libc_connect_trampoline_addr(SB)/8, $libc_connect_trampoline<>(SB) + +TEXT libc_socket_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_socket(SB) -TEXT ·libc_getsockopt_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_socket_trampoline_addr(SB), RODATA, $8 +DATA ·libc_socket_trampoline_addr(SB)/8, $libc_socket_trampoline<>(SB) + +TEXT libc_getsockopt_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getsockopt(SB) -TEXT ·libc_setsockopt_trampoline(SB),NOSPLIT,$0-0 + +GLOBL 
·libc_getsockopt_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getsockopt_trampoline_addr(SB)/8, $libc_getsockopt_trampoline<>(SB) + +TEXT libc_setsockopt_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setsockopt(SB) -TEXT ·libc_getpeername_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_setsockopt_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setsockopt_trampoline_addr(SB)/8, $libc_setsockopt_trampoline<>(SB) + +TEXT libc_getpeername_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpeername(SB) -TEXT ·libc_getsockname_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_getpeername_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpeername_trampoline_addr(SB)/8, $libc_getpeername_trampoline<>(SB) + +TEXT libc_getsockname_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getsockname(SB) -TEXT ·libc_shutdown_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_getsockname_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getsockname_trampoline_addr(SB)/8, $libc_getsockname_trampoline<>(SB) + +TEXT libc_shutdown_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_shutdown(SB) -TEXT ·libc_socketpair_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_shutdown_trampoline_addr(SB), RODATA, $8 +DATA ·libc_shutdown_trampoline_addr(SB)/8, $libc_shutdown_trampoline<>(SB) + +TEXT libc_socketpair_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_socketpair(SB) -TEXT ·libc_recvfrom_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_socketpair_trampoline_addr(SB), RODATA, $8 +DATA ·libc_socketpair_trampoline_addr(SB)/8, $libc_socketpair_trampoline<>(SB) + +TEXT libc_recvfrom_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_recvfrom(SB) -TEXT ·libc_sendto_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_recvfrom_trampoline_addr(SB), RODATA, $8 +DATA ·libc_recvfrom_trampoline_addr(SB)/8, $libc_recvfrom_trampoline<>(SB) + +TEXT libc_sendto_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendto(SB) -TEXT ·libc_recvmsg_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_sendto_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sendto_trampoline_addr(SB)/8, $libc_sendto_trampoline<>(SB) + +TEXT 
libc_recvmsg_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_recvmsg(SB) -TEXT ·libc_sendmsg_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_recvmsg_trampoline_addr(SB), RODATA, $8 +DATA ·libc_recvmsg_trampoline_addr(SB)/8, $libc_recvmsg_trampoline<>(SB) + +TEXT libc_sendmsg_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendmsg(SB) -TEXT ·libc_kevent_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_sendmsg_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sendmsg_trampoline_addr(SB)/8, $libc_sendmsg_trampoline<>(SB) + +TEXT libc_kevent_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_kevent(SB) -TEXT ·libc_utimes_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_kevent_trampoline_addr(SB), RODATA, $8 +DATA ·libc_kevent_trampoline_addr(SB)/8, $libc_kevent_trampoline<>(SB) + +TEXT libc_utimes_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_utimes(SB) -TEXT ·libc_futimes_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_utimes_trampoline_addr(SB), RODATA, $8 +DATA ·libc_utimes_trampoline_addr(SB)/8, $libc_utimes_trampoline<>(SB) + +TEXT libc_futimes_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_futimes(SB) -TEXT ·libc_poll_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_futimes_trampoline_addr(SB), RODATA, $8 +DATA ·libc_futimes_trampoline_addr(SB)/8, $libc_futimes_trampoline<>(SB) + +TEXT libc_poll_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_poll(SB) -TEXT ·libc_madvise_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_poll_trampoline_addr(SB), RODATA, $8 +DATA ·libc_poll_trampoline_addr(SB)/8, $libc_poll_trampoline<>(SB) + +TEXT libc_madvise_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_madvise(SB) -TEXT ·libc_mlock_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_madvise_trampoline_addr(SB), RODATA, $8 +DATA ·libc_madvise_trampoline_addr(SB)/8, $libc_madvise_trampoline<>(SB) + +TEXT libc_mlock_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mlock(SB) -TEXT ·libc_mlockall_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_mlock_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mlock_trampoline_addr(SB)/8, $libc_mlock_trampoline<>(SB) + +TEXT libc_mlockall_trampoline<>(SB),NOSPLIT,$0-0 JMP 
libc_mlockall(SB) -TEXT ·libc_mprotect_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_mlockall_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mlockall_trampoline_addr(SB)/8, $libc_mlockall_trampoline<>(SB) + +TEXT libc_mprotect_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mprotect(SB) -TEXT ·libc_msync_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_mprotect_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mprotect_trampoline_addr(SB)/8, $libc_mprotect_trampoline<>(SB) + +TEXT libc_msync_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_msync(SB) -TEXT ·libc_munlock_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_msync_trampoline_addr(SB), RODATA, $8 +DATA ·libc_msync_trampoline_addr(SB)/8, $libc_msync_trampoline<>(SB) + +TEXT libc_munlock_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_munlock(SB) -TEXT ·libc_munlockall_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_munlock_trampoline_addr(SB), RODATA, $8 +DATA ·libc_munlock_trampoline_addr(SB)/8, $libc_munlock_trampoline<>(SB) + +TEXT libc_munlockall_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_munlockall(SB) -TEXT ·libc_pipe_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_munlockall_trampoline_addr(SB), RODATA, $8 +DATA ·libc_munlockall_trampoline_addr(SB)/8, $libc_munlockall_trampoline<>(SB) + +TEXT libc_pipe_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pipe(SB) -TEXT ·libc_getxattr_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_pipe_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pipe_trampoline_addr(SB)/8, $libc_pipe_trampoline<>(SB) + +TEXT libc_getxattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getxattr(SB) -TEXT ·libc_fgetxattr_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_getxattr_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getxattr_trampoline_addr(SB)/8, $libc_getxattr_trampoline<>(SB) + +TEXT libc_fgetxattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fgetxattr(SB) -TEXT ·libc_setxattr_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_fgetxattr_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fgetxattr_trampoline_addr(SB)/8, $libc_fgetxattr_trampoline<>(SB) + +TEXT 
libc_setxattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setxattr(SB) -TEXT ·libc_fsetxattr_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_setxattr_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setxattr_trampoline_addr(SB)/8, $libc_setxattr_trampoline<>(SB) + +TEXT libc_fsetxattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fsetxattr(SB) -TEXT ·libc_removexattr_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_fsetxattr_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fsetxattr_trampoline_addr(SB)/8, $libc_fsetxattr_trampoline<>(SB) + +TEXT libc_removexattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_removexattr(SB) -TEXT ·libc_fremovexattr_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_removexattr_trampoline_addr(SB), RODATA, $8 +DATA ·libc_removexattr_trampoline_addr(SB)/8, $libc_removexattr_trampoline<>(SB) + +TEXT libc_fremovexattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fremovexattr(SB) -TEXT ·libc_listxattr_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_fremovexattr_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fremovexattr_trampoline_addr(SB)/8, $libc_fremovexattr_trampoline<>(SB) + +TEXT libc_listxattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_listxattr(SB) -TEXT ·libc_flistxattr_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_listxattr_trampoline_addr(SB), RODATA, $8 +DATA ·libc_listxattr_trampoline_addr(SB)/8, $libc_listxattr_trampoline<>(SB) + +TEXT libc_flistxattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_flistxattr(SB) -TEXT ·libc_setattrlist_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_flistxattr_trampoline_addr(SB), RODATA, $8 +DATA ·libc_flistxattr_trampoline_addr(SB)/8, $libc_flistxattr_trampoline<>(SB) + +TEXT libc_setattrlist_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setattrlist(SB) -TEXT ·libc_fcntl_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_setattrlist_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setattrlist_trampoline_addr(SB)/8, $libc_setattrlist_trampoline<>(SB) + +TEXT libc_fcntl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fcntl(SB) -TEXT ·libc_kill_trampoline(SB),NOSPLIT,$0-0 + +GLOBL 
·libc_fcntl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fcntl_trampoline_addr(SB)/8, $libc_fcntl_trampoline<>(SB) + +TEXT libc_kill_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_kill(SB) -TEXT ·libc_ioctl_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_kill_trampoline_addr(SB), RODATA, $8 +DATA ·libc_kill_trampoline_addr(SB)/8, $libc_kill_trampoline<>(SB) + +TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ioctl(SB) -TEXT ·libc_sysctl_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_ioctl_trampoline_addr(SB)/8, $libc_ioctl_trampoline<>(SB) + +TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sysctl(SB) -TEXT ·libc_sendfile_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) + +TEXT libc_sendfile_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendfile(SB) -TEXT ·libc_access_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_sendfile_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sendfile_trampoline_addr(SB)/8, $libc_sendfile_trampoline<>(SB) + +TEXT libc_access_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_access(SB) -TEXT ·libc_adjtime_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_access_trampoline_addr(SB), RODATA, $8 +DATA ·libc_access_trampoline_addr(SB)/8, $libc_access_trampoline<>(SB) + +TEXT libc_adjtime_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_adjtime(SB) -TEXT ·libc_chdir_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_adjtime_trampoline_addr(SB), RODATA, $8 +DATA ·libc_adjtime_trampoline_addr(SB)/8, $libc_adjtime_trampoline<>(SB) + +TEXT libc_chdir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chdir(SB) -TEXT ·libc_chflags_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_chdir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chdir_trampoline_addr(SB)/8, $libc_chdir_trampoline<>(SB) + +TEXT libc_chflags_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chflags(SB) -TEXT ·libc_chmod_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_chflags_trampoline_addr(SB), RODATA, $8 +DATA 
·libc_chflags_trampoline_addr(SB)/8, $libc_chflags_trampoline<>(SB) + +TEXT libc_chmod_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chmod(SB) -TEXT ·libc_chown_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_chmod_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chmod_trampoline_addr(SB)/8, $libc_chmod_trampoline<>(SB) + +TEXT libc_chown_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chown(SB) -TEXT ·libc_chroot_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_chown_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chown_trampoline_addr(SB)/8, $libc_chown_trampoline<>(SB) + +TEXT libc_chroot_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chroot(SB) -TEXT ·libc_clock_gettime_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_chroot_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chroot_trampoline_addr(SB)/8, $libc_chroot_trampoline<>(SB) + +TEXT libc_clock_gettime_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_clock_gettime(SB) -TEXT ·libc_close_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_clock_gettime_trampoline_addr(SB), RODATA, $8 +DATA ·libc_clock_gettime_trampoline_addr(SB)/8, $libc_clock_gettime_trampoline<>(SB) + +TEXT libc_close_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_close(SB) -TEXT ·libc_clonefile_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_close_trampoline_addr(SB), RODATA, $8 +DATA ·libc_close_trampoline_addr(SB)/8, $libc_close_trampoline<>(SB) + +TEXT libc_clonefile_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_clonefile(SB) -TEXT ·libc_clonefileat_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_clonefile_trampoline_addr(SB), RODATA, $8 +DATA ·libc_clonefile_trampoline_addr(SB)/8, $libc_clonefile_trampoline<>(SB) + +TEXT libc_clonefileat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_clonefileat(SB) -TEXT ·libc_dup_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_clonefileat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_clonefileat_trampoline_addr(SB)/8, $libc_clonefileat_trampoline<>(SB) + +TEXT libc_dup_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_dup(SB) -TEXT ·libc_dup2_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_dup_trampoline_addr(SB), RODATA, $8 
+DATA ·libc_dup_trampoline_addr(SB)/8, $libc_dup_trampoline<>(SB) + +TEXT libc_dup2_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_dup2(SB) -TEXT ·libc_exchangedata_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_dup2_trampoline_addr(SB), RODATA, $8 +DATA ·libc_dup2_trampoline_addr(SB)/8, $libc_dup2_trampoline<>(SB) + +TEXT libc_exchangedata_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_exchangedata(SB) -TEXT ·libc_exit_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_exchangedata_trampoline_addr(SB), RODATA, $8 +DATA ·libc_exchangedata_trampoline_addr(SB)/8, $libc_exchangedata_trampoline<>(SB) + +TEXT libc_exit_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_exit(SB) -TEXT ·libc_faccessat_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_exit_trampoline_addr(SB), RODATA, $8 +DATA ·libc_exit_trampoline_addr(SB)/8, $libc_exit_trampoline<>(SB) + +TEXT libc_faccessat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_faccessat(SB) -TEXT ·libc_fchdir_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_faccessat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_faccessat_trampoline_addr(SB)/8, $libc_faccessat_trampoline<>(SB) + +TEXT libc_fchdir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchdir(SB) -TEXT ·libc_fchflags_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_fchdir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchdir_trampoline_addr(SB)/8, $libc_fchdir_trampoline<>(SB) + +TEXT libc_fchflags_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchflags(SB) -TEXT ·libc_fchmod_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_fchflags_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchflags_trampoline_addr(SB)/8, $libc_fchflags_trampoline<>(SB) + +TEXT libc_fchmod_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchmod(SB) -TEXT ·libc_fchmodat_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_fchmod_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchmod_trampoline_addr(SB)/8, $libc_fchmod_trampoline<>(SB) + +TEXT libc_fchmodat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchmodat(SB) -TEXT ·libc_fchown_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_fchmodat_trampoline_addr(SB), RODATA, $8 +DATA 
·libc_fchmodat_trampoline_addr(SB)/8, $libc_fchmodat_trampoline<>(SB) + +TEXT libc_fchown_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchown(SB) -TEXT ·libc_fchownat_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_fchown_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchown_trampoline_addr(SB)/8, $libc_fchown_trampoline<>(SB) + +TEXT libc_fchownat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchownat(SB) -TEXT ·libc_fclonefileat_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_fchownat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchownat_trampoline_addr(SB)/8, $libc_fchownat_trampoline<>(SB) + +TEXT libc_fclonefileat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fclonefileat(SB) -TEXT ·libc_flock_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_fclonefileat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fclonefileat_trampoline_addr(SB)/8, $libc_fclonefileat_trampoline<>(SB) + +TEXT libc_flock_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_flock(SB) -TEXT ·libc_fpathconf_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_flock_trampoline_addr(SB), RODATA, $8 +DATA ·libc_flock_trampoline_addr(SB)/8, $libc_flock_trampoline<>(SB) + +TEXT libc_fpathconf_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fpathconf(SB) -TEXT ·libc_fsync_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_fpathconf_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fpathconf_trampoline_addr(SB)/8, $libc_fpathconf_trampoline<>(SB) + +TEXT libc_fsync_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fsync(SB) -TEXT ·libc_ftruncate_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_fsync_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fsync_trampoline_addr(SB)/8, $libc_fsync_trampoline<>(SB) + +TEXT libc_ftruncate_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ftruncate(SB) -TEXT ·libc_getcwd_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_ftruncate_trampoline_addr(SB), RODATA, $8 +DATA ·libc_ftruncate_trampoline_addr(SB)/8, $libc_ftruncate_trampoline<>(SB) + +TEXT libc_getcwd_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getcwd(SB) -TEXT ·libc_getdtablesize_trampoline(SB),NOSPLIT,$0-0 + +GLOBL 
·libc_getcwd_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getcwd_trampoline_addr(SB)/8, $libc_getcwd_trampoline<>(SB) + +TEXT libc_getdtablesize_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getdtablesize(SB) -TEXT ·libc_getegid_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_getdtablesize_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getdtablesize_trampoline_addr(SB)/8, $libc_getdtablesize_trampoline<>(SB) + +TEXT libc_getegid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getegid(SB) -TEXT ·libc_geteuid_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_getegid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getegid_trampoline_addr(SB)/8, $libc_getegid_trampoline<>(SB) + +TEXT libc_geteuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_geteuid(SB) -TEXT ·libc_getgid_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_geteuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_geteuid_trampoline_addr(SB)/8, $libc_geteuid_trampoline<>(SB) + +TEXT libc_getgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getgid(SB) -TEXT ·libc_getpgid_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_getgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getgid_trampoline_addr(SB)/8, $libc_getgid_trampoline<>(SB) + +TEXT libc_getpgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpgid(SB) -TEXT ·libc_getpgrp_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_getpgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpgid_trampoline_addr(SB)/8, $libc_getpgid_trampoline<>(SB) + +TEXT libc_getpgrp_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpgrp(SB) -TEXT ·libc_getpid_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_getpgrp_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpgrp_trampoline_addr(SB)/8, $libc_getpgrp_trampoline<>(SB) + +TEXT libc_getpid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpid(SB) -TEXT ·libc_getppid_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_getpid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpid_trampoline_addr(SB)/8, $libc_getpid_trampoline<>(SB) + +TEXT libc_getppid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getppid(SB) -TEXT 
·libc_getpriority_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_getppid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getppid_trampoline_addr(SB)/8, $libc_getppid_trampoline<>(SB) + +TEXT libc_getpriority_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpriority(SB) -TEXT ·libc_getrlimit_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_getpriority_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpriority_trampoline_addr(SB)/8, $libc_getpriority_trampoline<>(SB) + +TEXT libc_getrlimit_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getrlimit(SB) -TEXT ·libc_getrusage_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_getrlimit_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getrlimit_trampoline_addr(SB)/8, $libc_getrlimit_trampoline<>(SB) + +TEXT libc_getrusage_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getrusage(SB) -TEXT ·libc_getsid_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_getrusage_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getrusage_trampoline_addr(SB)/8, $libc_getrusage_trampoline<>(SB) + +TEXT libc_getsid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getsid(SB) -TEXT ·libc_gettimeofday_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_getsid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getsid_trampoline_addr(SB)/8, $libc_getsid_trampoline<>(SB) + +TEXT libc_gettimeofday_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_gettimeofday(SB) -TEXT ·libc_getuid_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_gettimeofday_trampoline_addr(SB), RODATA, $8 +DATA ·libc_gettimeofday_trampoline_addr(SB)/8, $libc_gettimeofday_trampoline<>(SB) + +TEXT libc_getuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getuid(SB) -TEXT ·libc_issetugid_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_getuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getuid_trampoline_addr(SB)/8, $libc_getuid_trampoline<>(SB) + +TEXT libc_issetugid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_issetugid(SB) -TEXT ·libc_kqueue_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_issetugid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_issetugid_trampoline_addr(SB)/8, $libc_issetugid_trampoline<>(SB) + +TEXT 
libc_kqueue_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_kqueue(SB) -TEXT ·libc_lchown_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_kqueue_trampoline_addr(SB), RODATA, $8 +DATA ·libc_kqueue_trampoline_addr(SB)/8, $libc_kqueue_trampoline<>(SB) + +TEXT libc_lchown_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_lchown(SB) -TEXT ·libc_link_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_lchown_trampoline_addr(SB), RODATA, $8 +DATA ·libc_lchown_trampoline_addr(SB)/8, $libc_lchown_trampoline<>(SB) + +TEXT libc_link_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_link(SB) -TEXT ·libc_linkat_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_link_trampoline_addr(SB), RODATA, $8 +DATA ·libc_link_trampoline_addr(SB)/8, $libc_link_trampoline<>(SB) + +TEXT libc_linkat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_linkat(SB) -TEXT ·libc_listen_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_linkat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_linkat_trampoline_addr(SB)/8, $libc_linkat_trampoline<>(SB) + +TEXT libc_listen_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_listen(SB) -TEXT ·libc_mkdir_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_listen_trampoline_addr(SB), RODATA, $8 +DATA ·libc_listen_trampoline_addr(SB)/8, $libc_listen_trampoline<>(SB) + +TEXT libc_mkdir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mkdir(SB) -TEXT ·libc_mkdirat_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_mkdir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mkdir_trampoline_addr(SB)/8, $libc_mkdir_trampoline<>(SB) + +TEXT libc_mkdirat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mkdirat(SB) -TEXT ·libc_mkfifo_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_mkdirat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mkdirat_trampoline_addr(SB)/8, $libc_mkdirat_trampoline<>(SB) + +TEXT libc_mkfifo_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mkfifo(SB) -TEXT ·libc_mknod_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_mkfifo_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mkfifo_trampoline_addr(SB)/8, $libc_mkfifo_trampoline<>(SB) + +TEXT libc_mknod_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mknod(SB) -TEXT 
·libc_open_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_mknod_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mknod_trampoline_addr(SB)/8, $libc_mknod_trampoline<>(SB) + +TEXT libc_open_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_open(SB) -TEXT ·libc_openat_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_open_trampoline_addr(SB), RODATA, $8 +DATA ·libc_open_trampoline_addr(SB)/8, $libc_open_trampoline<>(SB) + +TEXT libc_openat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_openat(SB) -TEXT ·libc_pathconf_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_openat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_openat_trampoline_addr(SB)/8, $libc_openat_trampoline<>(SB) + +TEXT libc_pathconf_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pathconf(SB) -TEXT ·libc_pread_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_pathconf_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pathconf_trampoline_addr(SB)/8, $libc_pathconf_trampoline<>(SB) + +TEXT libc_pread_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pread(SB) -TEXT ·libc_pwrite_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_pread_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pread_trampoline_addr(SB)/8, $libc_pread_trampoline<>(SB) + +TEXT libc_pwrite_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pwrite(SB) -TEXT ·libc_read_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_pwrite_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pwrite_trampoline_addr(SB)/8, $libc_pwrite_trampoline<>(SB) + +TEXT libc_read_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_read(SB) -TEXT ·libc_readlink_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_read_trampoline_addr(SB), RODATA, $8 +DATA ·libc_read_trampoline_addr(SB)/8, $libc_read_trampoline<>(SB) + +TEXT libc_readlink_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_readlink(SB) -TEXT ·libc_readlinkat_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_readlink_trampoline_addr(SB), RODATA, $8 +DATA ·libc_readlink_trampoline_addr(SB)/8, $libc_readlink_trampoline<>(SB) + +TEXT libc_readlinkat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_readlinkat(SB) -TEXT ·libc_rename_trampoline(SB),NOSPLIT,$0-0 + +GLOBL 
·libc_readlinkat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_readlinkat_trampoline_addr(SB)/8, $libc_readlinkat_trampoline<>(SB) + +TEXT libc_rename_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_rename(SB) -TEXT ·libc_renameat_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_rename_trampoline_addr(SB), RODATA, $8 +DATA ·libc_rename_trampoline_addr(SB)/8, $libc_rename_trampoline<>(SB) + +TEXT libc_renameat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_renameat(SB) -TEXT ·libc_revoke_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_renameat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_renameat_trampoline_addr(SB)/8, $libc_renameat_trampoline<>(SB) + +TEXT libc_revoke_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_revoke(SB) -TEXT ·libc_rmdir_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_revoke_trampoline_addr(SB), RODATA, $8 +DATA ·libc_revoke_trampoline_addr(SB)/8, $libc_revoke_trampoline<>(SB) + +TEXT libc_rmdir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_rmdir(SB) -TEXT ·libc_lseek_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_rmdir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_rmdir_trampoline_addr(SB)/8, $libc_rmdir_trampoline<>(SB) + +TEXT libc_lseek_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_lseek(SB) -TEXT ·libc_select_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_lseek_trampoline_addr(SB), RODATA, $8 +DATA ·libc_lseek_trampoline_addr(SB)/8, $libc_lseek_trampoline<>(SB) + +TEXT libc_select_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_select(SB) -TEXT ·libc_setegid_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_select_trampoline_addr(SB), RODATA, $8 +DATA ·libc_select_trampoline_addr(SB)/8, $libc_select_trampoline<>(SB) + +TEXT libc_setegid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setegid(SB) -TEXT ·libc_seteuid_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_setegid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setegid_trampoline_addr(SB)/8, $libc_setegid_trampoline<>(SB) + +TEXT libc_seteuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_seteuid(SB) -TEXT ·libc_setgid_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_seteuid_trampoline_addr(SB), 
RODATA, $8 +DATA ·libc_seteuid_trampoline_addr(SB)/8, $libc_seteuid_trampoline<>(SB) + +TEXT libc_setgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setgid(SB) -TEXT ·libc_setlogin_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_setgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setgid_trampoline_addr(SB)/8, $libc_setgid_trampoline<>(SB) + +TEXT libc_setlogin_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setlogin(SB) -TEXT ·libc_setpgid_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_setlogin_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setlogin_trampoline_addr(SB)/8, $libc_setlogin_trampoline<>(SB) + +TEXT libc_setpgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setpgid(SB) -TEXT ·libc_setpriority_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_setpgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setpgid_trampoline_addr(SB)/8, $libc_setpgid_trampoline<>(SB) + +TEXT libc_setpriority_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setpriority(SB) -TEXT ·libc_setprivexec_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_setpriority_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setpriority_trampoline_addr(SB)/8, $libc_setpriority_trampoline<>(SB) + +TEXT libc_setprivexec_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setprivexec(SB) -TEXT ·libc_setregid_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_setprivexec_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setprivexec_trampoline_addr(SB)/8, $libc_setprivexec_trampoline<>(SB) + +TEXT libc_setregid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setregid(SB) -TEXT ·libc_setreuid_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_setregid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setregid_trampoline_addr(SB)/8, $libc_setregid_trampoline<>(SB) + +TEXT libc_setreuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setreuid(SB) -TEXT ·libc_setrlimit_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_setreuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setreuid_trampoline_addr(SB)/8, $libc_setreuid_trampoline<>(SB) + +TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setrlimit(SB) -TEXT 
·libc_setsid_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setrlimit_trampoline_addr(SB)/8, $libc_setrlimit_trampoline<>(SB) + +TEXT libc_setsid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setsid(SB) -TEXT ·libc_settimeofday_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_setsid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setsid_trampoline_addr(SB)/8, $libc_setsid_trampoline<>(SB) + +TEXT libc_settimeofday_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_settimeofday(SB) -TEXT ·libc_setuid_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_settimeofday_trampoline_addr(SB), RODATA, $8 +DATA ·libc_settimeofday_trampoline_addr(SB)/8, $libc_settimeofday_trampoline<>(SB) + +TEXT libc_setuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setuid(SB) -TEXT ·libc_symlink_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_setuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setuid_trampoline_addr(SB)/8, $libc_setuid_trampoline<>(SB) + +TEXT libc_symlink_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_symlink(SB) -TEXT ·libc_symlinkat_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_symlink_trampoline_addr(SB), RODATA, $8 +DATA ·libc_symlink_trampoline_addr(SB)/8, $libc_symlink_trampoline<>(SB) + +TEXT libc_symlinkat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_symlinkat(SB) -TEXT ·libc_sync_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_symlinkat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_symlinkat_trampoline_addr(SB)/8, $libc_symlinkat_trampoline<>(SB) + +TEXT libc_sync_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sync(SB) -TEXT ·libc_truncate_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_sync_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sync_trampoline_addr(SB)/8, $libc_sync_trampoline<>(SB) + +TEXT libc_truncate_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_truncate(SB) -TEXT ·libc_umask_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_truncate_trampoline_addr(SB), RODATA, $8 +DATA ·libc_truncate_trampoline_addr(SB)/8, $libc_truncate_trampoline<>(SB) + +TEXT libc_umask_trampoline<>(SB),NOSPLIT,$0-0 JMP 
libc_umask(SB) -TEXT ·libc_undelete_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_umask_trampoline_addr(SB), RODATA, $8 +DATA ·libc_umask_trampoline_addr(SB)/8, $libc_umask_trampoline<>(SB) + +TEXT libc_undelete_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_undelete(SB) -TEXT ·libc_unlink_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_undelete_trampoline_addr(SB), RODATA, $8 +DATA ·libc_undelete_trampoline_addr(SB)/8, $libc_undelete_trampoline<>(SB) + +TEXT libc_unlink_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_unlink(SB) -TEXT ·libc_unlinkat_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_unlink_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unlink_trampoline_addr(SB)/8, $libc_unlink_trampoline<>(SB) + +TEXT libc_unlinkat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_unlinkat(SB) -TEXT ·libc_unmount_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_unlinkat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unlinkat_trampoline_addr(SB)/8, $libc_unlinkat_trampoline<>(SB) + +TEXT libc_unmount_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_unmount(SB) -TEXT ·libc_write_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_unmount_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unmount_trampoline_addr(SB)/8, $libc_unmount_trampoline<>(SB) + +TEXT libc_write_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_write(SB) -TEXT ·libc_mmap_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_write_trampoline_addr(SB), RODATA, $8 +DATA ·libc_write_trampoline_addr(SB)/8, $libc_write_trampoline<>(SB) + +TEXT libc_mmap_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mmap(SB) -TEXT ·libc_munmap_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_mmap_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mmap_trampoline_addr(SB)/8, $libc_mmap_trampoline<>(SB) + +TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_munmap(SB) -TEXT ·libc_fstat_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 +DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) + +TEXT libc_fstat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fstat(SB) -TEXT 
·libc_fstatat_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_fstat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fstat_trampoline_addr(SB)/8, $libc_fstat_trampoline<>(SB) + +TEXT libc_fstatat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fstatat(SB) -TEXT ·libc_fstatfs_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_fstatat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fstatat_trampoline_addr(SB)/8, $libc_fstatat_trampoline<>(SB) + +TEXT libc_fstatfs_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fstatfs(SB) -TEXT ·libc_getfsstat_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_fstatfs_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fstatfs_trampoline_addr(SB)/8, $libc_fstatfs_trampoline<>(SB) + +TEXT libc_getfsstat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getfsstat(SB) -TEXT ·libc_lstat_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_getfsstat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getfsstat_trampoline_addr(SB)/8, $libc_getfsstat_trampoline<>(SB) + +TEXT libc_lstat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_lstat(SB) -TEXT ·libc_ptrace_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_lstat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_lstat_trampoline_addr(SB)/8, $libc_lstat_trampoline<>(SB) + +TEXT libc_ptrace_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ptrace(SB) -TEXT ·libc_stat_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_ptrace_trampoline_addr(SB), RODATA, $8 +DATA ·libc_ptrace_trampoline_addr(SB)/8, $libc_ptrace_trampoline<>(SB) + +TEXT libc_stat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_stat(SB) -TEXT ·libc_statfs_trampoline(SB),NOSPLIT,$0-0 + +GLOBL ·libc_stat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_stat_trampoline_addr(SB)/8, $libc_stat_trampoline<>(SB) + +TEXT libc_statfs_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_statfs(SB) + +GLOBL ·libc_statfs_trampoline_addr(SB), RODATA, $8 +DATA ·libc_statfs_trampoline_addr(SB)/8, $libc_statfs_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go index 1aaccd361..1b6eedfa6 100644 --- 
a/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go @@ -1,6 +1,7 @@ // go run mksyscall.go -dragonfly -tags dragonfly,amd64 syscall_bsd.go syscall_dragonfly.go syscall_dragonfly_amd64.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build dragonfly && amd64 // +build dragonfly,amd64 package unix @@ -362,8 +363,10 @@ func pipe() (r int, w int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func pipe2(p *[2]_C_int, flags int) (err error) { - _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) +func pipe2(p *[2]_C_int, flags int) (r int, w int, err error) { + r0, r1, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) + r = int(r0) + w = int(r1) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go index 600f1d26d..3e9bddb7b 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go @@ -1,6 +1,7 @@ // go run mksyscall.go -l32 -tags freebsd,386 syscall_bsd.go syscall_freebsd.go syscall_freebsd_386.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build freebsd && 386 // +build freebsd,386 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go index 064934b0d..c72a462b9 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go @@ -1,6 +1,7 @@ // go run mksyscall.go -tags freebsd,amd64 syscall_bsd.go syscall_freebsd.go syscall_freebsd_amd64.go // Code generated by the command above; see README.md. DO NOT EDIT. 
+//go:build freebsd && amd64 // +build freebsd,amd64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go index 31d2c4616..530d5df90 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go @@ -1,6 +1,7 @@ // go run mksyscall.go -l32 -arm -tags freebsd,arm syscall_bsd.go syscall_freebsd.go syscall_freebsd_arm.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build freebsd && arm // +build freebsd,arm package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go index 4adaaa561..71e7df9e8 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go @@ -1,6 +1,7 @@ // go run mksyscall.go -tags freebsd,arm64 syscall_bsd.go syscall_freebsd.go syscall_freebsd_arm64.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build freebsd && arm64 // +build freebsd,arm64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go index 665dd9e4b..af5cb064e 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go @@ -1,6 +1,7 @@ // go run mksyscall_solaris.go -illumos -tags illumos,amd64 syscall_illumos.go // Code generated by the command above; see README.md. DO NOT EDIT. 
+//go:build illumos && amd64 // +build illumos,amd64 package unix @@ -14,19 +15,25 @@ import ( //go:cgo_import_dynamic libc_writev writev "libc.so" //go:cgo_import_dynamic libc_pwritev pwritev "libc.so" //go:cgo_import_dynamic libc_accept4 accept4 "libsocket.so" +//go:cgo_import_dynamic libc_putmsg putmsg "libc.so" +//go:cgo_import_dynamic libc_getmsg getmsg "libc.so" //go:linkname procreadv libc_readv //go:linkname procpreadv libc_preadv //go:linkname procwritev libc_writev //go:linkname procpwritev libc_pwritev //go:linkname procaccept4 libc_accept4 +//go:linkname procputmsg libc_putmsg +//go:linkname procgetmsg libc_getmsg var ( procreadv, procpreadv, procwritev, procpwritev, - procaccept4 syscallFunc + procaccept4, + procputmsg, + procgetmsg syscallFunc ) // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -99,3 +106,23 @@ func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, } return } + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func putmsg(fd int, clptr *strbuf, dataptr *strbuf, flags int) (err error) { + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procputmsg)), 4, uintptr(fd), uintptr(unsafe.Pointer(clptr)), uintptr(unsafe.Pointer(dataptr)), uintptr(flags), 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getmsg(fd int, clptr *strbuf, dataptr *strbuf, flags *int) (err error) { + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procgetmsg)), 4, uintptr(fd), uintptr(unsafe.Pointer(clptr)), uintptr(unsafe.Pointer(dataptr)), uintptr(unsafe.Pointer(flags)), 0, 0) + if e1 != 0 { + err = e1 + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/vendor/golang.org/x/sys/unix/zsyscall_linux.go index 2fbbbe5a8..7305cc915 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux.go @@ -1,5 +1,6 @@ // Code generated by mkmerge.go; DO NOT EDIT. 
+//go:build linux // +build linux package unix @@ -531,6 +532,16 @@ func Close(fd int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func CloseRange(first uint, last uint, flags uint) (err error) { + _, _, e1 := Syscall(SYS_CLOSE_RANGE, uintptr(first), uintptr(last), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func CopyFileRange(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error) { r0, _, e1 := Syscall6(SYS_COPY_FILE_RANGE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags)) n = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go index 19ebd3ff7..e37096e4d 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go @@ -1,6 +1,7 @@ // go run mksyscall.go -l32 -tags linux,386 syscall_linux.go syscall_linux_386.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build linux && 386 // +build linux,386 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go index 5c562182a..9919d8486 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go @@ -1,6 +1,7 @@ // go run mksyscall.go -tags linux,amd64 syscall_linux.go syscall_linux_amd64.go // Code generated by the command above; see README.md. DO NOT EDIT. 
+//go:build linux && amd64 // +build linux,amd64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go index dc69d99c6..076754d48 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go @@ -1,6 +1,7 @@ // go run mksyscall.go -l32 -arm -tags linux,arm syscall_linux.go syscall_linux_arm.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build linux && arm // +build linux,arm package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go index 1b897dee0..e893f987f 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go @@ -1,6 +1,7 @@ // go run mksyscall.go -tags linux,arm64 syscall_linux.go syscall_linux_arm64.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build linux && arm64 // +build linux,arm64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go index 49186843a..4703cf3c3 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go @@ -1,6 +1,7 @@ // go run mksyscall.go -b32 -arm -tags linux,mips syscall_linux.go syscall_linux_mipsx.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build linux && mips // +build linux,mips package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go index 9171d3bd2..a134f9a4d 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go @@ -1,6 +1,7 @@ // go run mksyscall.go -tags linux,mips64 syscall_linux.go syscall_linux_mips64x.go // Code generated by the command above; see README.md. 
DO NOT EDIT. +//go:build linux && mips64 // +build linux,mips64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go index 82286f04f..b1fff2d94 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go @@ -1,6 +1,7 @@ // go run mksyscall.go -tags linux,mips64le syscall_linux.go syscall_linux_mips64x.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build linux && mips64le // +build linux,mips64le package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go index 15920621c..d13d6da01 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go @@ -1,6 +1,7 @@ // go run mksyscall.go -l32 -arm -tags linux,mipsle syscall_linux.go syscall_linux_mipsx.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build linux && mipsle // +build linux,mipsle package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go new file mode 100644 index 000000000..927cf1a00 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go @@ -0,0 +1,762 @@ +// go run mksyscall.go -b32 -tags linux,ppc syscall_linux.go syscall_linux_ppc.go +// Code generated by the command above; see README.md. DO NOT EDIT. 
+ +//go:build linux && ppc +// +build linux,ppc + +package unix + +import ( + "syscall" + "unsafe" +) + +var _ syscall.Errno + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname *byte) (err error) { + _, _, e1 := Syscall6(SYS_FANOTIFY_MARK, uintptr(fd), uintptr(flags), uintptr(mask>>32), uintptr(mask), uintptr(dirFd), uintptr(unsafe.Pointer(pathname))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { + _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off>>32), uintptr(off), uintptr(len>>32), uintptr(len)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) { + r0, r1, e1 := Syscall6(SYS_TEE, uintptr(rfd), uintptr(wfd), uintptr(len), uintptr(flags), 0, 0) + n = int64(int64(r0)<<32 | int64(r1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func dup2(oldfd int, newfd int) (err error) { + _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollCreate(size int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE, uintptr(size), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { + var _p0 unsafe.Pointer + if len(events) > 0 { + _p0 = unsafe.Pointer(&events[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_EPOLL_WAIT, uintptr(epfd), uintptr(_p0), 
uintptr(len(events)), uintptr(msec), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchown(fd int, uid int, gid int) (err error) { + _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstat(fd int, stat *Stat_t) (err error) { + _, _, e1 := Syscall(SYS_FSTAT64, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FSTATAT64, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ftruncate(fd int, length int64) (err error) { + _, _, e1 := Syscall(SYS_FTRUNCATE64, uintptr(fd), uintptr(length>>32), uintptr(length)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getegid() (egid int) { + r0, _ := RawSyscallNoError(SYS_GETEGID, 0, 0, 0) + egid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Geteuid() (euid int) { + r0, _ := RawSyscallNoError(SYS_GETEUID, 0, 0, 0) + euid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getgid() (gid int) { + r0, _ := RawSyscallNoError(SYS_GETGID, 0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getuid() (uid int) { + r0, _ := RawSyscallNoError(SYS_GETUID, 0, 0, 0) + uid = int(r0) + return 
+} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func InotifyInit() (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_INOTIFY_INIT, 0, 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ioperm(from int, num int, on int) (err error) { + _, _, e1 := Syscall(SYS_IOPERM, uintptr(from), uintptr(num), uintptr(on)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Iopl(level int) (err error) { + _, _, e1 := Syscall(SYS_IOPL, uintptr(level), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lchown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Listen(s int, n int) (err error) { + _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(n), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lstat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LSTAT64, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pause() (err error) { + _, _, e1 := Syscall(SYS_PAUSE, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pread(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if 
len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PREAD64, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset>>32), uintptr(offset), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pwrite(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PWRITE64, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset>>32), uintptr(offset), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + r0, _, e1 := Syscall6(SYS__NEWSELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { + r0, _, e1 := Syscall6(SYS_SENDFILE64, uintptr(outfd), uintptr(infd), uintptr(unsafe.Pointer(offset)), uintptr(count), 0, 0) + written = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + 
return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setfsgid(gid int) (prev int, err error) { + r0, _, e1 := Syscall(SYS_SETFSGID, uintptr(gid), 0, 0) + prev = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setfsuid(uid int) (prev int, err error) { + r0, _, e1 := Syscall(SYS_SETFSUID, uintptr(uid), 0, 0) + prev = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setregid(rgid int, egid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresgid(rgid int, egid int, sgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresuid(ruid int, euid int, suid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setreuid(ruid int, euid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Shutdown(fd int, how int) (err error) { + _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error) { + r0, _, e1 := Syscall6(SYS_SPLICE, uintptr(rfd), 
uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Stat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STAT64, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Truncate(path string, length int64) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_TRUNCATE64, uintptr(unsafe.Pointer(_p0)), uintptr(length>>32), uintptr(length)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ustat(dev int, ubuf *Ustat_t) (err error) { + _, _, e1 := Syscall(SYS_USTAT, uintptr(dev), uintptr(unsafe.Pointer(ubuf)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { + r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) { + r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := 
Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getgroups(n int, list *_Gid_t) (nn int, err error) { + r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0) + nn = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setgroups(n int, list *_Gid_t) (err error) { + _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { + _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { + _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socket(domain int, typ int, proto int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socketpair(domain 
int, typ int, proto int, fd *[2]int32) (err error) { + _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { + 
r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func futimesat(dirfd int, path string, times *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FUTIMESAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Gettimeofday(tv *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Time(t *Time_t) (tt Time_t, err error) { + r0, _, e1 := RawSyscall(SYS_TIME, uintptr(unsafe.Pointer(t)), 0, 0) + tt = Time_t(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Utime(path string, buf *Utimbuf) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimes(path string, times *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), 
uintptr(unsafe.Pointer(times)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mmap2(addr uintptr, length uintptr, prot int, flags int, fd int, pageOffset uintptr) (xaddr uintptr, err error) { + r0, _, e1 := Syscall6(SYS_MMAP2, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flags), uintptr(fd), uintptr(pageOffset)) + xaddr = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getrlimit(resource int, rlim *rlimit32) (err error) { + _, _, e1 := RawSyscall(SYS_UGETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setrlimit(resource int, rlim *rlimit32) (err error) { + _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pipe(p *[2]_C_int) (err error) { + _, _, e1 := RawSyscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { + r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func syncFileRange2(fd int, flags int, off int64, n int64) (err error) { + _, _, e1 := Syscall6(SYS_SYNC_FILE_RANGE2, uintptr(fd), uintptr(flags), uintptr(off>>32), uintptr(off), uintptr(n>>32), uintptr(n)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func kexecFileLoad(kernelFd int, 
initrdFd int, cmdlineLen int, cmdline string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(cmdline) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_KEXEC_FILE_LOAD, uintptr(kernelFd), uintptr(initrdFd), uintptr(cmdlineLen), uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go index 73a42e2cc..da8ec0396 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go @@ -1,6 +1,7 @@ // go run mksyscall.go -tags linux,ppc64 syscall_linux.go syscall_linux_ppc64x.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build linux && ppc64 // +build linux,ppc64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go index 6b8559536..083f493bb 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go @@ -1,6 +1,7 @@ // go run mksyscall.go -tags linux,ppc64le syscall_linux.go syscall_linux_ppc64x.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build linux && ppc64le // +build linux,ppc64le package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go index b76133447..63b393b80 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go @@ -1,6 +1,7 @@ // go run mksyscall.go -tags linux,riscv64 syscall_linux.go syscall_linux_riscv64.go // Code generated by the command above; see README.md. DO NOT EDIT. 
+//go:build linux && riscv64 // +build linux,riscv64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go index d7032ab1e..bb347407d 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go @@ -1,6 +1,7 @@ // go run mksyscall.go -tags linux,s390x syscall_linux.go syscall_linux_s390x.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build linux && s390x // +build linux,s390x package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go index bcbbdd906..8edc517e1 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go @@ -1,6 +1,7 @@ // go run mksyscall.go -tags linux,sparc64 syscall_linux.go syscall_linux_sparc64.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build linux && sparc64 // +build linux,sparc64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go index 3bbd9e39c..4726ab30a 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go @@ -1,6 +1,7 @@ // go run mksyscall.go -l32 -netbsd -tags netbsd,386 syscall_bsd.go syscall_netbsd.go syscall_netbsd_386.go // Code generated by the command above; see README.md. DO NOT EDIT. 
+//go:build netbsd && 386 // +build netbsd,386 package unix @@ -362,6 +363,16 @@ func pipe() (fd1 int, fd2 int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func pipe2(p *[2]_C_int, flags int) (err error) { + _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getdents(fd int, buf []byte) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go index d8cf5012c..fe71456db 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go @@ -1,6 +1,7 @@ // go run mksyscall.go -netbsd -tags netbsd,amd64 syscall_bsd.go syscall_netbsd.go syscall_netbsd_amd64.go // Code generated by the command above; see README.md. DO NOT EDIT. 
+//go:build netbsd && amd64 // +build netbsd,amd64 package unix @@ -362,6 +363,16 @@ func pipe() (fd1 int, fd2 int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func pipe2(p *[2]_C_int, flags int) (err error) { + _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getdents(fd int, buf []byte) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go index 1153fe69b..0b5b2f014 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go @@ -1,6 +1,7 @@ // go run mksyscall.go -l32 -netbsd -arm -tags netbsd,arm syscall_bsd.go syscall_netbsd.go syscall_netbsd_arm.go // Code generated by the command above; see README.md. DO NOT EDIT. 
+//go:build netbsd && arm // +build netbsd,arm package unix @@ -362,6 +363,16 @@ func pipe() (fd1 int, fd2 int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func pipe2(p *[2]_C_int, flags int) (err error) { + _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getdents(fd int, buf []byte) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go index 24b4ebb41..bfca28648 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go @@ -1,6 +1,7 @@ // go run mksyscall.go -netbsd -tags netbsd,arm64 syscall_bsd.go syscall_netbsd.go syscall_netbsd_arm64.go // Code generated by the command above; see README.md. DO NOT EDIT. 
+//go:build netbsd && arm64 // +build netbsd,arm64 package unix @@ -362,6 +363,16 @@ func pipe() (fd1 int, fd2 int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func pipe2(p *[2]_C_int, flags int) (err error) { + _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getdents(fd int, buf []byte) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go index b44b31aeb..8f80f4ade 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go @@ -1,6 +1,7 @@ // go run mksyscall.go -l32 -openbsd -tags openbsd,386 syscall_bsd.go syscall_openbsd.go syscall_openbsd_386.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build openbsd && 386 // +build openbsd,386 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go index 67f93ee76..3a47aca7b 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go @@ -1,6 +1,7 @@ // go run mksyscall.go -openbsd -tags openbsd,amd64 syscall_bsd.go syscall_openbsd.go syscall_openbsd_amd64.go // Code generated by the command above; see README.md. DO NOT EDIT. 
+//go:build openbsd && amd64 // +build openbsd,amd64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go index d7c878b1d..883a9b45e 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go @@ -1,6 +1,7 @@ // go run mksyscall.go -l32 -openbsd -arm -tags openbsd,arm syscall_bsd.go syscall_openbsd.go syscall_openbsd_arm.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build openbsd && arm // +build openbsd,arm package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go index 8facd695d..aac7fdc95 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go @@ -1,6 +1,7 @@ // go run mksyscall.go -openbsd -tags openbsd,arm64 syscall_bsd.go syscall_openbsd.go syscall_openbsd_arm64.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build openbsd && arm64 // +build openbsd,arm64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go index ec6bd5bb7..877618746 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go @@ -1,6 +1,7 @@ // go run mksyscall.go -openbsd -tags openbsd,mips64 syscall_bsd.go syscall_openbsd.go syscall_openbsd_mips64.go // Code generated by the command above; see README.md. DO NOT EDIT. 
+//go:build openbsd && mips64 // +build openbsd,mips64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go index 6dbb83716..4e18d5c99 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go @@ -1,6 +1,7 @@ // go run mksyscall_solaris.go -tags solaris,amd64 syscall_solaris.go syscall_solaris_amd64.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build solaris && amd64 // +build solaris,amd64 package unix @@ -115,6 +116,7 @@ import ( //go:cgo_import_dynamic libc_statvfs statvfs "libc.so" //go:cgo_import_dynamic libc_symlink symlink "libc.so" //go:cgo_import_dynamic libc_sync sync "libc.so" +//go:cgo_import_dynamic libc_sysconf sysconf "libc.so" //go:cgo_import_dynamic libc_times times "libc.so" //go:cgo_import_dynamic libc_truncate truncate "libc.so" //go:cgo_import_dynamic libc_fsync fsync "libc.so" @@ -245,6 +247,7 @@ import ( //go:linkname procStatvfs libc_statvfs //go:linkname procSymlink libc_symlink //go:linkname procSync libc_sync +//go:linkname procSysconf libc_sysconf //go:linkname procTimes libc_times //go:linkname procTruncate libc_truncate //go:linkname procFsync libc_fsync @@ -376,6 +379,7 @@ var ( procStatvfs, procSymlink, procSync, + procSysconf, procTimes, procTruncate, procFsync, @@ -615,8 +619,9 @@ func __minor(version int, dev uint64) (val uint) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ioctl(fd int, req uint, arg uintptr) (err error) { - _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procioctl)), 3, uintptr(fd), uintptr(req), uintptr(arg), 0, 0, 0) +func ioctlRet(fd int, req uint, arg uintptr) (ret int, err error) { + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procioctl)), 3, uintptr(fd), uintptr(req), uintptr(arg), 0, 0, 0) + ret = int(r0) if e1 != 0 { err = e1 } @@ -1687,6 +1692,17 @@ func Sync() (err error) { // THIS FILE IS 
GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Sysconf(which int) (n int64, err error) { + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procSysconf)), 1, uintptr(which), 0, 0, 0, 0, 0) + n = int64(r0) + if e1 != 0 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Times(tms *Tms) (ticks uintptr, err error) { r0, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procTimes)), 1, uintptr(unsafe.Pointer(tms)), 0, 0, 0, 0, 0) ticks = uintptr(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go b/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go new file mode 100644 index 000000000..f2079457c --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go @@ -0,0 +1,1255 @@ +// go run mksyscall.go -tags zos,s390x syscall_zos_s390x.go +// Code generated by the command above; see README.md. DO NOT EDIT. + +//go:build zos && s390x +// +build zos,s390x + +package unix + +import ( + "unsafe" +) + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntl(fd int, cmd int, arg int) (val int, err error) { + r0, _, e1 := syscall_syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func read(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func readlen(fd int, buf *byte, nbuf int) (n int, err error) { + r0, _, e1 := syscall_syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT 
THE TOP; DO NOT EDIT + +func write(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { + r0, _, e1 := syscall_syscall(SYS___ACCEPT_A, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := syscall_syscall(SYS___BIND_A, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := syscall_syscall(SYS___CONNECT_A, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getgroups(n int, list *_Gid_t) (nn int, err error) { + r0, _, e1 := syscall_rawsyscall(SYS_GETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0) + nn = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setgroups(n int, list *_Gid_t) (err error) { + _, _, e1 := syscall_rawsyscall(SYS_SETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { + _, _, e1 := 
syscall_syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { + _, _, e1 := syscall_syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socket(domain int, typ int, proto int) (fd int, err error) { + r0, _, e1 := syscall_rawsyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { + _, _, e1 := syscall_rawsyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := syscall_rawsyscall(SYS___GETPEERNAME_A, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := syscall_rawsyscall(SYS___GETSOCKNAME_A, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { + var _p0 
unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall6(SYS___RECVFROM_A, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall6(SYS___SENDTO_A, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := syscall_syscall(SYS___RECVMSG_A, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := syscall_syscall(SYS___SENDMSG_A, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { + r0, _, e1 := syscall_syscall6(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos)) + ret = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func munmap(addr uintptr, length uintptr) (err error) { + _, _, e1 := syscall_syscall(SYS_MUNMAP, 
uintptr(addr), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ioctl(fd int, req uint, arg uintptr) (err error) { + _, _, e1 := syscall_syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Access(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(SYS___ACCESS_A, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(SYS___CHDIR_A, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(SYS___CHOWN_A, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chmod(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(SYS___CHMOD_A, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Creat(path string, mode uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := 
syscall_syscall(SYS___CREAT_A, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup(oldfd int) (fd int, err error) { + r0, _, e1 := syscall_syscall(SYS_DUP, uintptr(oldfd), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup2(oldfd int, newfd int) (err error) { + _, _, e1 := syscall_syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Errno2() (er2 int) { + uer2, _, _ := syscall_syscall(SYS___ERRNO2, 0, 0, 0) + er2 = int(uer2) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Err2ad() (eadd *int) { + ueadd, _, _ := syscall_syscall(SYS___ERR2AD, 0, 0, 0) + eadd = (*int)(unsafe.Pointer(ueadd)) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Exit(code int) { + syscall_syscall(SYS_EXIT, uintptr(code), 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchdir(fd int) (err error) { + _, _, e1 := syscall_syscall(SYS_FCHDIR, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmod(fd int, mode uint32) (err error) { + _, _, e1 := syscall_syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchown(fd int, uid int, gid int) (err error) { + _, _, e1 := syscall_syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func FcntlInt(fd uintptr, cmd int, 
arg int) (retval int, err error) { + r0, _, e1 := syscall_syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) + retval = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fstat(fd int, stat *Stat_LE_t) (err error) { + _, _, e1 := syscall_syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatvfs(fd int, stat *Statvfs_t) (err error) { + _, _, e1 := syscall_syscall(SYS_FSTATVFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fsync(fd int) (err error) { + _, _, e1 := syscall_syscall(SYS_FSYNC, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ftruncate(fd int, length int64) (err error) { + _, _, e1 := syscall_syscall(SYS_FTRUNCATE, uintptr(fd), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpagesize() (pgsize int) { + r0, _, _ := syscall_syscall(SYS_GETPAGESIZE, 0, 0, 0) + pgsize = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mprotect(b []byte, prot int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Msync(b []byte, flags int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, 
_, e1 := syscall_syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Poll(fds []PollFd, timeout int) (n int, err error) { + var _p0 unsafe.Pointer + if len(fds) > 0 { + _p0 = unsafe.Pointer(&fds[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall(SYS_POLL, uintptr(_p0), uintptr(len(fds)), uintptr(timeout)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Times(tms *Tms) (ticks uintptr, err error) { + r0, _, e1 := syscall_syscall(SYS_TIMES, uintptr(unsafe.Pointer(tms)), 0, 0) + ticks = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func W_Getmntent(buff *byte, size int) (lastsys int, err error) { + r0, _, e1 := syscall_syscall(SYS_W_GETMNTENT, uintptr(unsafe.Pointer(buff)), uintptr(size), 0) + lastsys = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func W_Getmntent_A(buff *byte, size int) (lastsys int, err error) { + r0, _, e1 := syscall_syscall(SYS___W_GETMNTENT_A, uintptr(unsafe.Pointer(buff)), uintptr(size), 0) + lastsys = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mount_LE(path string, filesystem string, fstype string, mtm uint32, parmlen int32, parm string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(filesystem) + if err != nil { + return + } + var _p2 *byte + _p2, err = BytePtrFromString(fstype) + if err != nil { + return + } + var _p3 *byte + _p3, err = BytePtrFromString(parm) + if err != nil { + return + } + _, _, e1 := 
syscall_syscall6(SYS___MOUNT_A, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(mtm), uintptr(parmlen), uintptr(unsafe.Pointer(_p3))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func unmount(filesystem string, mtm int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(filesystem) + if err != nil { + return + } + _, _, e1 := syscall_syscall(SYS___UMOUNT_A, uintptr(unsafe.Pointer(_p0)), uintptr(mtm), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chroot(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(SYS___CHROOT_A, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Uname(buf *Utsname) (err error) { + _, _, e1 := syscall_rawsyscall(SYS___UNAME_A, uintptr(unsafe.Pointer(buf)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Gethostname(buf []byte) (err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall(SYS___GETHOSTNAME_A, uintptr(_p0), uintptr(len(buf)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getegid() (egid int) { + r0, _, _ := syscall_rawsyscall(SYS_GETEGID, 0, 0, 0) + egid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Geteuid() (uid int) { + r0, _, _ := syscall_rawsyscall(SYS_GETEUID, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getgid() 
(gid int) { + r0, _, _ := syscall_rawsyscall(SYS_GETGID, 0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpid() (pid int) { + r0, _, _ := syscall_rawsyscall(SYS_GETPID, 0, 0, 0) + pid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgid(pid int) (pgid int, err error) { + r0, _, e1 := syscall_rawsyscall(SYS_GETPGID, uintptr(pid), 0, 0) + pgid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getppid() (pid int) { + r0, _, _ := syscall_rawsyscall(SYS_GETPPID, 0, 0, 0) + pid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpriority(which int, who int) (prio int, err error) { + r0, _, e1 := syscall_syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) + prio = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrlimit(resource int, rlim *Rlimit) (err error) { + _, _, e1 := syscall_rawsyscall(SYS_GETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getrusage(who int, rusage *rusage_zos) (err error) { + _, _, e1 := syscall_rawsyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getsid(pid int) (sid int, err error) { + r0, _, e1 := syscall_rawsyscall(SYS_GETSID, uintptr(pid), 0, 0) + sid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getuid() (uid int) { + r0, _, _ := syscall_rawsyscall(SYS_GETUID, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS 
GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kill(pid int, sig Signal) (err error) { + _, _, e1 := syscall_rawsyscall(SYS_KILL, uintptr(pid), uintptr(sig), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lchown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(SYS___LCHOWN_A, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Link(path string, link string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := syscall_syscall(SYS___LINK_A, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Listen(s int, n int) (err error) { + _, _, e1 := syscall_syscall(SYS_LISTEN, uintptr(s), uintptr(n), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func lstat(path string, stat *Stat_LE_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(SYS___LSTAT_A, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkdir(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(SYS___MKDIR_A, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// 
THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkfifo(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(SYS___MKFIFO_A, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mknod(path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(SYS___MKNOD_A, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pread(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pwrite(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Readlink(path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall(SYS___READLINK_A, uintptr(unsafe.Pointer(_p0)), 
uintptr(_p1), uintptr(len(buf))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Rename(from string, to string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := syscall_syscall(SYS___RENAME_A, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Rmdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(SYS___RMDIR_A, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seek(fd int, offset int64, whence int) (off int64, err error) { + r0, _, e1 := syscall_syscall(SYS_LSEEK, uintptr(fd), uintptr(offset), uintptr(whence)) + off = int64(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpriority(which int, who int, prio int) (err error) { + _, _, e1 := syscall_syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpgid(pid int, pgid int) (err error) { + _, _, e1 := syscall_rawsyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setrlimit(resource int, lim *Rlimit) (err error) { + _, _, e1 := syscall_rawsyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(lim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS 
GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setregid(rgid int, egid int) (err error) { + _, _, e1 := syscall_rawsyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setreuid(ruid int, euid int) (err error) { + _, _, e1 := syscall_rawsyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setsid() (pid int, err error) { + r0, _, e1 := syscall_rawsyscall(SYS_SETSID, 0, 0, 0) + pid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setuid(uid int) (err error) { + _, _, e1 := syscall_syscall(SYS_SETUID, uintptr(uid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setgid(uid int) (err error) { + _, _, e1 := syscall_syscall(SYS_SETGID, uintptr(uid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Shutdown(fd int, how int) (err error) { + _, _, e1 := syscall_syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func stat(path string, statLE *Stat_LE_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(SYS___STAT_A, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(statLE)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Symlink(path string, link string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, 
err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := syscall_syscall(SYS___SYMLINK_A, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sync() { + syscall_syscall(SYS_SYNC, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Truncate(path string, length int64) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(SYS___TRUNCATE_A, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Tcgetattr(fildes int, termptr *Termios) (err error) { + _, _, e1 := syscall_syscall(SYS_TCGETATTR, uintptr(fildes), uintptr(unsafe.Pointer(termptr)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Tcsetattr(fildes int, when int, termptr *Termios) (err error) { + _, _, e1 := syscall_syscall(SYS_TCSETATTR, uintptr(fildes), uintptr(when), uintptr(unsafe.Pointer(termptr))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Umask(mask int) (oldmask int) { + r0, _, _ := syscall_syscall(SYS_UMASK, uintptr(mask), 0, 0) + oldmask = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unlink(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(SYS___UNLINK_A, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Utime(path string, utim *Utimbuf) (err error) { + var _p0 *byte + _p0, err = 
BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(SYS___UTIME_A, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(utim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func open(path string, mode int, perm uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := syscall_syscall(SYS___OPEN_A, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func remove(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(SYS_REMOVE, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func waitpid(pid int, wstatus *_C_int, options int) (wpid int, err error) { + r0, _, e1 := syscall_syscall(SYS_WAITPID, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options)) + wpid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func gettimeofday(tv *timeval_zos) (err error) { + _, _, e1 := syscall_rawsyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pipe(p *[2]_C_int) (err error) { + _, _, e1 := syscall_rawsyscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimes(path string, timeval *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := 
syscall_syscall(SYS___UTIMES_A, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Select(nmsgsfds int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (ret int, err error) { + r0, _, e1 := syscall_syscall6(SYS_SELECT, uintptr(nmsgsfds), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go index 102f1ab47..9e9d0b2a9 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go @@ -1,6 +1,7 @@ // go run mksysctl_openbsd.go // Code generated by the command above; DO NOT EDIT. +//go:build 386 && openbsd // +build 386,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go index 4866fced8..adecd0966 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go @@ -1,6 +1,7 @@ // go run mksysctl_openbsd.go // Code generated by the command above; DO NOT EDIT. +//go:build amd64 && openbsd // +build amd64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go index d3801eb24..8ea52a4a1 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go @@ -1,6 +1,7 @@ // go run mksysctl_openbsd.go // Code generated by the command above; DO NOT EDIT. 
+//go:build arm && openbsd // +build arm,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm64.go index ba4304fd2..154b57ae3 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm64.go @@ -1,6 +1,7 @@ // go run mksysctl_openbsd.go // Code generated by the command above; DO NOT EDIT. +//go:build arm64 && openbsd // +build arm64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_mips64.go index aca34b349..d96bb2ba4 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_mips64.go @@ -1,6 +1,7 @@ // go run mksysctl_openbsd.go // Code generated by the command above; DO NOT EDIT. +//go:build mips64 && openbsd // +build mips64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_darwin_386.go b/vendor/golang.org/x/sys/unix/zsysnum_darwin_386.go deleted file mode 100644 index ad62324c7..000000000 --- a/vendor/golang.org/x/sys/unix/zsysnum_darwin_386.go +++ /dev/null @@ -1,437 +0,0 @@ -// go run mksysnum.go /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.13.sdk/usr/include/sys/syscall.h -// Code generated by the command above; see README.md. DO NOT EDIT. - -// +build 386,darwin - -package unix - -// Deprecated: Use libSystem wrappers instead of direct syscalls. 
-const ( - SYS_SYSCALL = 0 - SYS_EXIT = 1 - SYS_FORK = 2 - SYS_READ = 3 - SYS_WRITE = 4 - SYS_OPEN = 5 - SYS_CLOSE = 6 - SYS_WAIT4 = 7 - SYS_LINK = 9 - SYS_UNLINK = 10 - SYS_CHDIR = 12 - SYS_FCHDIR = 13 - SYS_MKNOD = 14 - SYS_CHMOD = 15 - SYS_CHOWN = 16 - SYS_GETFSSTAT = 18 - SYS_GETPID = 20 - SYS_SETUID = 23 - SYS_GETUID = 24 - SYS_GETEUID = 25 - SYS_PTRACE = 26 - SYS_RECVMSG = 27 - SYS_SENDMSG = 28 - SYS_RECVFROM = 29 - SYS_ACCEPT = 30 - SYS_GETPEERNAME = 31 - SYS_GETSOCKNAME = 32 - SYS_ACCESS = 33 - SYS_CHFLAGS = 34 - SYS_FCHFLAGS = 35 - SYS_SYNC = 36 - SYS_KILL = 37 - SYS_GETPPID = 39 - SYS_DUP = 41 - SYS_PIPE = 42 - SYS_GETEGID = 43 - SYS_SIGACTION = 46 - SYS_GETGID = 47 - SYS_SIGPROCMASK = 48 - SYS_GETLOGIN = 49 - SYS_SETLOGIN = 50 - SYS_ACCT = 51 - SYS_SIGPENDING = 52 - SYS_SIGALTSTACK = 53 - SYS_IOCTL = 54 - SYS_REBOOT = 55 - SYS_REVOKE = 56 - SYS_SYMLINK = 57 - SYS_READLINK = 58 - SYS_EXECVE = 59 - SYS_UMASK = 60 - SYS_CHROOT = 61 - SYS_MSYNC = 65 - SYS_VFORK = 66 - SYS_MUNMAP = 73 - SYS_MPROTECT = 74 - SYS_MADVISE = 75 - SYS_MINCORE = 78 - SYS_GETGROUPS = 79 - SYS_SETGROUPS = 80 - SYS_GETPGRP = 81 - SYS_SETPGID = 82 - SYS_SETITIMER = 83 - SYS_SWAPON = 85 - SYS_GETITIMER = 86 - SYS_GETDTABLESIZE = 89 - SYS_DUP2 = 90 - SYS_FCNTL = 92 - SYS_SELECT = 93 - SYS_FSYNC = 95 - SYS_SETPRIORITY = 96 - SYS_SOCKET = 97 - SYS_CONNECT = 98 - SYS_GETPRIORITY = 100 - SYS_BIND = 104 - SYS_SETSOCKOPT = 105 - SYS_LISTEN = 106 - SYS_SIGSUSPEND = 111 - SYS_GETTIMEOFDAY = 116 - SYS_GETRUSAGE = 117 - SYS_GETSOCKOPT = 118 - SYS_READV = 120 - SYS_WRITEV = 121 - SYS_SETTIMEOFDAY = 122 - SYS_FCHOWN = 123 - SYS_FCHMOD = 124 - SYS_SETREUID = 126 - SYS_SETREGID = 127 - SYS_RENAME = 128 - SYS_FLOCK = 131 - SYS_MKFIFO = 132 - SYS_SENDTO = 133 - SYS_SHUTDOWN = 134 - SYS_SOCKETPAIR = 135 - SYS_MKDIR = 136 - SYS_RMDIR = 137 - SYS_UTIMES = 138 - SYS_FUTIMES = 139 - SYS_ADJTIME = 140 - SYS_GETHOSTUUID = 142 - SYS_SETSID = 147 - SYS_GETPGID = 151 - SYS_SETPRIVEXEC = 152 - SYS_PREAD = 153 - 
SYS_PWRITE = 154 - SYS_NFSSVC = 155 - SYS_STATFS = 157 - SYS_FSTATFS = 158 - SYS_UNMOUNT = 159 - SYS_GETFH = 161 - SYS_QUOTACTL = 165 - SYS_MOUNT = 167 - SYS_CSOPS = 169 - SYS_CSOPS_AUDITTOKEN = 170 - SYS_WAITID = 173 - SYS_KDEBUG_TYPEFILTER = 177 - SYS_KDEBUG_TRACE_STRING = 178 - SYS_KDEBUG_TRACE64 = 179 - SYS_KDEBUG_TRACE = 180 - SYS_SETGID = 181 - SYS_SETEGID = 182 - SYS_SETEUID = 183 - SYS_SIGRETURN = 184 - SYS_THREAD_SELFCOUNTS = 186 - SYS_FDATASYNC = 187 - SYS_STAT = 188 - SYS_FSTAT = 189 - SYS_LSTAT = 190 - SYS_PATHCONF = 191 - SYS_FPATHCONF = 192 - SYS_GETRLIMIT = 194 - SYS_SETRLIMIT = 195 - SYS_GETDIRENTRIES = 196 - SYS_MMAP = 197 - SYS_LSEEK = 199 - SYS_TRUNCATE = 200 - SYS_FTRUNCATE = 201 - SYS_SYSCTL = 202 - SYS_MLOCK = 203 - SYS_MUNLOCK = 204 - SYS_UNDELETE = 205 - SYS_OPEN_DPROTECTED_NP = 216 - SYS_GETATTRLIST = 220 - SYS_SETATTRLIST = 221 - SYS_GETDIRENTRIESATTR = 222 - SYS_EXCHANGEDATA = 223 - SYS_SEARCHFS = 225 - SYS_DELETE = 226 - SYS_COPYFILE = 227 - SYS_FGETATTRLIST = 228 - SYS_FSETATTRLIST = 229 - SYS_POLL = 230 - SYS_WATCHEVENT = 231 - SYS_WAITEVENT = 232 - SYS_MODWATCH = 233 - SYS_GETXATTR = 234 - SYS_FGETXATTR = 235 - SYS_SETXATTR = 236 - SYS_FSETXATTR = 237 - SYS_REMOVEXATTR = 238 - SYS_FREMOVEXATTR = 239 - SYS_LISTXATTR = 240 - SYS_FLISTXATTR = 241 - SYS_FSCTL = 242 - SYS_INITGROUPS = 243 - SYS_POSIX_SPAWN = 244 - SYS_FFSCTL = 245 - SYS_NFSCLNT = 247 - SYS_FHOPEN = 248 - SYS_MINHERIT = 250 - SYS_SEMSYS = 251 - SYS_MSGSYS = 252 - SYS_SHMSYS = 253 - SYS_SEMCTL = 254 - SYS_SEMGET = 255 - SYS_SEMOP = 256 - SYS_MSGCTL = 258 - SYS_MSGGET = 259 - SYS_MSGSND = 260 - SYS_MSGRCV = 261 - SYS_SHMAT = 262 - SYS_SHMCTL = 263 - SYS_SHMDT = 264 - SYS_SHMGET = 265 - SYS_SHM_OPEN = 266 - SYS_SHM_UNLINK = 267 - SYS_SEM_OPEN = 268 - SYS_SEM_CLOSE = 269 - SYS_SEM_UNLINK = 270 - SYS_SEM_WAIT = 271 - SYS_SEM_TRYWAIT = 272 - SYS_SEM_POST = 273 - SYS_SYSCTLBYNAME = 274 - SYS_OPEN_EXTENDED = 277 - SYS_UMASK_EXTENDED = 278 - SYS_STAT_EXTENDED = 279 - 
SYS_LSTAT_EXTENDED = 280 - SYS_FSTAT_EXTENDED = 281 - SYS_CHMOD_EXTENDED = 282 - SYS_FCHMOD_EXTENDED = 283 - SYS_ACCESS_EXTENDED = 284 - SYS_SETTID = 285 - SYS_GETTID = 286 - SYS_SETSGROUPS = 287 - SYS_GETSGROUPS = 288 - SYS_SETWGROUPS = 289 - SYS_GETWGROUPS = 290 - SYS_MKFIFO_EXTENDED = 291 - SYS_MKDIR_EXTENDED = 292 - SYS_IDENTITYSVC = 293 - SYS_SHARED_REGION_CHECK_NP = 294 - SYS_VM_PRESSURE_MONITOR = 296 - SYS_PSYNCH_RW_LONGRDLOCK = 297 - SYS_PSYNCH_RW_YIELDWRLOCK = 298 - SYS_PSYNCH_RW_DOWNGRADE = 299 - SYS_PSYNCH_RW_UPGRADE = 300 - SYS_PSYNCH_MUTEXWAIT = 301 - SYS_PSYNCH_MUTEXDROP = 302 - SYS_PSYNCH_CVBROAD = 303 - SYS_PSYNCH_CVSIGNAL = 304 - SYS_PSYNCH_CVWAIT = 305 - SYS_PSYNCH_RW_RDLOCK = 306 - SYS_PSYNCH_RW_WRLOCK = 307 - SYS_PSYNCH_RW_UNLOCK = 308 - SYS_PSYNCH_RW_UNLOCK2 = 309 - SYS_GETSID = 310 - SYS_SETTID_WITH_PID = 311 - SYS_PSYNCH_CVCLRPREPOST = 312 - SYS_AIO_FSYNC = 313 - SYS_AIO_RETURN = 314 - SYS_AIO_SUSPEND = 315 - SYS_AIO_CANCEL = 316 - SYS_AIO_ERROR = 317 - SYS_AIO_READ = 318 - SYS_AIO_WRITE = 319 - SYS_LIO_LISTIO = 320 - SYS_IOPOLICYSYS = 322 - SYS_PROCESS_POLICY = 323 - SYS_MLOCKALL = 324 - SYS_MUNLOCKALL = 325 - SYS_ISSETUGID = 327 - SYS___PTHREAD_KILL = 328 - SYS___PTHREAD_SIGMASK = 329 - SYS___SIGWAIT = 330 - SYS___DISABLE_THREADSIGNAL = 331 - SYS___PTHREAD_MARKCANCEL = 332 - SYS___PTHREAD_CANCELED = 333 - SYS___SEMWAIT_SIGNAL = 334 - SYS_PROC_INFO = 336 - SYS_SENDFILE = 337 - SYS_STAT64 = 338 - SYS_FSTAT64 = 339 - SYS_LSTAT64 = 340 - SYS_STAT64_EXTENDED = 341 - SYS_LSTAT64_EXTENDED = 342 - SYS_FSTAT64_EXTENDED = 343 - SYS_GETDIRENTRIES64 = 344 - SYS_STATFS64 = 345 - SYS_FSTATFS64 = 346 - SYS_GETFSSTAT64 = 347 - SYS___PTHREAD_CHDIR = 348 - SYS___PTHREAD_FCHDIR = 349 - SYS_AUDIT = 350 - SYS_AUDITON = 351 - SYS_GETAUID = 353 - SYS_SETAUID = 354 - SYS_GETAUDIT_ADDR = 357 - SYS_SETAUDIT_ADDR = 358 - SYS_AUDITCTL = 359 - SYS_BSDTHREAD_CREATE = 360 - SYS_BSDTHREAD_TERMINATE = 361 - SYS_KQUEUE = 362 - SYS_KEVENT = 363 - SYS_LCHOWN = 364 - 
SYS_BSDTHREAD_REGISTER = 366 - SYS_WORKQ_OPEN = 367 - SYS_WORKQ_KERNRETURN = 368 - SYS_KEVENT64 = 369 - SYS___OLD_SEMWAIT_SIGNAL = 370 - SYS___OLD_SEMWAIT_SIGNAL_NOCANCEL = 371 - SYS_THREAD_SELFID = 372 - SYS_LEDGER = 373 - SYS_KEVENT_QOS = 374 - SYS_KEVENT_ID = 375 - SYS___MAC_EXECVE = 380 - SYS___MAC_SYSCALL = 381 - SYS___MAC_GET_FILE = 382 - SYS___MAC_SET_FILE = 383 - SYS___MAC_GET_LINK = 384 - SYS___MAC_SET_LINK = 385 - SYS___MAC_GET_PROC = 386 - SYS___MAC_SET_PROC = 387 - SYS___MAC_GET_FD = 388 - SYS___MAC_SET_FD = 389 - SYS___MAC_GET_PID = 390 - SYS_PSELECT = 394 - SYS_PSELECT_NOCANCEL = 395 - SYS_READ_NOCANCEL = 396 - SYS_WRITE_NOCANCEL = 397 - SYS_OPEN_NOCANCEL = 398 - SYS_CLOSE_NOCANCEL = 399 - SYS_WAIT4_NOCANCEL = 400 - SYS_RECVMSG_NOCANCEL = 401 - SYS_SENDMSG_NOCANCEL = 402 - SYS_RECVFROM_NOCANCEL = 403 - SYS_ACCEPT_NOCANCEL = 404 - SYS_MSYNC_NOCANCEL = 405 - SYS_FCNTL_NOCANCEL = 406 - SYS_SELECT_NOCANCEL = 407 - SYS_FSYNC_NOCANCEL = 408 - SYS_CONNECT_NOCANCEL = 409 - SYS_SIGSUSPEND_NOCANCEL = 410 - SYS_READV_NOCANCEL = 411 - SYS_WRITEV_NOCANCEL = 412 - SYS_SENDTO_NOCANCEL = 413 - SYS_PREAD_NOCANCEL = 414 - SYS_PWRITE_NOCANCEL = 415 - SYS_WAITID_NOCANCEL = 416 - SYS_POLL_NOCANCEL = 417 - SYS_MSGSND_NOCANCEL = 418 - SYS_MSGRCV_NOCANCEL = 419 - SYS_SEM_WAIT_NOCANCEL = 420 - SYS_AIO_SUSPEND_NOCANCEL = 421 - SYS___SIGWAIT_NOCANCEL = 422 - SYS___SEMWAIT_SIGNAL_NOCANCEL = 423 - SYS___MAC_MOUNT = 424 - SYS___MAC_GET_MOUNT = 425 - SYS___MAC_GETFSSTAT = 426 - SYS_FSGETPATH = 427 - SYS_AUDIT_SESSION_SELF = 428 - SYS_AUDIT_SESSION_JOIN = 429 - SYS_FILEPORT_MAKEPORT = 430 - SYS_FILEPORT_MAKEFD = 431 - SYS_AUDIT_SESSION_PORT = 432 - SYS_PID_SUSPEND = 433 - SYS_PID_RESUME = 434 - SYS_PID_HIBERNATE = 435 - SYS_PID_SHUTDOWN_SOCKETS = 436 - SYS_SHARED_REGION_MAP_AND_SLIDE_NP = 438 - SYS_KAS_INFO = 439 - SYS_MEMORYSTATUS_CONTROL = 440 - SYS_GUARDED_OPEN_NP = 441 - SYS_GUARDED_CLOSE_NP = 442 - SYS_GUARDED_KQUEUE_NP = 443 - SYS_CHANGE_FDGUARD_NP = 444 - SYS_USRCTL = 445 - 
SYS_PROC_RLIMIT_CONTROL = 446 - SYS_CONNECTX = 447 - SYS_DISCONNECTX = 448 - SYS_PEELOFF = 449 - SYS_SOCKET_DELEGATE = 450 - SYS_TELEMETRY = 451 - SYS_PROC_UUID_POLICY = 452 - SYS_MEMORYSTATUS_GET_LEVEL = 453 - SYS_SYSTEM_OVERRIDE = 454 - SYS_VFS_PURGE = 455 - SYS_SFI_CTL = 456 - SYS_SFI_PIDCTL = 457 - SYS_COALITION = 458 - SYS_COALITION_INFO = 459 - SYS_NECP_MATCH_POLICY = 460 - SYS_GETATTRLISTBULK = 461 - SYS_CLONEFILEAT = 462 - SYS_OPENAT = 463 - SYS_OPENAT_NOCANCEL = 464 - SYS_RENAMEAT = 465 - SYS_FACCESSAT = 466 - SYS_FCHMODAT = 467 - SYS_FCHOWNAT = 468 - SYS_FSTATAT = 469 - SYS_FSTATAT64 = 470 - SYS_LINKAT = 471 - SYS_UNLINKAT = 472 - SYS_READLINKAT = 473 - SYS_SYMLINKAT = 474 - SYS_MKDIRAT = 475 - SYS_GETATTRLISTAT = 476 - SYS_PROC_TRACE_LOG = 477 - SYS_BSDTHREAD_CTL = 478 - SYS_OPENBYID_NP = 479 - SYS_RECVMSG_X = 480 - SYS_SENDMSG_X = 481 - SYS_THREAD_SELFUSAGE = 482 - SYS_CSRCTL = 483 - SYS_GUARDED_OPEN_DPROTECTED_NP = 484 - SYS_GUARDED_WRITE_NP = 485 - SYS_GUARDED_PWRITE_NP = 486 - SYS_GUARDED_WRITEV_NP = 487 - SYS_RENAMEATX_NP = 488 - SYS_MREMAP_ENCRYPTED = 489 - SYS_NETAGENT_TRIGGER = 490 - SYS_STACK_SNAPSHOT_WITH_CONFIG = 491 - SYS_MICROSTACKSHOT = 492 - SYS_GRAB_PGO_DATA = 493 - SYS_PERSONA = 494 - SYS_WORK_INTERVAL_CTL = 499 - SYS_GETENTROPY = 500 - SYS_NECP_OPEN = 501 - SYS_NECP_CLIENT_ACTION = 502 - SYS___NEXUS_OPEN = 503 - SYS___NEXUS_REGISTER = 504 - SYS___NEXUS_DEREGISTER = 505 - SYS___NEXUS_CREATE = 506 - SYS___NEXUS_DESTROY = 507 - SYS___NEXUS_GET_OPT = 508 - SYS___NEXUS_SET_OPT = 509 - SYS___CHANNEL_OPEN = 510 - SYS___CHANNEL_GET_INFO = 511 - SYS___CHANNEL_SYNC = 512 - SYS___CHANNEL_GET_OPT = 513 - SYS___CHANNEL_SET_OPT = 514 - SYS_ULOCK_WAIT = 515 - SYS_ULOCK_WAKE = 516 - SYS_FCLONEFILEAT = 517 - SYS_FS_SNAPSHOT = 518 - SYS_TERMINATE_WITH_PAYLOAD = 520 - SYS_ABORT_WITH_PAYLOAD = 521 - SYS_NECP_SESSION_OPEN = 522 - SYS_NECP_SESSION_ACTION = 523 - SYS_SETATTRLISTAT = 524 - SYS_NET_QOS_GUIDELINE = 525 - SYS_FMOUNT = 526 - SYS_NTP_ADJTIME = 527 
- SYS_NTP_GETTIME = 528 - SYS_OS_FAULT_WITH_PAYLOAD = 529 - SYS_MAXSYSCALL = 530 - SYS_INVALID = 63 -) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go index a2fc91d6a..f8298ff9b 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go @@ -1,6 +1,7 @@ // go run mksysnum.go /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.14.sdk/usr/include/sys/syscall.h // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build amd64 && darwin // +build amd64,darwin package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm.go deleted file mode 100644 index 20d7808ac..000000000 --- a/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm.go +++ /dev/null @@ -1,437 +0,0 @@ -// go run mksysnum.go /Applications/Xcode.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS11.1.sdk/usr/include/sys/syscall.h -// Code generated by the command above; see README.md. DO NOT EDIT. - -// +build arm,darwin - -package unix - -// Deprecated: Use libSystem wrappers instead of direct syscalls. 
-const ( - SYS_SYSCALL = 0 - SYS_EXIT = 1 - SYS_FORK = 2 - SYS_READ = 3 - SYS_WRITE = 4 - SYS_OPEN = 5 - SYS_CLOSE = 6 - SYS_WAIT4 = 7 - SYS_LINK = 9 - SYS_UNLINK = 10 - SYS_CHDIR = 12 - SYS_FCHDIR = 13 - SYS_MKNOD = 14 - SYS_CHMOD = 15 - SYS_CHOWN = 16 - SYS_GETFSSTAT = 18 - SYS_GETPID = 20 - SYS_SETUID = 23 - SYS_GETUID = 24 - SYS_GETEUID = 25 - SYS_PTRACE = 26 - SYS_RECVMSG = 27 - SYS_SENDMSG = 28 - SYS_RECVFROM = 29 - SYS_ACCEPT = 30 - SYS_GETPEERNAME = 31 - SYS_GETSOCKNAME = 32 - SYS_ACCESS = 33 - SYS_CHFLAGS = 34 - SYS_FCHFLAGS = 35 - SYS_SYNC = 36 - SYS_KILL = 37 - SYS_GETPPID = 39 - SYS_DUP = 41 - SYS_PIPE = 42 - SYS_GETEGID = 43 - SYS_SIGACTION = 46 - SYS_GETGID = 47 - SYS_SIGPROCMASK = 48 - SYS_GETLOGIN = 49 - SYS_SETLOGIN = 50 - SYS_ACCT = 51 - SYS_SIGPENDING = 52 - SYS_SIGALTSTACK = 53 - SYS_IOCTL = 54 - SYS_REBOOT = 55 - SYS_REVOKE = 56 - SYS_SYMLINK = 57 - SYS_READLINK = 58 - SYS_EXECVE = 59 - SYS_UMASK = 60 - SYS_CHROOT = 61 - SYS_MSYNC = 65 - SYS_VFORK = 66 - SYS_MUNMAP = 73 - SYS_MPROTECT = 74 - SYS_MADVISE = 75 - SYS_MINCORE = 78 - SYS_GETGROUPS = 79 - SYS_SETGROUPS = 80 - SYS_GETPGRP = 81 - SYS_SETPGID = 82 - SYS_SETITIMER = 83 - SYS_SWAPON = 85 - SYS_GETITIMER = 86 - SYS_GETDTABLESIZE = 89 - SYS_DUP2 = 90 - SYS_FCNTL = 92 - SYS_SELECT = 93 - SYS_FSYNC = 95 - SYS_SETPRIORITY = 96 - SYS_SOCKET = 97 - SYS_CONNECT = 98 - SYS_GETPRIORITY = 100 - SYS_BIND = 104 - SYS_SETSOCKOPT = 105 - SYS_LISTEN = 106 - SYS_SIGSUSPEND = 111 - SYS_GETTIMEOFDAY = 116 - SYS_GETRUSAGE = 117 - SYS_GETSOCKOPT = 118 - SYS_READV = 120 - SYS_WRITEV = 121 - SYS_SETTIMEOFDAY = 122 - SYS_FCHOWN = 123 - SYS_FCHMOD = 124 - SYS_SETREUID = 126 - SYS_SETREGID = 127 - SYS_RENAME = 128 - SYS_FLOCK = 131 - SYS_MKFIFO = 132 - SYS_SENDTO = 133 - SYS_SHUTDOWN = 134 - SYS_SOCKETPAIR = 135 - SYS_MKDIR = 136 - SYS_RMDIR = 137 - SYS_UTIMES = 138 - SYS_FUTIMES = 139 - SYS_ADJTIME = 140 - SYS_GETHOSTUUID = 142 - SYS_SETSID = 147 - SYS_GETPGID = 151 - SYS_SETPRIVEXEC = 152 - SYS_PREAD = 153 - 
SYS_PWRITE = 154 - SYS_NFSSVC = 155 - SYS_STATFS = 157 - SYS_FSTATFS = 158 - SYS_UNMOUNT = 159 - SYS_GETFH = 161 - SYS_QUOTACTL = 165 - SYS_MOUNT = 167 - SYS_CSOPS = 169 - SYS_CSOPS_AUDITTOKEN = 170 - SYS_WAITID = 173 - SYS_KDEBUG_TYPEFILTER = 177 - SYS_KDEBUG_TRACE_STRING = 178 - SYS_KDEBUG_TRACE64 = 179 - SYS_KDEBUG_TRACE = 180 - SYS_SETGID = 181 - SYS_SETEGID = 182 - SYS_SETEUID = 183 - SYS_SIGRETURN = 184 - SYS_THREAD_SELFCOUNTS = 186 - SYS_FDATASYNC = 187 - SYS_STAT = 188 - SYS_FSTAT = 189 - SYS_LSTAT = 190 - SYS_PATHCONF = 191 - SYS_FPATHCONF = 192 - SYS_GETRLIMIT = 194 - SYS_SETRLIMIT = 195 - SYS_GETDIRENTRIES = 196 - SYS_MMAP = 197 - SYS_LSEEK = 199 - SYS_TRUNCATE = 200 - SYS_FTRUNCATE = 201 - SYS_SYSCTL = 202 - SYS_MLOCK = 203 - SYS_MUNLOCK = 204 - SYS_UNDELETE = 205 - SYS_OPEN_DPROTECTED_NP = 216 - SYS_GETATTRLIST = 220 - SYS_SETATTRLIST = 221 - SYS_GETDIRENTRIESATTR = 222 - SYS_EXCHANGEDATA = 223 - SYS_SEARCHFS = 225 - SYS_DELETE = 226 - SYS_COPYFILE = 227 - SYS_FGETATTRLIST = 228 - SYS_FSETATTRLIST = 229 - SYS_POLL = 230 - SYS_WATCHEVENT = 231 - SYS_WAITEVENT = 232 - SYS_MODWATCH = 233 - SYS_GETXATTR = 234 - SYS_FGETXATTR = 235 - SYS_SETXATTR = 236 - SYS_FSETXATTR = 237 - SYS_REMOVEXATTR = 238 - SYS_FREMOVEXATTR = 239 - SYS_LISTXATTR = 240 - SYS_FLISTXATTR = 241 - SYS_FSCTL = 242 - SYS_INITGROUPS = 243 - SYS_POSIX_SPAWN = 244 - SYS_FFSCTL = 245 - SYS_NFSCLNT = 247 - SYS_FHOPEN = 248 - SYS_MINHERIT = 250 - SYS_SEMSYS = 251 - SYS_MSGSYS = 252 - SYS_SHMSYS = 253 - SYS_SEMCTL = 254 - SYS_SEMGET = 255 - SYS_SEMOP = 256 - SYS_MSGCTL = 258 - SYS_MSGGET = 259 - SYS_MSGSND = 260 - SYS_MSGRCV = 261 - SYS_SHMAT = 262 - SYS_SHMCTL = 263 - SYS_SHMDT = 264 - SYS_SHMGET = 265 - SYS_SHM_OPEN = 266 - SYS_SHM_UNLINK = 267 - SYS_SEM_OPEN = 268 - SYS_SEM_CLOSE = 269 - SYS_SEM_UNLINK = 270 - SYS_SEM_WAIT = 271 - SYS_SEM_TRYWAIT = 272 - SYS_SEM_POST = 273 - SYS_SYSCTLBYNAME = 274 - SYS_OPEN_EXTENDED = 277 - SYS_UMASK_EXTENDED = 278 - SYS_STAT_EXTENDED = 279 - 
SYS_LSTAT_EXTENDED = 280 - SYS_FSTAT_EXTENDED = 281 - SYS_CHMOD_EXTENDED = 282 - SYS_FCHMOD_EXTENDED = 283 - SYS_ACCESS_EXTENDED = 284 - SYS_SETTID = 285 - SYS_GETTID = 286 - SYS_SETSGROUPS = 287 - SYS_GETSGROUPS = 288 - SYS_SETWGROUPS = 289 - SYS_GETWGROUPS = 290 - SYS_MKFIFO_EXTENDED = 291 - SYS_MKDIR_EXTENDED = 292 - SYS_IDENTITYSVC = 293 - SYS_SHARED_REGION_CHECK_NP = 294 - SYS_VM_PRESSURE_MONITOR = 296 - SYS_PSYNCH_RW_LONGRDLOCK = 297 - SYS_PSYNCH_RW_YIELDWRLOCK = 298 - SYS_PSYNCH_RW_DOWNGRADE = 299 - SYS_PSYNCH_RW_UPGRADE = 300 - SYS_PSYNCH_MUTEXWAIT = 301 - SYS_PSYNCH_MUTEXDROP = 302 - SYS_PSYNCH_CVBROAD = 303 - SYS_PSYNCH_CVSIGNAL = 304 - SYS_PSYNCH_CVWAIT = 305 - SYS_PSYNCH_RW_RDLOCK = 306 - SYS_PSYNCH_RW_WRLOCK = 307 - SYS_PSYNCH_RW_UNLOCK = 308 - SYS_PSYNCH_RW_UNLOCK2 = 309 - SYS_GETSID = 310 - SYS_SETTID_WITH_PID = 311 - SYS_PSYNCH_CVCLRPREPOST = 312 - SYS_AIO_FSYNC = 313 - SYS_AIO_RETURN = 314 - SYS_AIO_SUSPEND = 315 - SYS_AIO_CANCEL = 316 - SYS_AIO_ERROR = 317 - SYS_AIO_READ = 318 - SYS_AIO_WRITE = 319 - SYS_LIO_LISTIO = 320 - SYS_IOPOLICYSYS = 322 - SYS_PROCESS_POLICY = 323 - SYS_MLOCKALL = 324 - SYS_MUNLOCKALL = 325 - SYS_ISSETUGID = 327 - SYS___PTHREAD_KILL = 328 - SYS___PTHREAD_SIGMASK = 329 - SYS___SIGWAIT = 330 - SYS___DISABLE_THREADSIGNAL = 331 - SYS___PTHREAD_MARKCANCEL = 332 - SYS___PTHREAD_CANCELED = 333 - SYS___SEMWAIT_SIGNAL = 334 - SYS_PROC_INFO = 336 - SYS_SENDFILE = 337 - SYS_STAT64 = 338 - SYS_FSTAT64 = 339 - SYS_LSTAT64 = 340 - SYS_STAT64_EXTENDED = 341 - SYS_LSTAT64_EXTENDED = 342 - SYS_FSTAT64_EXTENDED = 343 - SYS_GETDIRENTRIES64 = 344 - SYS_STATFS64 = 345 - SYS_FSTATFS64 = 346 - SYS_GETFSSTAT64 = 347 - SYS___PTHREAD_CHDIR = 348 - SYS___PTHREAD_FCHDIR = 349 - SYS_AUDIT = 350 - SYS_AUDITON = 351 - SYS_GETAUID = 353 - SYS_SETAUID = 354 - SYS_GETAUDIT_ADDR = 357 - SYS_SETAUDIT_ADDR = 358 - SYS_AUDITCTL = 359 - SYS_BSDTHREAD_CREATE = 360 - SYS_BSDTHREAD_TERMINATE = 361 - SYS_KQUEUE = 362 - SYS_KEVENT = 363 - SYS_LCHOWN = 364 - 
SYS_BSDTHREAD_REGISTER = 366 - SYS_WORKQ_OPEN = 367 - SYS_WORKQ_KERNRETURN = 368 - SYS_KEVENT64 = 369 - SYS___OLD_SEMWAIT_SIGNAL = 370 - SYS___OLD_SEMWAIT_SIGNAL_NOCANCEL = 371 - SYS_THREAD_SELFID = 372 - SYS_LEDGER = 373 - SYS_KEVENT_QOS = 374 - SYS_KEVENT_ID = 375 - SYS___MAC_EXECVE = 380 - SYS___MAC_SYSCALL = 381 - SYS___MAC_GET_FILE = 382 - SYS___MAC_SET_FILE = 383 - SYS___MAC_GET_LINK = 384 - SYS___MAC_SET_LINK = 385 - SYS___MAC_GET_PROC = 386 - SYS___MAC_SET_PROC = 387 - SYS___MAC_GET_FD = 388 - SYS___MAC_SET_FD = 389 - SYS___MAC_GET_PID = 390 - SYS_PSELECT = 394 - SYS_PSELECT_NOCANCEL = 395 - SYS_READ_NOCANCEL = 396 - SYS_WRITE_NOCANCEL = 397 - SYS_OPEN_NOCANCEL = 398 - SYS_CLOSE_NOCANCEL = 399 - SYS_WAIT4_NOCANCEL = 400 - SYS_RECVMSG_NOCANCEL = 401 - SYS_SENDMSG_NOCANCEL = 402 - SYS_RECVFROM_NOCANCEL = 403 - SYS_ACCEPT_NOCANCEL = 404 - SYS_MSYNC_NOCANCEL = 405 - SYS_FCNTL_NOCANCEL = 406 - SYS_SELECT_NOCANCEL = 407 - SYS_FSYNC_NOCANCEL = 408 - SYS_CONNECT_NOCANCEL = 409 - SYS_SIGSUSPEND_NOCANCEL = 410 - SYS_READV_NOCANCEL = 411 - SYS_WRITEV_NOCANCEL = 412 - SYS_SENDTO_NOCANCEL = 413 - SYS_PREAD_NOCANCEL = 414 - SYS_PWRITE_NOCANCEL = 415 - SYS_WAITID_NOCANCEL = 416 - SYS_POLL_NOCANCEL = 417 - SYS_MSGSND_NOCANCEL = 418 - SYS_MSGRCV_NOCANCEL = 419 - SYS_SEM_WAIT_NOCANCEL = 420 - SYS_AIO_SUSPEND_NOCANCEL = 421 - SYS___SIGWAIT_NOCANCEL = 422 - SYS___SEMWAIT_SIGNAL_NOCANCEL = 423 - SYS___MAC_MOUNT = 424 - SYS___MAC_GET_MOUNT = 425 - SYS___MAC_GETFSSTAT = 426 - SYS_FSGETPATH = 427 - SYS_AUDIT_SESSION_SELF = 428 - SYS_AUDIT_SESSION_JOIN = 429 - SYS_FILEPORT_MAKEPORT = 430 - SYS_FILEPORT_MAKEFD = 431 - SYS_AUDIT_SESSION_PORT = 432 - SYS_PID_SUSPEND = 433 - SYS_PID_RESUME = 434 - SYS_PID_HIBERNATE = 435 - SYS_PID_SHUTDOWN_SOCKETS = 436 - SYS_SHARED_REGION_MAP_AND_SLIDE_NP = 438 - SYS_KAS_INFO = 439 - SYS_MEMORYSTATUS_CONTROL = 440 - SYS_GUARDED_OPEN_NP = 441 - SYS_GUARDED_CLOSE_NP = 442 - SYS_GUARDED_KQUEUE_NP = 443 - SYS_CHANGE_FDGUARD_NP = 444 - SYS_USRCTL = 445 - 
SYS_PROC_RLIMIT_CONTROL = 446 - SYS_CONNECTX = 447 - SYS_DISCONNECTX = 448 - SYS_PEELOFF = 449 - SYS_SOCKET_DELEGATE = 450 - SYS_TELEMETRY = 451 - SYS_PROC_UUID_POLICY = 452 - SYS_MEMORYSTATUS_GET_LEVEL = 453 - SYS_SYSTEM_OVERRIDE = 454 - SYS_VFS_PURGE = 455 - SYS_SFI_CTL = 456 - SYS_SFI_PIDCTL = 457 - SYS_COALITION = 458 - SYS_COALITION_INFO = 459 - SYS_NECP_MATCH_POLICY = 460 - SYS_GETATTRLISTBULK = 461 - SYS_CLONEFILEAT = 462 - SYS_OPENAT = 463 - SYS_OPENAT_NOCANCEL = 464 - SYS_RENAMEAT = 465 - SYS_FACCESSAT = 466 - SYS_FCHMODAT = 467 - SYS_FCHOWNAT = 468 - SYS_FSTATAT = 469 - SYS_FSTATAT64 = 470 - SYS_LINKAT = 471 - SYS_UNLINKAT = 472 - SYS_READLINKAT = 473 - SYS_SYMLINKAT = 474 - SYS_MKDIRAT = 475 - SYS_GETATTRLISTAT = 476 - SYS_PROC_TRACE_LOG = 477 - SYS_BSDTHREAD_CTL = 478 - SYS_OPENBYID_NP = 479 - SYS_RECVMSG_X = 480 - SYS_SENDMSG_X = 481 - SYS_THREAD_SELFUSAGE = 482 - SYS_CSRCTL = 483 - SYS_GUARDED_OPEN_DPROTECTED_NP = 484 - SYS_GUARDED_WRITE_NP = 485 - SYS_GUARDED_PWRITE_NP = 486 - SYS_GUARDED_WRITEV_NP = 487 - SYS_RENAMEATX_NP = 488 - SYS_MREMAP_ENCRYPTED = 489 - SYS_NETAGENT_TRIGGER = 490 - SYS_STACK_SNAPSHOT_WITH_CONFIG = 491 - SYS_MICROSTACKSHOT = 492 - SYS_GRAB_PGO_DATA = 493 - SYS_PERSONA = 494 - SYS_WORK_INTERVAL_CTL = 499 - SYS_GETENTROPY = 500 - SYS_NECP_OPEN = 501 - SYS_NECP_CLIENT_ACTION = 502 - SYS___NEXUS_OPEN = 503 - SYS___NEXUS_REGISTER = 504 - SYS___NEXUS_DEREGISTER = 505 - SYS___NEXUS_CREATE = 506 - SYS___NEXUS_DESTROY = 507 - SYS___NEXUS_GET_OPT = 508 - SYS___NEXUS_SET_OPT = 509 - SYS___CHANNEL_OPEN = 510 - SYS___CHANNEL_GET_INFO = 511 - SYS___CHANNEL_SYNC = 512 - SYS___CHANNEL_GET_OPT = 513 - SYS___CHANNEL_SET_OPT = 514 - SYS_ULOCK_WAIT = 515 - SYS_ULOCK_WAKE = 516 - SYS_FCLONEFILEAT = 517 - SYS_FS_SNAPSHOT = 518 - SYS_TERMINATE_WITH_PAYLOAD = 520 - SYS_ABORT_WITH_PAYLOAD = 521 - SYS_NECP_SESSION_OPEN = 522 - SYS_NECP_SESSION_ACTION = 523 - SYS_SETATTRLISTAT = 524 - SYS_NET_QOS_GUIDELINE = 525 - SYS_FMOUNT = 526 - SYS_NTP_ADJTIME = 527 
- SYS_NTP_GETTIME = 528 - SYS_OS_FAULT_WITH_PAYLOAD = 529 - SYS_MAXSYSCALL = 530 - SYS_INVALID = 63 -) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go index 527b9588c..5eb433bbf 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go @@ -1,6 +1,7 @@ // go run mksysnum.go /Applications/Xcode.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS11.1.sdk/usr/include/sys/syscall.h // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build arm64 && darwin // +build arm64,darwin package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go index 9912c6ee3..703675c0c 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go @@ -1,6 +1,7 @@ // go run mksysnum.go https://gitweb.dragonflybsd.org/dragonfly.git/blob_plain/HEAD:/sys/kern/syscalls.master // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build amd64 && dragonfly // +build amd64,dragonfly package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go index 9474974b6..59d5dfc20 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go @@ -1,6 +1,7 @@ // go run mksysnum.go https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master // Code generated by the command above; see README.md. DO NOT EDIT. 
+//go:build 386 && freebsd // +build 386,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go index 48a7beae7..342d471d2 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go @@ -1,6 +1,7 @@ // go run mksysnum.go https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build amd64 && freebsd // +build amd64,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go index 4a6dfd4a7..e2e3d72c5 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go @@ -1,6 +1,7 @@ // go run mksysnum.go https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build arm && freebsd // +build arm,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go index 3e51af8ed..61ad5ca3c 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go @@ -1,6 +1,7 @@ // go run mksysnum.go https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master // Code generated by the command above; see README.md. DO NOT EDIT. 
+//go:build arm64 && freebsd // +build arm64,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go index f6742bdee..fbc59b7fd 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go @@ -1,6 +1,7 @@ // go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include -m32 /tmp/include/asm/unistd.h // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build 386 && linux // +build 386,linux package unix @@ -436,4 +437,6 @@ const ( SYS_PIDFD_GETFD = 438 SYS_FACCESSAT2 = 439 SYS_PROCESS_MADVISE = 440 + SYS_EPOLL_PWAIT2 = 441 + SYS_MOUNT_SETATTR = 442 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go index f7e525573..04d16d771 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go @@ -1,6 +1,7 @@ // go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include -m64 /tmp/include/asm/unistd.h // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build amd64 && linux // +build amd64,linux package unix @@ -358,4 +359,6 @@ const ( SYS_PIDFD_GETFD = 438 SYS_FACCESSAT2 = 439 SYS_PROCESS_MADVISE = 440 + SYS_EPOLL_PWAIT2 = 441 + SYS_MOUNT_SETATTR = 442 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go index 3f60977da..3b1c10513 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go @@ -1,6 +1,7 @@ // go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h // Code generated by the command above; see README.md. DO NOT EDIT. 
+//go:build arm && linux // +build arm,linux package unix @@ -400,4 +401,6 @@ const ( SYS_PIDFD_GETFD = 438 SYS_FACCESSAT2 = 439 SYS_PROCESS_MADVISE = 440 + SYS_EPOLL_PWAIT2 = 441 + SYS_MOUNT_SETATTR = 442 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go index dbedf4cba..3198adcf7 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -1,6 +1,7 @@ // go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include -fsigned-char /tmp/include/asm/unistd.h // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build arm64 && linux // +build arm64,linux package unix @@ -303,4 +304,6 @@ const ( SYS_PIDFD_GETFD = 438 SYS_FACCESSAT2 = 439 SYS_PROCESS_MADVISE = 440 + SYS_EPOLL_PWAIT2 = 441 + SYS_MOUNT_SETATTR = 442 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go index eeff7e1dc..c877ec6e6 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go @@ -1,6 +1,7 @@ // go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build mips && linux // +build mips,linux package unix @@ -421,4 +422,6 @@ const ( SYS_PIDFD_GETFD = 4438 SYS_FACCESSAT2 = 4439 SYS_PROCESS_MADVISE = 4440 + SYS_EPOLL_PWAIT2 = 4441 + SYS_MOUNT_SETATTR = 4442 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go index 73cfa535c..b5f290372 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go @@ -1,6 +1,7 @@ // go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h // Code generated by the command above; see README.md. DO NOT EDIT. 
+//go:build mips64 && linux // +build mips64,linux package unix @@ -351,4 +352,6 @@ const ( SYS_PIDFD_GETFD = 5438 SYS_FACCESSAT2 = 5439 SYS_PROCESS_MADVISE = 5440 + SYS_EPOLL_PWAIT2 = 5441 + SYS_MOUNT_SETATTR = 5442 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go index be74729e0..46077689a 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go @@ -1,6 +1,7 @@ // go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build mips64le && linux // +build mips64le,linux package unix @@ -351,4 +352,6 @@ const ( SYS_PIDFD_GETFD = 5438 SYS_FACCESSAT2 = 5439 SYS_PROCESS_MADVISE = 5440 + SYS_EPOLL_PWAIT2 = 5441 + SYS_MOUNT_SETATTR = 5442 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go index 2a1047c81..80e6696b3 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go @@ -1,6 +1,7 @@ // go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build mipsle && linux // +build mipsle,linux package unix @@ -421,4 +422,6 @@ const ( SYS_PIDFD_GETFD = 4438 SYS_FACCESSAT2 = 4439 SYS_PROCESS_MADVISE = 4440 + SYS_EPOLL_PWAIT2 = 4441 + SYS_MOUNT_SETATTR = 4442 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go new file mode 100644 index 000000000..b9d697ffb --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go @@ -0,0 +1,434 @@ +// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h +// Code generated by the command above; see README.md. DO NOT EDIT. 
+ +//go:build ppc && linux +// +build ppc,linux + +package unix + +const ( + SYS_RESTART_SYSCALL = 0 + SYS_EXIT = 1 + SYS_FORK = 2 + SYS_READ = 3 + SYS_WRITE = 4 + SYS_OPEN = 5 + SYS_CLOSE = 6 + SYS_WAITPID = 7 + SYS_CREAT = 8 + SYS_LINK = 9 + SYS_UNLINK = 10 + SYS_EXECVE = 11 + SYS_CHDIR = 12 + SYS_TIME = 13 + SYS_MKNOD = 14 + SYS_CHMOD = 15 + SYS_LCHOWN = 16 + SYS_BREAK = 17 + SYS_OLDSTAT = 18 + SYS_LSEEK = 19 + SYS_GETPID = 20 + SYS_MOUNT = 21 + SYS_UMOUNT = 22 + SYS_SETUID = 23 + SYS_GETUID = 24 + SYS_STIME = 25 + SYS_PTRACE = 26 + SYS_ALARM = 27 + SYS_OLDFSTAT = 28 + SYS_PAUSE = 29 + SYS_UTIME = 30 + SYS_STTY = 31 + SYS_GTTY = 32 + SYS_ACCESS = 33 + SYS_NICE = 34 + SYS_FTIME = 35 + SYS_SYNC = 36 + SYS_KILL = 37 + SYS_RENAME = 38 + SYS_MKDIR = 39 + SYS_RMDIR = 40 + SYS_DUP = 41 + SYS_PIPE = 42 + SYS_TIMES = 43 + SYS_PROF = 44 + SYS_BRK = 45 + SYS_SETGID = 46 + SYS_GETGID = 47 + SYS_SIGNAL = 48 + SYS_GETEUID = 49 + SYS_GETEGID = 50 + SYS_ACCT = 51 + SYS_UMOUNT2 = 52 + SYS_LOCK = 53 + SYS_IOCTL = 54 + SYS_FCNTL = 55 + SYS_MPX = 56 + SYS_SETPGID = 57 + SYS_ULIMIT = 58 + SYS_OLDOLDUNAME = 59 + SYS_UMASK = 60 + SYS_CHROOT = 61 + SYS_USTAT = 62 + SYS_DUP2 = 63 + SYS_GETPPID = 64 + SYS_GETPGRP = 65 + SYS_SETSID = 66 + SYS_SIGACTION = 67 + SYS_SGETMASK = 68 + SYS_SSETMASK = 69 + SYS_SETREUID = 70 + SYS_SETREGID = 71 + SYS_SIGSUSPEND = 72 + SYS_SIGPENDING = 73 + SYS_SETHOSTNAME = 74 + SYS_SETRLIMIT = 75 + SYS_GETRLIMIT = 76 + SYS_GETRUSAGE = 77 + SYS_GETTIMEOFDAY = 78 + SYS_SETTIMEOFDAY = 79 + SYS_GETGROUPS = 80 + SYS_SETGROUPS = 81 + SYS_SELECT = 82 + SYS_SYMLINK = 83 + SYS_OLDLSTAT = 84 + SYS_READLINK = 85 + SYS_USELIB = 86 + SYS_SWAPON = 87 + SYS_REBOOT = 88 + SYS_READDIR = 89 + SYS_MMAP = 90 + SYS_MUNMAP = 91 + SYS_TRUNCATE = 92 + SYS_FTRUNCATE = 93 + SYS_FCHMOD = 94 + SYS_FCHOWN = 95 + SYS_GETPRIORITY = 96 + SYS_SETPRIORITY = 97 + SYS_PROFIL = 98 + SYS_STATFS = 99 + SYS_FSTATFS = 100 + SYS_IOPERM = 101 + SYS_SOCKETCALL = 102 + SYS_SYSLOG = 103 + SYS_SETITIMER = 104 
+ SYS_GETITIMER = 105 + SYS_STAT = 106 + SYS_LSTAT = 107 + SYS_FSTAT = 108 + SYS_OLDUNAME = 109 + SYS_IOPL = 110 + SYS_VHANGUP = 111 + SYS_IDLE = 112 + SYS_VM86 = 113 + SYS_WAIT4 = 114 + SYS_SWAPOFF = 115 + SYS_SYSINFO = 116 + SYS_IPC = 117 + SYS_FSYNC = 118 + SYS_SIGRETURN = 119 + SYS_CLONE = 120 + SYS_SETDOMAINNAME = 121 + SYS_UNAME = 122 + SYS_MODIFY_LDT = 123 + SYS_ADJTIMEX = 124 + SYS_MPROTECT = 125 + SYS_SIGPROCMASK = 126 + SYS_CREATE_MODULE = 127 + SYS_INIT_MODULE = 128 + SYS_DELETE_MODULE = 129 + SYS_GET_KERNEL_SYMS = 130 + SYS_QUOTACTL = 131 + SYS_GETPGID = 132 + SYS_FCHDIR = 133 + SYS_BDFLUSH = 134 + SYS_SYSFS = 135 + SYS_PERSONALITY = 136 + SYS_AFS_SYSCALL = 137 + SYS_SETFSUID = 138 + SYS_SETFSGID = 139 + SYS__LLSEEK = 140 + SYS_GETDENTS = 141 + SYS__NEWSELECT = 142 + SYS_FLOCK = 143 + SYS_MSYNC = 144 + SYS_READV = 145 + SYS_WRITEV = 146 + SYS_GETSID = 147 + SYS_FDATASYNC = 148 + SYS__SYSCTL = 149 + SYS_MLOCK = 150 + SYS_MUNLOCK = 151 + SYS_MLOCKALL = 152 + SYS_MUNLOCKALL = 153 + SYS_SCHED_SETPARAM = 154 + SYS_SCHED_GETPARAM = 155 + SYS_SCHED_SETSCHEDULER = 156 + SYS_SCHED_GETSCHEDULER = 157 + SYS_SCHED_YIELD = 158 + SYS_SCHED_GET_PRIORITY_MAX = 159 + SYS_SCHED_GET_PRIORITY_MIN = 160 + SYS_SCHED_RR_GET_INTERVAL = 161 + SYS_NANOSLEEP = 162 + SYS_MREMAP = 163 + SYS_SETRESUID = 164 + SYS_GETRESUID = 165 + SYS_QUERY_MODULE = 166 + SYS_POLL = 167 + SYS_NFSSERVCTL = 168 + SYS_SETRESGID = 169 + SYS_GETRESGID = 170 + SYS_PRCTL = 171 + SYS_RT_SIGRETURN = 172 + SYS_RT_SIGACTION = 173 + SYS_RT_SIGPROCMASK = 174 + SYS_RT_SIGPENDING = 175 + SYS_RT_SIGTIMEDWAIT = 176 + SYS_RT_SIGQUEUEINFO = 177 + SYS_RT_SIGSUSPEND = 178 + SYS_PREAD64 = 179 + SYS_PWRITE64 = 180 + SYS_CHOWN = 181 + SYS_GETCWD = 182 + SYS_CAPGET = 183 + SYS_CAPSET = 184 + SYS_SIGALTSTACK = 185 + SYS_SENDFILE = 186 + SYS_GETPMSG = 187 + SYS_PUTPMSG = 188 + SYS_VFORK = 189 + SYS_UGETRLIMIT = 190 + SYS_READAHEAD = 191 + SYS_MMAP2 = 192 + SYS_TRUNCATE64 = 193 + SYS_FTRUNCATE64 = 194 + SYS_STAT64 = 195 + 
SYS_LSTAT64 = 196 + SYS_FSTAT64 = 197 + SYS_PCICONFIG_READ = 198 + SYS_PCICONFIG_WRITE = 199 + SYS_PCICONFIG_IOBASE = 200 + SYS_MULTIPLEXER = 201 + SYS_GETDENTS64 = 202 + SYS_PIVOT_ROOT = 203 + SYS_FCNTL64 = 204 + SYS_MADVISE = 205 + SYS_MINCORE = 206 + SYS_GETTID = 207 + SYS_TKILL = 208 + SYS_SETXATTR = 209 + SYS_LSETXATTR = 210 + SYS_FSETXATTR = 211 + SYS_GETXATTR = 212 + SYS_LGETXATTR = 213 + SYS_FGETXATTR = 214 + SYS_LISTXATTR = 215 + SYS_LLISTXATTR = 216 + SYS_FLISTXATTR = 217 + SYS_REMOVEXATTR = 218 + SYS_LREMOVEXATTR = 219 + SYS_FREMOVEXATTR = 220 + SYS_FUTEX = 221 + SYS_SCHED_SETAFFINITY = 222 + SYS_SCHED_GETAFFINITY = 223 + SYS_TUXCALL = 225 + SYS_SENDFILE64 = 226 + SYS_IO_SETUP = 227 + SYS_IO_DESTROY = 228 + SYS_IO_GETEVENTS = 229 + SYS_IO_SUBMIT = 230 + SYS_IO_CANCEL = 231 + SYS_SET_TID_ADDRESS = 232 + SYS_FADVISE64 = 233 + SYS_EXIT_GROUP = 234 + SYS_LOOKUP_DCOOKIE = 235 + SYS_EPOLL_CREATE = 236 + SYS_EPOLL_CTL = 237 + SYS_EPOLL_WAIT = 238 + SYS_REMAP_FILE_PAGES = 239 + SYS_TIMER_CREATE = 240 + SYS_TIMER_SETTIME = 241 + SYS_TIMER_GETTIME = 242 + SYS_TIMER_GETOVERRUN = 243 + SYS_TIMER_DELETE = 244 + SYS_CLOCK_SETTIME = 245 + SYS_CLOCK_GETTIME = 246 + SYS_CLOCK_GETRES = 247 + SYS_CLOCK_NANOSLEEP = 248 + SYS_SWAPCONTEXT = 249 + SYS_TGKILL = 250 + SYS_UTIMES = 251 + SYS_STATFS64 = 252 + SYS_FSTATFS64 = 253 + SYS_FADVISE64_64 = 254 + SYS_RTAS = 255 + SYS_SYS_DEBUG_SETCONTEXT = 256 + SYS_MIGRATE_PAGES = 258 + SYS_MBIND = 259 + SYS_GET_MEMPOLICY = 260 + SYS_SET_MEMPOLICY = 261 + SYS_MQ_OPEN = 262 + SYS_MQ_UNLINK = 263 + SYS_MQ_TIMEDSEND = 264 + SYS_MQ_TIMEDRECEIVE = 265 + SYS_MQ_NOTIFY = 266 + SYS_MQ_GETSETATTR = 267 + SYS_KEXEC_LOAD = 268 + SYS_ADD_KEY = 269 + SYS_REQUEST_KEY = 270 + SYS_KEYCTL = 271 + SYS_WAITID = 272 + SYS_IOPRIO_SET = 273 + SYS_IOPRIO_GET = 274 + SYS_INOTIFY_INIT = 275 + SYS_INOTIFY_ADD_WATCH = 276 + SYS_INOTIFY_RM_WATCH = 277 + SYS_SPU_RUN = 278 + SYS_SPU_CREATE = 279 + SYS_PSELECT6 = 280 + SYS_PPOLL = 281 + SYS_UNSHARE = 282 + SYS_SPLICE 
= 283 + SYS_TEE = 284 + SYS_VMSPLICE = 285 + SYS_OPENAT = 286 + SYS_MKDIRAT = 287 + SYS_MKNODAT = 288 + SYS_FCHOWNAT = 289 + SYS_FUTIMESAT = 290 + SYS_FSTATAT64 = 291 + SYS_UNLINKAT = 292 + SYS_RENAMEAT = 293 + SYS_LINKAT = 294 + SYS_SYMLINKAT = 295 + SYS_READLINKAT = 296 + SYS_FCHMODAT = 297 + SYS_FACCESSAT = 298 + SYS_GET_ROBUST_LIST = 299 + SYS_SET_ROBUST_LIST = 300 + SYS_MOVE_PAGES = 301 + SYS_GETCPU = 302 + SYS_EPOLL_PWAIT = 303 + SYS_UTIMENSAT = 304 + SYS_SIGNALFD = 305 + SYS_TIMERFD_CREATE = 306 + SYS_EVENTFD = 307 + SYS_SYNC_FILE_RANGE2 = 308 + SYS_FALLOCATE = 309 + SYS_SUBPAGE_PROT = 310 + SYS_TIMERFD_SETTIME = 311 + SYS_TIMERFD_GETTIME = 312 + SYS_SIGNALFD4 = 313 + SYS_EVENTFD2 = 314 + SYS_EPOLL_CREATE1 = 315 + SYS_DUP3 = 316 + SYS_PIPE2 = 317 + SYS_INOTIFY_INIT1 = 318 + SYS_PERF_EVENT_OPEN = 319 + SYS_PREADV = 320 + SYS_PWRITEV = 321 + SYS_RT_TGSIGQUEUEINFO = 322 + SYS_FANOTIFY_INIT = 323 + SYS_FANOTIFY_MARK = 324 + SYS_PRLIMIT64 = 325 + SYS_SOCKET = 326 + SYS_BIND = 327 + SYS_CONNECT = 328 + SYS_LISTEN = 329 + SYS_ACCEPT = 330 + SYS_GETSOCKNAME = 331 + SYS_GETPEERNAME = 332 + SYS_SOCKETPAIR = 333 + SYS_SEND = 334 + SYS_SENDTO = 335 + SYS_RECV = 336 + SYS_RECVFROM = 337 + SYS_SHUTDOWN = 338 + SYS_SETSOCKOPT = 339 + SYS_GETSOCKOPT = 340 + SYS_SENDMSG = 341 + SYS_RECVMSG = 342 + SYS_RECVMMSG = 343 + SYS_ACCEPT4 = 344 + SYS_NAME_TO_HANDLE_AT = 345 + SYS_OPEN_BY_HANDLE_AT = 346 + SYS_CLOCK_ADJTIME = 347 + SYS_SYNCFS = 348 + SYS_SENDMMSG = 349 + SYS_SETNS = 350 + SYS_PROCESS_VM_READV = 351 + SYS_PROCESS_VM_WRITEV = 352 + SYS_FINIT_MODULE = 353 + SYS_KCMP = 354 + SYS_SCHED_SETATTR = 355 + SYS_SCHED_GETATTR = 356 + SYS_RENAMEAT2 = 357 + SYS_SECCOMP = 358 + SYS_GETRANDOM = 359 + SYS_MEMFD_CREATE = 360 + SYS_BPF = 361 + SYS_EXECVEAT = 362 + SYS_SWITCH_ENDIAN = 363 + SYS_USERFAULTFD = 364 + SYS_MEMBARRIER = 365 + SYS_MLOCK2 = 378 + SYS_COPY_FILE_RANGE = 379 + SYS_PREADV2 = 380 + SYS_PWRITEV2 = 381 + SYS_KEXEC_FILE_LOAD = 382 + SYS_STATX = 383 + SYS_PKEY_ALLOC = 
384 + SYS_PKEY_FREE = 385 + SYS_PKEY_MPROTECT = 386 + SYS_RSEQ = 387 + SYS_IO_PGETEVENTS = 388 + SYS_SEMGET = 393 + SYS_SEMCTL = 394 + SYS_SHMGET = 395 + SYS_SHMCTL = 396 + SYS_SHMAT = 397 + SYS_SHMDT = 398 + SYS_MSGGET = 399 + SYS_MSGSND = 400 + SYS_MSGRCV = 401 + SYS_MSGCTL = 402 + SYS_CLOCK_GETTIME64 = 403 + SYS_CLOCK_SETTIME64 = 404 + SYS_CLOCK_ADJTIME64 = 405 + SYS_CLOCK_GETRES_TIME64 = 406 + SYS_CLOCK_NANOSLEEP_TIME64 = 407 + SYS_TIMER_GETTIME64 = 408 + SYS_TIMER_SETTIME64 = 409 + SYS_TIMERFD_GETTIME64 = 410 + SYS_TIMERFD_SETTIME64 = 411 + SYS_UTIMENSAT_TIME64 = 412 + SYS_PSELECT6_TIME64 = 413 + SYS_PPOLL_TIME64 = 414 + SYS_IO_PGETEVENTS_TIME64 = 416 + SYS_RECVMMSG_TIME64 = 417 + SYS_MQ_TIMEDSEND_TIME64 = 418 + SYS_MQ_TIMEDRECEIVE_TIME64 = 419 + SYS_SEMTIMEDOP_TIME64 = 420 + SYS_RT_SIGTIMEDWAIT_TIME64 = 421 + SYS_FUTEX_TIME64 = 422 + SYS_SCHED_RR_GET_INTERVAL_TIME64 = 423 + SYS_PIDFD_SEND_SIGNAL = 424 + SYS_IO_URING_SETUP = 425 + SYS_IO_URING_ENTER = 426 + SYS_IO_URING_REGISTER = 427 + SYS_OPEN_TREE = 428 + SYS_MOVE_MOUNT = 429 + SYS_FSOPEN = 430 + SYS_FSCONFIG = 431 + SYS_FSMOUNT = 432 + SYS_FSPICK = 433 + SYS_PIDFD_OPEN = 434 + SYS_CLONE3 = 435 + SYS_CLOSE_RANGE = 436 + SYS_OPENAT2 = 437 + SYS_PIDFD_GETFD = 438 + SYS_FACCESSAT2 = 439 + SYS_PROCESS_MADVISE = 440 + SYS_EPOLL_PWAIT2 = 441 + SYS_MOUNT_SETATTR = 442 +) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go index 32707428c..08edc54d3 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go @@ -1,6 +1,7 @@ // go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h // Code generated by the command above; see README.md. DO NOT EDIT. 
+//go:build ppc64 && linux // +build ppc64,linux package unix @@ -400,4 +401,6 @@ const ( SYS_PIDFD_GETFD = 438 SYS_FACCESSAT2 = 439 SYS_PROCESS_MADVISE = 440 + SYS_EPOLL_PWAIT2 = 441 + SYS_MOUNT_SETATTR = 442 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go index a58572f78..33b33b083 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go @@ -1,6 +1,7 @@ // go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build ppc64le && linux // +build ppc64le,linux package unix @@ -400,4 +401,6 @@ const ( SYS_PIDFD_GETFD = 438 SYS_FACCESSAT2 = 439 SYS_PROCESS_MADVISE = 440 + SYS_EPOLL_PWAIT2 = 441 + SYS_MOUNT_SETATTR = 442 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go index 72a65b760..66c8a8e09 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -1,6 +1,7 @@ // go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build riscv64 && linux // +build riscv64,linux package unix @@ -302,4 +303,6 @@ const ( SYS_PIDFD_GETFD = 438 SYS_FACCESSAT2 = 439 SYS_PROCESS_MADVISE = 440 + SYS_EPOLL_PWAIT2 = 441 + SYS_MOUNT_SETATTR = 442 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go index 1fb9ae5d4..aea5760ce 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go @@ -1,6 +1,7 @@ // go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include -fsigned-char /tmp/include/asm/unistd.h // Code generated by the command above; see README.md. 
DO NOT EDIT. +//go:build s390x && linux // +build s390x,linux package unix @@ -365,4 +366,6 @@ const ( SYS_PIDFD_GETFD = 438 SYS_FACCESSAT2 = 439 SYS_PROCESS_MADVISE = 440 + SYS_EPOLL_PWAIT2 = 441 + SYS_MOUNT_SETATTR = 442 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go index 57636e09e..488ca848d 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go @@ -1,6 +1,7 @@ // go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build sparc64 && linux // +build sparc64,linux package unix @@ -379,4 +380,6 @@ const ( SYS_PIDFD_GETFD = 438 SYS_FACCESSAT2 = 439 SYS_PROCESS_MADVISE = 440 + SYS_EPOLL_PWAIT2 = 441 + SYS_MOUNT_SETATTR = 442 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_386.go b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_386.go index e66a8c9d3..3a6699eba 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_386.go @@ -1,6 +1,7 @@ // go run mksysnum.go http://cvsweb.netbsd.org/bsdweb.cgi/~checkout~/src/sys/kern/syscalls.master // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build 386 && netbsd // +build 386,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go index 42c788f24..5677cd4f1 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go @@ -1,6 +1,7 @@ // go run mksysnum.go http://cvsweb.netbsd.org/bsdweb.cgi/~checkout~/src/sys/kern/syscalls.master // Code generated by the command above; see README.md. DO NOT EDIT. 
+//go:build amd64 && netbsd // +build amd64,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm.go index 0a0757179..e784cb6db 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm.go @@ -1,6 +1,7 @@ // go run mksysnum.go http://cvsweb.netbsd.org/bsdweb.cgi/~checkout~/src/sys/kern/syscalls.master // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build arm && netbsd // +build arm,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm64.go index 0291c0931..bd4952efa 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm64.go @@ -1,6 +1,7 @@ // go run mksysnum.go http://cvsweb.netbsd.org/bsdweb.cgi/~checkout~/src/sys/kern/syscalls.master // Code generated by the command above; DO NOT EDIT. +//go:build arm64 && netbsd // +build arm64,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go index b0207d1c9..817edbf95 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go @@ -1,6 +1,7 @@ // go run mksysnum.go https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master // Code generated by the command above; see README.md. DO NOT EDIT. 
+//go:build 386 && openbsd // +build 386,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go index f0dec6f0b..ea453614e 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go @@ -1,6 +1,7 @@ // go run mksysnum.go https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build amd64 && openbsd // +build amd64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go index 33d1dc540..467971eed 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go @@ -1,6 +1,7 @@ // go run mksysnum.go https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build arm && openbsd // +build arm,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm64.go index fe2b689b6..32eec5ed5 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm64.go @@ -1,6 +1,7 @@ // go run mksysnum.go https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master // Code generated by the command above; see README.md. DO NOT EDIT. 
+//go:build arm64 && openbsd // +build arm64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_mips64.go index 5c08d573b..a37f77375 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_mips64.go @@ -1,6 +1,7 @@ // go run mksysnum.go https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build mips64 && openbsd // +build mips64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_zos_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_zos_s390x.go new file mode 100644 index 000000000..073daad43 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsysnum_zos_s390x.go @@ -0,0 +1,2670 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build zos && s390x +// +build zos,s390x + +package unix + +// TODO: auto-generate. 
+ +const ( + SYS_ACOSD128 = 0xB80 + SYS_ACOSD32 = 0xB7E + SYS_ACOSD64 = 0xB7F + SYS_ACOSHD128 = 0xB83 + SYS_ACOSHD32 = 0xB81 + SYS_ACOSHD64 = 0xB82 + SYS_AIO_FSYNC = 0xC69 + SYS_ASCTIME = 0x0AE + SYS_ASCTIME64 = 0xCD7 + SYS_ASCTIME64_R = 0xCD8 + SYS_ASIND128 = 0xB86 + SYS_ASIND32 = 0xB84 + SYS_ASIND64 = 0xB85 + SYS_ASINHD128 = 0xB89 + SYS_ASINHD32 = 0xB87 + SYS_ASINHD64 = 0xB88 + SYS_ATAN2D128 = 0xB8F + SYS_ATAN2D32 = 0xB8D + SYS_ATAN2D64 = 0xB8E + SYS_ATAND128 = 0xB8C + SYS_ATAND32 = 0xB8A + SYS_ATAND64 = 0xB8B + SYS_ATANHD128 = 0xB92 + SYS_ATANHD32 = 0xB90 + SYS_ATANHD64 = 0xB91 + SYS_BIND2ADDRSEL = 0xD59 + SYS_C16RTOMB = 0xD40 + SYS_C32RTOMB = 0xD41 + SYS_CBRTD128 = 0xB95 + SYS_CBRTD32 = 0xB93 + SYS_CBRTD64 = 0xB94 + SYS_CEILD128 = 0xB98 + SYS_CEILD32 = 0xB96 + SYS_CEILD64 = 0xB97 + SYS_CLEARENV = 0x0C9 + SYS_CLEARERR_UNLOCKED = 0xCA1 + SYS_CLOCK = 0x0AA + SYS_CLOGL = 0xA00 + SYS_CLRMEMF = 0x0BD + SYS_CONJ = 0xA03 + SYS_CONJF = 0xA06 + SYS_CONJL = 0xA09 + SYS_COPYSIGND128 = 0xB9E + SYS_COPYSIGND32 = 0xB9C + SYS_COPYSIGND64 = 0xB9D + SYS_COSD128 = 0xBA1 + SYS_COSD32 = 0xB9F + SYS_COSD64 = 0xBA0 + SYS_COSHD128 = 0xBA4 + SYS_COSHD32 = 0xBA2 + SYS_COSHD64 = 0xBA3 + SYS_CPOW = 0xA0C + SYS_CPOWF = 0xA0F + SYS_CPOWL = 0xA12 + SYS_CPROJ = 0xA15 + SYS_CPROJF = 0xA18 + SYS_CPROJL = 0xA1B + SYS_CREAL = 0xA1E + SYS_CREALF = 0xA21 + SYS_CREALL = 0xA24 + SYS_CSIN = 0xA27 + SYS_CSINF = 0xA2A + SYS_CSINH = 0xA30 + SYS_CSINHF = 0xA33 + SYS_CSINHL = 0xA36 + SYS_CSINL = 0xA2D + SYS_CSNAP = 0x0C5 + SYS_CSQRT = 0xA39 + SYS_CSQRTF = 0xA3C + SYS_CSQRTL = 0xA3F + SYS_CTAN = 0xA42 + SYS_CTANF = 0xA45 + SYS_CTANH = 0xA4B + SYS_CTANHF = 0xA4E + SYS_CTANHL = 0xA51 + SYS_CTANL = 0xA48 + SYS_CTIME = 0x0AB + SYS_CTIME64 = 0xCD9 + SYS_CTIME64_R = 0xCDA + SYS_CTRACE = 0x0C6 + SYS_DIFFTIME = 0x0A7 + SYS_DIFFTIME64 = 0xCDB + SYS_DLADDR = 0xC82 + SYS_DYNALLOC = 0x0C3 + SYS_DYNFREE = 0x0C2 + SYS_ERFCD128 = 0xBAA + SYS_ERFCD32 = 0xBA8 + SYS_ERFCD64 = 0xBA9 + SYS_ERFD128 = 0xBA7 + SYS_ERFD32 = 0xBA5 
+ SYS_ERFD64 = 0xBA6 + SYS_EXP2D128 = 0xBB0 + SYS_EXP2D32 = 0xBAE + SYS_EXP2D64 = 0xBAF + SYS_EXPD128 = 0xBAD + SYS_EXPD32 = 0xBAB + SYS_EXPD64 = 0xBAC + SYS_EXPM1D128 = 0xBB3 + SYS_EXPM1D32 = 0xBB1 + SYS_EXPM1D64 = 0xBB2 + SYS_FABSD128 = 0xBB6 + SYS_FABSD32 = 0xBB4 + SYS_FABSD64 = 0xBB5 + SYS_FDELREC_UNLOCKED = 0xCA2 + SYS_FDIMD128 = 0xBB9 + SYS_FDIMD32 = 0xBB7 + SYS_FDIMD64 = 0xBB8 + SYS_FDOPEN_UNLOCKED = 0xCFC + SYS_FECLEAREXCEPT = 0xAEA + SYS_FEGETENV = 0xAEB + SYS_FEGETEXCEPTFLAG = 0xAEC + SYS_FEGETROUND = 0xAED + SYS_FEHOLDEXCEPT = 0xAEE + SYS_FEOF_UNLOCKED = 0xCA3 + SYS_FERAISEEXCEPT = 0xAEF + SYS_FERROR_UNLOCKED = 0xCA4 + SYS_FESETENV = 0xAF0 + SYS_FESETEXCEPTFLAG = 0xAF1 + SYS_FESETROUND = 0xAF2 + SYS_FETCHEP = 0x0BF + SYS_FETESTEXCEPT = 0xAF3 + SYS_FEUPDATEENV = 0xAF4 + SYS_FE_DEC_GETROUND = 0xBBA + SYS_FE_DEC_SETROUND = 0xBBB + SYS_FFLUSH_UNLOCKED = 0xCA5 + SYS_FGETC_UNLOCKED = 0xC80 + SYS_FGETPOS64 = 0xCEE + SYS_FGETPOS64_UNLOCKED = 0xCF4 + SYS_FGETPOS_UNLOCKED = 0xCA6 + SYS_FGETS_UNLOCKED = 0xC7C + SYS_FGETWC_UNLOCKED = 0xCA7 + SYS_FGETWS_UNLOCKED = 0xCA8 + SYS_FILENO_UNLOCKED = 0xCA9 + SYS_FLDATA = 0x0C1 + SYS_FLDATA_UNLOCKED = 0xCAA + SYS_FLOCATE_UNLOCKED = 0xCAB + SYS_FLOORD128 = 0xBBE + SYS_FLOORD32 = 0xBBC + SYS_FLOORD64 = 0xBBD + SYS_FMA = 0xA63 + SYS_FMAD128 = 0xBC1 + SYS_FMAD32 = 0xBBF + SYS_FMAD64 = 0xBC0 + SYS_FMAF = 0xA66 + SYS_FMAL = 0xA69 + SYS_FMAX = 0xA6C + SYS_FMAXD128 = 0xBC4 + SYS_FMAXD32 = 0xBC2 + SYS_FMAXD64 = 0xBC3 + SYS_FMAXF = 0xA6F + SYS_FMAXL = 0xA72 + SYS_FMIN = 0xA75 + SYS_FMIND128 = 0xBC7 + SYS_FMIND32 = 0xBC5 + SYS_FMIND64 = 0xBC6 + SYS_FMINF = 0xA78 + SYS_FMINL = 0xA7B + SYS_FMODD128 = 0xBCA + SYS_FMODD32 = 0xBC8 + SYS_FMODD64 = 0xBC9 + SYS_FOPEN64 = 0xD49 + SYS_FOPEN64_UNLOCKED = 0xD4A + SYS_FOPEN_UNLOCKED = 0xCFA + SYS_FPRINTF_UNLOCKED = 0xCAC + SYS_FPUTC_UNLOCKED = 0xC81 + SYS_FPUTS_UNLOCKED = 0xC7E + SYS_FPUTWC_UNLOCKED = 0xCAD + SYS_FPUTWS_UNLOCKED = 0xCAE + SYS_FREAD_NOUPDATE = 0xCEC + SYS_FREAD_NOUPDATE_UNLOCKED = 
0xCED + SYS_FREAD_UNLOCKED = 0xC7B + SYS_FREEIFADDRS = 0xCE6 + SYS_FREOPEN64 = 0xD4B + SYS_FREOPEN64_UNLOCKED = 0xD4C + SYS_FREOPEN_UNLOCKED = 0xCFB + SYS_FREXPD128 = 0xBCE + SYS_FREXPD32 = 0xBCC + SYS_FREXPD64 = 0xBCD + SYS_FSCANF_UNLOCKED = 0xCAF + SYS_FSEEK64 = 0xCEF + SYS_FSEEK64_UNLOCKED = 0xCF5 + SYS_FSEEKO64 = 0xCF0 + SYS_FSEEKO64_UNLOCKED = 0xCF6 + SYS_FSEEKO_UNLOCKED = 0xCB1 + SYS_FSEEK_UNLOCKED = 0xCB0 + SYS_FSETPOS64 = 0xCF1 + SYS_FSETPOS64_UNLOCKED = 0xCF7 + SYS_FSETPOS_UNLOCKED = 0xCB3 + SYS_FTELL64 = 0xCF2 + SYS_FTELL64_UNLOCKED = 0xCF8 + SYS_FTELLO64 = 0xCF3 + SYS_FTELLO64_UNLOCKED = 0xCF9 + SYS_FTELLO_UNLOCKED = 0xCB5 + SYS_FTELL_UNLOCKED = 0xCB4 + SYS_FUPDATE = 0x0B5 + SYS_FUPDATE_UNLOCKED = 0xCB7 + SYS_FWIDE_UNLOCKED = 0xCB8 + SYS_FWPRINTF_UNLOCKED = 0xCB9 + SYS_FWRITE_UNLOCKED = 0xC7A + SYS_FWSCANF_UNLOCKED = 0xCBA + SYS_GETDATE64 = 0xD4F + SYS_GETIFADDRS = 0xCE7 + SYS_GETIPV4SOURCEFILTER = 0xC77 + SYS_GETSOURCEFILTER = 0xC79 + SYS_GETSYNTX = 0x0FD + SYS_GETS_UNLOCKED = 0xC7D + SYS_GETTIMEOFDAY64 = 0xD50 + SYS_GETWCHAR_UNLOCKED = 0xCBC + SYS_GETWC_UNLOCKED = 0xCBB + SYS_GMTIME = 0x0B0 + SYS_GMTIME64 = 0xCDC + SYS_GMTIME64_R = 0xCDD + SYS_HYPOTD128 = 0xBD1 + SYS_HYPOTD32 = 0xBCF + SYS_HYPOTD64 = 0xBD0 + SYS_ILOGBD128 = 0xBD4 + SYS_ILOGBD32 = 0xBD2 + SYS_ILOGBD64 = 0xBD3 + SYS_ILOGBF = 0xA7E + SYS_ILOGBL = 0xA81 + SYS_INET6_IS_SRCADDR = 0xD5A + SYS_ISBLANK = 0x0FE + SYS_ISWALNUM = 0x0FF + SYS_LDEXPD128 = 0xBD7 + SYS_LDEXPD32 = 0xBD5 + SYS_LDEXPD64 = 0xBD6 + SYS_LGAMMAD128 = 0xBDA + SYS_LGAMMAD32 = 0xBD8 + SYS_LGAMMAD64 = 0xBD9 + SYS_LIO_LISTIO = 0xC6A + SYS_LLRINT = 0xA84 + SYS_LLRINTD128 = 0xBDD + SYS_LLRINTD32 = 0xBDB + SYS_LLRINTD64 = 0xBDC + SYS_LLRINTF = 0xA87 + SYS_LLRINTL = 0xA8A + SYS_LLROUND = 0xA8D + SYS_LLROUNDD128 = 0xBE0 + SYS_LLROUNDD32 = 0xBDE + SYS_LLROUNDD64 = 0xBDF + SYS_LLROUNDF = 0xA90 + SYS_LLROUNDL = 0xA93 + SYS_LOCALTIM = 0x0B1 + SYS_LOCALTIME = 0x0B1 + SYS_LOCALTIME64 = 0xCDE + SYS_LOCALTIME64_R = 0xCDF + SYS_LOG10D128 = 
0xBE6 + SYS_LOG10D32 = 0xBE4 + SYS_LOG10D64 = 0xBE5 + SYS_LOG1PD128 = 0xBE9 + SYS_LOG1PD32 = 0xBE7 + SYS_LOG1PD64 = 0xBE8 + SYS_LOG2D128 = 0xBEC + SYS_LOG2D32 = 0xBEA + SYS_LOG2D64 = 0xBEB + SYS_LOGBD128 = 0xBEF + SYS_LOGBD32 = 0xBED + SYS_LOGBD64 = 0xBEE + SYS_LOGBF = 0xA96 + SYS_LOGBL = 0xA99 + SYS_LOGD128 = 0xBE3 + SYS_LOGD32 = 0xBE1 + SYS_LOGD64 = 0xBE2 + SYS_LRINT = 0xA9C + SYS_LRINTD128 = 0xBF2 + SYS_LRINTD32 = 0xBF0 + SYS_LRINTD64 = 0xBF1 + SYS_LRINTF = 0xA9F + SYS_LRINTL = 0xAA2 + SYS_LROUNDD128 = 0xBF5 + SYS_LROUNDD32 = 0xBF3 + SYS_LROUNDD64 = 0xBF4 + SYS_LROUNDL = 0xAA5 + SYS_MBLEN = 0x0AF + SYS_MBRTOC16 = 0xD42 + SYS_MBRTOC32 = 0xD43 + SYS_MEMSET = 0x0A3 + SYS_MKTIME = 0x0AC + SYS_MKTIME64 = 0xCE0 + SYS_MODFD128 = 0xBF8 + SYS_MODFD32 = 0xBF6 + SYS_MODFD64 = 0xBF7 + SYS_NAN = 0xAA8 + SYS_NAND128 = 0xBFB + SYS_NAND32 = 0xBF9 + SYS_NAND64 = 0xBFA + SYS_NANF = 0xAAA + SYS_NANL = 0xAAC + SYS_NEARBYINT = 0xAAE + SYS_NEARBYINTD128 = 0xBFE + SYS_NEARBYINTD32 = 0xBFC + SYS_NEARBYINTD64 = 0xBFD + SYS_NEARBYINTF = 0xAB1 + SYS_NEARBYINTL = 0xAB4 + SYS_NEXTAFTERD128 = 0xC01 + SYS_NEXTAFTERD32 = 0xBFF + SYS_NEXTAFTERD64 = 0xC00 + SYS_NEXTAFTERF = 0xAB7 + SYS_NEXTAFTERL = 0xABA + SYS_NEXTTOWARD = 0xABD + SYS_NEXTTOWARDD128 = 0xC04 + SYS_NEXTTOWARDD32 = 0xC02 + SYS_NEXTTOWARDD64 = 0xC03 + SYS_NEXTTOWARDF = 0xAC0 + SYS_NEXTTOWARDL = 0xAC3 + SYS_NL_LANGINFO = 0x0FC + SYS_PERROR_UNLOCKED = 0xCBD + SYS_POSIX_FALLOCATE = 0xCE8 + SYS_POSIX_MEMALIGN = 0xCE9 + SYS_POSIX_OPENPT = 0xC66 + SYS_POWD128 = 0xC07 + SYS_POWD32 = 0xC05 + SYS_POWD64 = 0xC06 + SYS_PRINTF_UNLOCKED = 0xCBE + SYS_PSELECT = 0xC67 + SYS_PTHREAD_ATTR_GETSTACK = 0xB3E + SYS_PTHREAD_ATTR_SETSTACK = 0xB3F + SYS_PTHREAD_SECURITY_APPLID_NP = 0xCE4 + SYS_PUTS_UNLOCKED = 0xC7F + SYS_PUTWCHAR_UNLOCKED = 0xCC0 + SYS_PUTWC_UNLOCKED = 0xCBF + SYS_QUANTEXPD128 = 0xD46 + SYS_QUANTEXPD32 = 0xD44 + SYS_QUANTEXPD64 = 0xD45 + SYS_QUANTIZED128 = 0xC0A + SYS_QUANTIZED32 = 0xC08 + SYS_QUANTIZED64 = 0xC09 + SYS_REMAINDERD128 = 
0xC0D + SYS_REMAINDERD32 = 0xC0B + SYS_REMAINDERD64 = 0xC0C + SYS_RESIZE_ALLOC = 0xCEB + SYS_REWIND_UNLOCKED = 0xCC1 + SYS_RINTD128 = 0xC13 + SYS_RINTD32 = 0xC11 + SYS_RINTD64 = 0xC12 + SYS_RINTF = 0xACB + SYS_RINTL = 0xACD + SYS_ROUND = 0xACF + SYS_ROUNDD128 = 0xC16 + SYS_ROUNDD32 = 0xC14 + SYS_ROUNDD64 = 0xC15 + SYS_ROUNDF = 0xAD2 + SYS_ROUNDL = 0xAD5 + SYS_SAMEQUANTUMD128 = 0xC19 + SYS_SAMEQUANTUMD32 = 0xC17 + SYS_SAMEQUANTUMD64 = 0xC18 + SYS_SCALBLN = 0xAD8 + SYS_SCALBLND128 = 0xC1C + SYS_SCALBLND32 = 0xC1A + SYS_SCALBLND64 = 0xC1B + SYS_SCALBLNF = 0xADB + SYS_SCALBLNL = 0xADE + SYS_SCALBND128 = 0xC1F + SYS_SCALBND32 = 0xC1D + SYS_SCALBND64 = 0xC1E + SYS_SCALBNF = 0xAE3 + SYS_SCALBNL = 0xAE6 + SYS_SCANF_UNLOCKED = 0xCC2 + SYS_SCHED_YIELD = 0xB32 + SYS_SETENV = 0x0C8 + SYS_SETIPV4SOURCEFILTER = 0xC76 + SYS_SETSOURCEFILTER = 0xC78 + SYS_SHM_OPEN = 0xC8C + SYS_SHM_UNLINK = 0xC8D + SYS_SIND128 = 0xC22 + SYS_SIND32 = 0xC20 + SYS_SIND64 = 0xC21 + SYS_SINHD128 = 0xC25 + SYS_SINHD32 = 0xC23 + SYS_SINHD64 = 0xC24 + SYS_SIZEOF_ALLOC = 0xCEA + SYS_SOCKATMARK = 0xC68 + SYS_SQRTD128 = 0xC28 + SYS_SQRTD32 = 0xC26 + SYS_SQRTD64 = 0xC27 + SYS_STRCHR = 0x0A0 + SYS_STRCSPN = 0x0A1 + SYS_STRERROR = 0x0A8 + SYS_STRERROR_R = 0xB33 + SYS_STRFTIME = 0x0B2 + SYS_STRLEN = 0x0A9 + SYS_STRPBRK = 0x0A2 + SYS_STRSPN = 0x0A4 + SYS_STRSTR = 0x0A5 + SYS_STRTOD128 = 0xC2B + SYS_STRTOD32 = 0xC29 + SYS_STRTOD64 = 0xC2A + SYS_STRTOK = 0x0A6 + SYS_TAND128 = 0xC2E + SYS_TAND32 = 0xC2C + SYS_TAND64 = 0xC2D + SYS_TANHD128 = 0xC31 + SYS_TANHD32 = 0xC2F + SYS_TANHD64 = 0xC30 + SYS_TGAMMAD128 = 0xC34 + SYS_TGAMMAD32 = 0xC32 + SYS_TGAMMAD64 = 0xC33 + SYS_TIME = 0x0AD + SYS_TIME64 = 0xCE1 + SYS_TMPFILE64 = 0xD4D + SYS_TMPFILE64_UNLOCKED = 0xD4E + SYS_TMPFILE_UNLOCKED = 0xCFD + SYS_TRUNCD128 = 0xC40 + SYS_TRUNCD32 = 0xC3E + SYS_TRUNCD64 = 0xC3F + SYS_UNGETC_UNLOCKED = 0xCC3 + SYS_UNGETWC_UNLOCKED = 0xCC4 + SYS_UNSETENV = 0xB34 + SYS_VFPRINTF_UNLOCKED = 0xCC5 + SYS_VFSCANF_UNLOCKED = 0xCC7 + 
SYS_VFWPRINTF_UNLOCKED = 0xCC9 + SYS_VFWSCANF_UNLOCKED = 0xCCB + SYS_VPRINTF_UNLOCKED = 0xCCD + SYS_VSCANF_UNLOCKED = 0xCCF + SYS_VWPRINTF_UNLOCKED = 0xCD1 + SYS_VWSCANF_UNLOCKED = 0xCD3 + SYS_WCSTOD128 = 0xC43 + SYS_WCSTOD32 = 0xC41 + SYS_WCSTOD64 = 0xC42 + SYS_WPRINTF_UNLOCKED = 0xCD5 + SYS_WSCANF_UNLOCKED = 0xCD6 + SYS__FLUSHLBF = 0xD68 + SYS__FLUSHLBF_UNLOCKED = 0xD6F + SYS___ACOSHF_H = 0xA54 + SYS___ACOSHL_H = 0xA55 + SYS___ASINHF_H = 0xA56 + SYS___ASINHL_H = 0xA57 + SYS___ATANPID128 = 0xC6D + SYS___ATANPID32 = 0xC6B + SYS___ATANPID64 = 0xC6C + SYS___CBRTF_H = 0xA58 + SYS___CBRTL_H = 0xA59 + SYS___CDUMP = 0x0C4 + SYS___CLASS = 0xAFA + SYS___CLASS2 = 0xB99 + SYS___CLASS2D128 = 0xC99 + SYS___CLASS2D32 = 0xC97 + SYS___CLASS2D64 = 0xC98 + SYS___CLASS2F = 0xC91 + SYS___CLASS2F_B = 0xC93 + SYS___CLASS2F_H = 0xC94 + SYS___CLASS2L = 0xC92 + SYS___CLASS2L_B = 0xC95 + SYS___CLASS2L_H = 0xC96 + SYS___CLASS2_B = 0xB9A + SYS___CLASS2_H = 0xB9B + SYS___CLASS_B = 0xAFB + SYS___CLASS_H = 0xAFC + SYS___CLOGL_B = 0xA01 + SYS___CLOGL_H = 0xA02 + SYS___CLRENV = 0x0C9 + SYS___CLRMF = 0x0BD + SYS___CODEPAGE_INFO = 0xC64 + SYS___CONJF_B = 0xA07 + SYS___CONJF_H = 0xA08 + SYS___CONJL_B = 0xA0A + SYS___CONJL_H = 0xA0B + SYS___CONJ_B = 0xA04 + SYS___CONJ_H = 0xA05 + SYS___COPYSIGN_B = 0xA5A + SYS___COPYSIGN_H = 0xAF5 + SYS___COSPID128 = 0xC70 + SYS___COSPID32 = 0xC6E + SYS___COSPID64 = 0xC6F + SYS___CPOWF_B = 0xA10 + SYS___CPOWF_H = 0xA11 + SYS___CPOWL_B = 0xA13 + SYS___CPOWL_H = 0xA14 + SYS___CPOW_B = 0xA0D + SYS___CPOW_H = 0xA0E + SYS___CPROJF_B = 0xA19 + SYS___CPROJF_H = 0xA1A + SYS___CPROJL_B = 0xA1C + SYS___CPROJL_H = 0xA1D + SYS___CPROJ_B = 0xA16 + SYS___CPROJ_H = 0xA17 + SYS___CREALF_B = 0xA22 + SYS___CREALF_H = 0xA23 + SYS___CREALL_B = 0xA25 + SYS___CREALL_H = 0xA26 + SYS___CREAL_B = 0xA1F + SYS___CREAL_H = 0xA20 + SYS___CSINF_B = 0xA2B + SYS___CSINF_H = 0xA2C + SYS___CSINHF_B = 0xA34 + SYS___CSINHF_H = 0xA35 + SYS___CSINHL_B = 0xA37 + SYS___CSINHL_H = 0xA38 + SYS___CSINH_B = 
0xA31 + SYS___CSINH_H = 0xA32 + SYS___CSINL_B = 0xA2E + SYS___CSINL_H = 0xA2F + SYS___CSIN_B = 0xA28 + SYS___CSIN_H = 0xA29 + SYS___CSNAP = 0x0C5 + SYS___CSQRTF_B = 0xA3D + SYS___CSQRTF_H = 0xA3E + SYS___CSQRTL_B = 0xA40 + SYS___CSQRTL_H = 0xA41 + SYS___CSQRT_B = 0xA3A + SYS___CSQRT_H = 0xA3B + SYS___CTANF_B = 0xA46 + SYS___CTANF_H = 0xA47 + SYS___CTANHF_B = 0xA4F + SYS___CTANHF_H = 0xA50 + SYS___CTANHL_B = 0xA52 + SYS___CTANHL_H = 0xA53 + SYS___CTANH_B = 0xA4C + SYS___CTANH_H = 0xA4D + SYS___CTANL_B = 0xA49 + SYS___CTANL_H = 0xA4A + SYS___CTAN_B = 0xA43 + SYS___CTAN_H = 0xA44 + SYS___CTEST = 0x0C7 + SYS___CTRACE = 0x0C6 + SYS___D1TOP = 0xC9B + SYS___D2TOP = 0xC9C + SYS___D4TOP = 0xC9D + SYS___DYNALL = 0x0C3 + SYS___DYNFRE = 0x0C2 + SYS___EXP2F_H = 0xA5E + SYS___EXP2L_H = 0xA5F + SYS___EXP2_H = 0xA5D + SYS___EXPM1F_H = 0xA5B + SYS___EXPM1L_H = 0xA5C + SYS___FBUFSIZE = 0xD60 + SYS___FLBF = 0xD62 + SYS___FLDATA = 0x0C1 + SYS___FMAF_B = 0xA67 + SYS___FMAF_H = 0xA68 + SYS___FMAL_B = 0xA6A + SYS___FMAL_H = 0xA6B + SYS___FMAXF_B = 0xA70 + SYS___FMAXF_H = 0xA71 + SYS___FMAXL_B = 0xA73 + SYS___FMAXL_H = 0xA74 + SYS___FMAX_B = 0xA6D + SYS___FMAX_H = 0xA6E + SYS___FMA_B = 0xA64 + SYS___FMA_H = 0xA65 + SYS___FMINF_B = 0xA79 + SYS___FMINF_H = 0xA7A + SYS___FMINL_B = 0xA7C + SYS___FMINL_H = 0xA7D + SYS___FMIN_B = 0xA76 + SYS___FMIN_H = 0xA77 + SYS___FPENDING = 0xD61 + SYS___FPENDING_UNLOCKED = 0xD6C + SYS___FPURGE = 0xD69 + SYS___FPURGE_UNLOCKED = 0xD70 + SYS___FP_CAST_D = 0xBCB + SYS___FREADABLE = 0xD63 + SYS___FREADAHEAD = 0xD6A + SYS___FREADAHEAD_UNLOCKED = 0xD71 + SYS___FREADING = 0xD65 + SYS___FREADING_UNLOCKED = 0xD6D + SYS___FSEEK2 = 0xB3C + SYS___FSETERR = 0xD6B + SYS___FSETLOCKING = 0xD67 + SYS___FTCHEP = 0x0BF + SYS___FTELL2 = 0xB3B + SYS___FUPDT = 0x0B5 + SYS___FWRITABLE = 0xD64 + SYS___FWRITING = 0xD66 + SYS___FWRITING_UNLOCKED = 0xD6E + SYS___GETCB = 0x0B4 + SYS___GETGRGID1 = 0xD5B + SYS___GETGRNAM1 = 0xD5C + SYS___GETTHENT = 0xCE5 + SYS___GETTOD = 0xD3E + 
SYS___HYPOTF_H = 0xAF6 + SYS___HYPOTL_H = 0xAF7 + SYS___ILOGBF_B = 0xA7F + SYS___ILOGBF_H = 0xA80 + SYS___ILOGBL_B = 0xA82 + SYS___ILOGBL_H = 0xA83 + SYS___ISBLANK_A = 0xB2E + SYS___ISBLNK = 0x0FE + SYS___ISWBLANK_A = 0xB2F + SYS___LE_CEEGTJS = 0xD72 + SYS___LE_TRACEBACK = 0xB7A + SYS___LGAMMAL_H = 0xA62 + SYS___LGAMMA_B_C99 = 0xB39 + SYS___LGAMMA_H_C99 = 0xB38 + SYS___LGAMMA_R_C99 = 0xB3A + SYS___LLRINTF_B = 0xA88 + SYS___LLRINTF_H = 0xA89 + SYS___LLRINTL_B = 0xA8B + SYS___LLRINTL_H = 0xA8C + SYS___LLRINT_B = 0xA85 + SYS___LLRINT_H = 0xA86 + SYS___LLROUNDF_B = 0xA91 + SYS___LLROUNDF_H = 0xA92 + SYS___LLROUNDL_B = 0xA94 + SYS___LLROUNDL_H = 0xA95 + SYS___LLROUND_B = 0xA8E + SYS___LLROUND_H = 0xA8F + SYS___LOCALE_CTL = 0xD47 + SYS___LOG1PF_H = 0xA60 + SYS___LOG1PL_H = 0xA61 + SYS___LOGBF_B = 0xA97 + SYS___LOGBF_H = 0xA98 + SYS___LOGBL_B = 0xA9A + SYS___LOGBL_H = 0xA9B + SYS___LOGIN_APPLID = 0xCE2 + SYS___LRINTF_B = 0xAA0 + SYS___LRINTF_H = 0xAA1 + SYS___LRINTL_B = 0xAA3 + SYS___LRINTL_H = 0xAA4 + SYS___LRINT_B = 0xA9D + SYS___LRINT_H = 0xA9E + SYS___LROUNDF_FIXUP = 0xB31 + SYS___LROUNDL_B = 0xAA6 + SYS___LROUNDL_H = 0xAA7 + SYS___LROUND_FIXUP = 0xB30 + SYS___MOSERVICES = 0xD3D + SYS___MUST_STAY_CLEAN = 0xB7C + SYS___NANF_B = 0xAAB + SYS___NANL_B = 0xAAD + SYS___NAN_B = 0xAA9 + SYS___NEARBYINTF_B = 0xAB2 + SYS___NEARBYINTF_H = 0xAB3 + SYS___NEARBYINTL_B = 0xAB5 + SYS___NEARBYINTL_H = 0xAB6 + SYS___NEARBYINT_B = 0xAAF + SYS___NEARBYINT_H = 0xAB0 + SYS___NEXTAFTERF_B = 0xAB8 + SYS___NEXTAFTERF_H = 0xAB9 + SYS___NEXTAFTERL_B = 0xABB + SYS___NEXTAFTERL_H = 0xABC + SYS___NEXTTOWARDF_B = 0xAC1 + SYS___NEXTTOWARDF_H = 0xAC2 + SYS___NEXTTOWARDL_B = 0xAC4 + SYS___NEXTTOWARDL_H = 0xAC5 + SYS___NEXTTOWARD_B = 0xABE + SYS___NEXTTOWARD_H = 0xABF + SYS___O_ENV = 0xB7D + SYS___PASSWD_APPLID = 0xCE3 + SYS___PTOD1 = 0xC9E + SYS___PTOD2 = 0xC9F + SYS___PTOD4 = 0xCA0 + SYS___REGCOMP_STD = 0x0EA + SYS___REMAINDERF_H = 0xAC6 + SYS___REMAINDERL_H = 0xAC7 + SYS___REMQUOD128 = 0xC10 + 
SYS___REMQUOD32 = 0xC0E + SYS___REMQUOD64 = 0xC0F + SYS___REMQUOF_H = 0xAC9 + SYS___REMQUOL_H = 0xACA + SYS___REMQUO_H = 0xAC8 + SYS___RINTF_B = 0xACC + SYS___RINTL_B = 0xACE + SYS___ROUNDF_B = 0xAD3 + SYS___ROUNDF_H = 0xAD4 + SYS___ROUNDL_B = 0xAD6 + SYS___ROUNDL_H = 0xAD7 + SYS___ROUND_B = 0xAD0 + SYS___ROUND_H = 0xAD1 + SYS___SCALBLNF_B = 0xADC + SYS___SCALBLNF_H = 0xADD + SYS___SCALBLNL_B = 0xADF + SYS___SCALBLNL_H = 0xAE0 + SYS___SCALBLN_B = 0xAD9 + SYS___SCALBLN_H = 0xADA + SYS___SCALBNF_B = 0xAE4 + SYS___SCALBNF_H = 0xAE5 + SYS___SCALBNL_B = 0xAE7 + SYS___SCALBNL_H = 0xAE8 + SYS___SCALBN_B = 0xAE1 + SYS___SCALBN_H = 0xAE2 + SYS___SETENV = 0x0C8 + SYS___SINPID128 = 0xC73 + SYS___SINPID32 = 0xC71 + SYS___SINPID64 = 0xC72 + SYS___SMF_RECORD2 = 0xD48 + SYS___STATIC_REINIT = 0xB3D + SYS___TGAMMAF_H_C99 = 0xB79 + SYS___TGAMMAL_H = 0xAE9 + SYS___TGAMMA_H_C99 = 0xB78 + SYS___TOCSNAME2 = 0xC9A + SYS_CEIL = 0x01F + SYS_CHAUDIT = 0x1E0 + SYS_EXP = 0x01A + SYS_FCHAUDIT = 0x1E1 + SYS_FREXP = 0x01D + SYS_GETGROUPSBYNAME = 0x1E2 + SYS_GETPWUID = 0x1A0 + SYS_GETUID = 0x1A1 + SYS_ISATTY = 0x1A3 + SYS_KILL = 0x1A4 + SYS_LDEXP = 0x01E + SYS_LINK = 0x1A5 + SYS_LOG10 = 0x01C + SYS_LSEEK = 0x1A6 + SYS_LSTAT = 0x1A7 + SYS_MKDIR = 0x1A8 + SYS_MKFIFO = 0x1A9 + SYS_MKNOD = 0x1AA + SYS_MODF = 0x01B + SYS_MOUNT = 0x1AB + SYS_OPEN = 0x1AC + SYS_OPENDIR = 0x1AD + SYS_PATHCONF = 0x1AE + SYS_PAUSE = 0x1AF + SYS_PIPE = 0x1B0 + SYS_PTHREAD_ATTR_DESTROY = 0x1E7 + SYS_PTHREAD_ATTR_GETDETACHSTATE = 0x1EB + SYS_PTHREAD_ATTR_GETSTACKSIZE = 0x1E9 + SYS_PTHREAD_ATTR_GETWEIGHT_NP = 0x1ED + SYS_PTHREAD_ATTR_INIT = 0x1E6 + SYS_PTHREAD_ATTR_SETDETACHSTATE = 0x1EA + SYS_PTHREAD_ATTR_SETSTACKSIZE = 0x1E8 + SYS_PTHREAD_ATTR_SETWEIGHT_NP = 0x1EC + SYS_PTHREAD_CANCEL = 0x1EE + SYS_PTHREAD_CLEANUP_POP = 0x1F0 + SYS_PTHREAD_CLEANUP_PUSH = 0x1EF + SYS_PTHREAD_CONDATTR_DESTROY = 0x1F2 + SYS_PTHREAD_CONDATTR_INIT = 0x1F1 + SYS_PTHREAD_COND_BROADCAST = 0x1F6 + SYS_PTHREAD_COND_DESTROY = 0x1F4 + 
SYS_PTHREAD_COND_INIT = 0x1F3 + SYS_PTHREAD_COND_SIGNAL = 0x1F5 + SYS_PTHREAD_COND_TIMEDWAIT = 0x1F8 + SYS_PTHREAD_COND_WAIT = 0x1F7 + SYS_PTHREAD_CREATE = 0x1F9 + SYS_PTHREAD_DETACH = 0x1FA + SYS_PTHREAD_EQUAL = 0x1FB + SYS_PTHREAD_EXIT = 0x1E4 + SYS_PTHREAD_GETSPECIFIC = 0x1FC + SYS_PTHREAD_JOIN = 0x1FD + SYS_PTHREAD_KEY_CREATE = 0x1FE + SYS_PTHREAD_KILL = 0x1E5 + SYS_PTHREAD_MUTEXATTR_INIT = 0x1FF + SYS_READ = 0x1B2 + SYS_READDIR = 0x1B3 + SYS_READLINK = 0x1B4 + SYS_REWINDDIR = 0x1B5 + SYS_RMDIR = 0x1B6 + SYS_SETEGID = 0x1B7 + SYS_SETEUID = 0x1B8 + SYS_SETGID = 0x1B9 + SYS_SETPGID = 0x1BA + SYS_SETSID = 0x1BB + SYS_SETUID = 0x1BC + SYS_SIGACTION = 0x1BD + SYS_SIGADDSET = 0x1BE + SYS_SIGDELSET = 0x1BF + SYS_SIGEMPTYSET = 0x1C0 + SYS_SIGFILLSET = 0x1C1 + SYS_SIGISMEMBER = 0x1C2 + SYS_SIGLONGJMP = 0x1C3 + SYS_SIGPENDING = 0x1C4 + SYS_SIGPROCMASK = 0x1C5 + SYS_SIGSETJMP = 0x1C6 + SYS_SIGSUSPEND = 0x1C7 + SYS_SIGWAIT = 0x1E3 + SYS_SLEEP = 0x1C8 + SYS_STAT = 0x1C9 + SYS_SYMLINK = 0x1CB + SYS_SYSCONF = 0x1CC + SYS_TCDRAIN = 0x1CD + SYS_TCFLOW = 0x1CE + SYS_TCFLUSH = 0x1CF + SYS_TCGETATTR = 0x1D0 + SYS_TCGETPGRP = 0x1D1 + SYS_TCSENDBREAK = 0x1D2 + SYS_TCSETATTR = 0x1D3 + SYS_TCSETPGRP = 0x1D4 + SYS_TIMES = 0x1D5 + SYS_TTYNAME = 0x1D6 + SYS_TZSET = 0x1D7 + SYS_UMASK = 0x1D8 + SYS_UMOUNT = 0x1D9 + SYS_UNAME = 0x1DA + SYS_UNLINK = 0x1DB + SYS_UTIME = 0x1DC + SYS_WAIT = 0x1DD + SYS_WAITPID = 0x1DE + SYS_WRITE = 0x1DF + SYS_W_GETPSENT = 0x1B1 + SYS_W_IOCTL = 0x1A2 + SYS_W_STATFS = 0x1CA + SYS_A64L = 0x2EF + SYS_BCMP = 0x2B9 + SYS_BCOPY = 0x2BA + SYS_BZERO = 0x2BB + SYS_CATCLOSE = 0x2B6 + SYS_CATGETS = 0x2B7 + SYS_CATOPEN = 0x2B8 + SYS_CRYPT = 0x2AC + SYS_DBM_CLEARERR = 0x2F7 + SYS_DBM_CLOSE = 0x2F8 + SYS_DBM_DELETE = 0x2F9 + SYS_DBM_ERROR = 0x2FA + SYS_DBM_FETCH = 0x2FB + SYS_DBM_FIRSTKEY = 0x2FC + SYS_DBM_NEXTKEY = 0x2FD + SYS_DBM_OPEN = 0x2FE + SYS_DBM_STORE = 0x2FF + SYS_DRAND48 = 0x2B2 + SYS_ENCRYPT = 0x2AD + SYS_ENDUTXENT = 0x2E1 + SYS_ERAND48 = 0x2B3 + SYS_ERF = 0x02C 
+ SYS_ERFC = 0x02D + SYS_FCHDIR = 0x2D9 + SYS_FFS = 0x2BC + SYS_FMTMSG = 0x2E5 + SYS_FSTATVFS = 0x2B4 + SYS_FTIME = 0x2F5 + SYS_GAMMA = 0x02E + SYS_GETDATE = 0x2A6 + SYS_GETPAGESIZE = 0x2D8 + SYS_GETTIMEOFDAY = 0x2F6 + SYS_GETUTXENT = 0x2E0 + SYS_GETUTXID = 0x2E2 + SYS_GETUTXLINE = 0x2E3 + SYS_HCREATE = 0x2C6 + SYS_HDESTROY = 0x2C7 + SYS_HSEARCH = 0x2C8 + SYS_HYPOT = 0x02B + SYS_INDEX = 0x2BD + SYS_INITSTATE = 0x2C2 + SYS_INSQUE = 0x2CF + SYS_ISASCII = 0x2ED + SYS_JRAND48 = 0x2E6 + SYS_L64A = 0x2F0 + SYS_LCONG48 = 0x2EA + SYS_LFIND = 0x2C9 + SYS_LRAND48 = 0x2E7 + SYS_LSEARCH = 0x2CA + SYS_MEMCCPY = 0x2D4 + SYS_MRAND48 = 0x2E8 + SYS_NRAND48 = 0x2E9 + SYS_PCLOSE = 0x2D2 + SYS_POPEN = 0x2D1 + SYS_PUTUTXLINE = 0x2E4 + SYS_RANDOM = 0x2C4 + SYS_REMQUE = 0x2D0 + SYS_RINDEX = 0x2BE + SYS_SEED48 = 0x2EC + SYS_SETKEY = 0x2AE + SYS_SETSTATE = 0x2C3 + SYS_SETUTXENT = 0x2DF + SYS_SRAND48 = 0x2EB + SYS_SRANDOM = 0x2C5 + SYS_STATVFS = 0x2B5 + SYS_STRCASECMP = 0x2BF + SYS_STRDUP = 0x2C0 + SYS_STRNCASECMP = 0x2C1 + SYS_SWAB = 0x2D3 + SYS_TDELETE = 0x2CB + SYS_TFIND = 0x2CC + SYS_TOASCII = 0x2EE + SYS_TSEARCH = 0x2CD + SYS_TWALK = 0x2CE + SYS_UALARM = 0x2F1 + SYS_USLEEP = 0x2F2 + SYS_WAIT3 = 0x2A7 + SYS_WAITID = 0x2A8 + SYS_Y1 = 0x02A + SYS___ATOE = 0x2DB + SYS___ATOE_L = 0x2DC + SYS___CATTRM = 0x2A9 + SYS___CNVBLK = 0x2AF + SYS___CRYTRM = 0x2B0 + SYS___DLGHT = 0x2A1 + SYS___ECRTRM = 0x2B1 + SYS___ETOA = 0x2DD + SYS___ETOA_L = 0x2DE + SYS___GDTRM = 0x2AA + SYS___OCLCK = 0x2DA + SYS___OPARGF = 0x2A2 + SYS___OPERRF = 0x2A5 + SYS___OPINDF = 0x2A4 + SYS___OPOPTF = 0x2A3 + SYS___RNDTRM = 0x2AB + SYS___SRCTRM = 0x2F4 + SYS___TZONE = 0x2A0 + SYS___UTXTRM = 0x2F3 + SYS_ASIN = 0x03E + SYS_ISXDIGIT = 0x03B + SYS_SETLOCAL = 0x03A + SYS_SETLOCALE = 0x03A + SYS_SIN = 0x03F + SYS_TOLOWER = 0x03C + SYS_TOUPPER = 0x03D + SYS_ACCEPT_AND_RECV = 0x4F7 + SYS_ATOL = 0x04E + SYS_CHECKSCH = 0x4BC + SYS_CHECKSCHENV = 0x4BC + SYS_CLEARERR = 0x04C + SYS_CONNECTS = 0x4B5 + SYS_CONNECTSERVER = 0x4B5 + 
SYS_CONNECTW = 0x4B4 + SYS_CONNECTWORKMGR = 0x4B4 + SYS_CONTINUE = 0x4B3 + SYS_CONTINUEWORKUNIT = 0x4B3 + SYS_COPYSIGN = 0x4C2 + SYS_CREATEWO = 0x4B2 + SYS_CREATEWORKUNIT = 0x4B2 + SYS_DELETEWO = 0x4B9 + SYS_DELETEWORKUNIT = 0x4B9 + SYS_DISCONNE = 0x4B6 + SYS_DISCONNECTSERVER = 0x4B6 + SYS_FEOF = 0x04D + SYS_FERROR = 0x04A + SYS_FINITE = 0x4C8 + SYS_GAMMA_R = 0x4E2 + SYS_JOINWORK = 0x4B7 + SYS_JOINWORKUNIT = 0x4B7 + SYS_LEAVEWOR = 0x4B8 + SYS_LEAVEWORKUNIT = 0x4B8 + SYS_LGAMMA_R = 0x4EB + SYS_MATHERR = 0x4D0 + SYS_PERROR = 0x04F + SYS_QUERYMET = 0x4BA + SYS_QUERYMETRICS = 0x4BA + SYS_QUERYSCH = 0x4BB + SYS_QUERYSCHENV = 0x4BB + SYS_REWIND = 0x04B + SYS_SCALBN = 0x4D4 + SYS_SIGNIFIC = 0x4D5 + SYS_SIGNIFICAND = 0x4D5 + SYS___ACOSH_B = 0x4DA + SYS___ACOS_B = 0x4D9 + SYS___ASINH_B = 0x4BE + SYS___ASIN_B = 0x4DB + SYS___ATAN2_B = 0x4DC + SYS___ATANH_B = 0x4DD + SYS___ATAN_B = 0x4BF + SYS___CBRT_B = 0x4C0 + SYS___CEIL_B = 0x4C1 + SYS___COSH_B = 0x4DE + SYS___COS_B = 0x4C3 + SYS___DGHT = 0x4A8 + SYS___ENVN = 0x4B0 + SYS___ERFC_B = 0x4C5 + SYS___ERF_B = 0x4C4 + SYS___EXPM1_B = 0x4C6 + SYS___EXP_B = 0x4DF + SYS___FABS_B = 0x4C7 + SYS___FLOOR_B = 0x4C9 + SYS___FMOD_B = 0x4E0 + SYS___FP_SETMODE = 0x4F8 + SYS___FREXP_B = 0x4CA + SYS___GAMMA_B = 0x4E1 + SYS___GDRR = 0x4A1 + SYS___HRRNO = 0x4A2 + SYS___HYPOT_B = 0x4E3 + SYS___ILOGB_B = 0x4CB + SYS___ISNAN_B = 0x4CC + SYS___J0_B = 0x4E4 + SYS___J1_B = 0x4E6 + SYS___JN_B = 0x4E8 + SYS___LDEXP_B = 0x4CD + SYS___LGAMMA_B = 0x4EA + SYS___LOG10_B = 0x4ED + SYS___LOG1P_B = 0x4CE + SYS___LOGB_B = 0x4CF + SYS___LOGIN = 0x4F5 + SYS___LOG_B = 0x4EC + SYS___MLOCKALL = 0x4B1 + SYS___MODF_B = 0x4D1 + SYS___NEXTAFTER_B = 0x4D2 + SYS___OPENDIR2 = 0x4F3 + SYS___OPEN_STAT = 0x4F6 + SYS___OPND = 0x4A5 + SYS___OPPT = 0x4A6 + SYS___OPRG = 0x4A3 + SYS___OPRR = 0x4A4 + SYS___PID_AFFINITY = 0x4BD + SYS___POW_B = 0x4EE + SYS___READDIR2 = 0x4F4 + SYS___REMAINDER_B = 0x4EF + SYS___RINT_B = 0x4D3 + SYS___SCALB_B = 0x4F0 + SYS___SIGACTIONSET = 0x4FB + 
SYS___SIGGM = 0x4A7 + SYS___SINH_B = 0x4F1 + SYS___SIN_B = 0x4D6 + SYS___SQRT_B = 0x4F2 + SYS___TANH_B = 0x4D8 + SYS___TAN_B = 0x4D7 + SYS___TRRNO = 0x4AF + SYS___TZNE = 0x4A9 + SYS___TZZN = 0x4AA + SYS___UCREATE = 0x4FC + SYS___UFREE = 0x4FE + SYS___UHEAPREPORT = 0x4FF + SYS___UMALLOC = 0x4FD + SYS___Y0_B = 0x4E5 + SYS___Y1_B = 0x4E7 + SYS___YN_B = 0x4E9 + SYS_ABORT = 0x05C + SYS_ASCTIME_R = 0x5E0 + SYS_ATEXIT = 0x05D + SYS_CONNECTE = 0x5AE + SYS_CONNECTEXPORTIMPORT = 0x5AE + SYS_CTIME_R = 0x5E1 + SYS_DN_COMP = 0x5DF + SYS_DN_EXPAND = 0x5DD + SYS_DN_SKIPNAME = 0x5DE + SYS_EXIT = 0x05A + SYS_EXPORTWO = 0x5A1 + SYS_EXPORTWORKUNIT = 0x5A1 + SYS_EXTRACTW = 0x5A5 + SYS_EXTRACTWORKUNIT = 0x5A5 + SYS_FSEEKO = 0x5C9 + SYS_FTELLO = 0x5C8 + SYS_GETGRGID_R = 0x5E7 + SYS_GETGRNAM_R = 0x5E8 + SYS_GETLOGIN_R = 0x5E9 + SYS_GETPWNAM_R = 0x5EA + SYS_GETPWUID_R = 0x5EB + SYS_GMTIME_R = 0x5E2 + SYS_IMPORTWO = 0x5A3 + SYS_IMPORTWORKUNIT = 0x5A3 + SYS_INET_NTOP = 0x5D3 + SYS_INET_PTON = 0x5D4 + SYS_LLABS = 0x5CE + SYS_LLDIV = 0x5CB + SYS_LOCALTIME_R = 0x5E3 + SYS_PTHREAD_ATFORK = 0x5ED + SYS_PTHREAD_ATTR_GETDETACHSTATE_U98 = 0x5FB + SYS_PTHREAD_ATTR_GETGUARDSIZE = 0x5EE + SYS_PTHREAD_ATTR_GETSCHEDPARAM = 0x5F9 + SYS_PTHREAD_ATTR_GETSTACKADDR = 0x5EF + SYS_PTHREAD_ATTR_SETDETACHSTATE_U98 = 0x5FC + SYS_PTHREAD_ATTR_SETGUARDSIZE = 0x5F0 + SYS_PTHREAD_ATTR_SETSCHEDPARAM = 0x5FA + SYS_PTHREAD_ATTR_SETSTACKADDR = 0x5F1 + SYS_PTHREAD_CONDATTR_GETPSHARED = 0x5F2 + SYS_PTHREAD_CONDATTR_SETPSHARED = 0x5F3 + SYS_PTHREAD_DETACH_U98 = 0x5FD + SYS_PTHREAD_GETCONCURRENCY = 0x5F4 + SYS_PTHREAD_GETSPECIFIC_U98 = 0x5FE + SYS_PTHREAD_KEY_DELETE = 0x5F5 + SYS_PTHREAD_SETCANCELSTATE = 0x5FF + SYS_PTHREAD_SETCONCURRENCY = 0x5F6 + SYS_PTHREAD_SIGMASK = 0x5F7 + SYS_QUERYENC = 0x5AD + SYS_QUERYWORKUNITCLASSIFICATION = 0x5AD + SYS_RAISE = 0x05E + SYS_RAND_R = 0x5E4 + SYS_READDIR_R = 0x5E6 + SYS_REALLOC = 0x05B + SYS_RES_INIT = 0x5D8 + SYS_RES_MKQUERY = 0x5D7 + SYS_RES_QUERY = 0x5D9 + SYS_RES_QUERYDOMAIN = 
0x5DC + SYS_RES_SEARCH = 0x5DA + SYS_RES_SEND = 0x5DB + SYS_SETJMP = 0x05F + SYS_SIGQUEUE = 0x5A9 + SYS_STRTOK_R = 0x5E5 + SYS_STRTOLL = 0x5B0 + SYS_STRTOULL = 0x5B1 + SYS_TTYNAME_R = 0x5EC + SYS_UNDOEXPO = 0x5A2 + SYS_UNDOEXPORTWORKUNIT = 0x5A2 + SYS_UNDOIMPO = 0x5A4 + SYS_UNDOIMPORTWORKUNIT = 0x5A4 + SYS_WCSTOLL = 0x5CC + SYS_WCSTOULL = 0x5CD + SYS___ABORT = 0x05C + SYS___CONSOLE2 = 0x5D2 + SYS___CPL = 0x5A6 + SYS___DISCARDDATA = 0x5F8 + SYS___DSA_PREV = 0x5B2 + SYS___EP_FIND = 0x5B3 + SYS___FP_SWAPMODE = 0x5AF + SYS___GETUSERID = 0x5AB + SYS___GET_CPUID = 0x5B9 + SYS___GET_SYSTEM_SETTINGS = 0x5BA + SYS___IPDOMAINNAME = 0x5AC + SYS___MAP_INIT = 0x5A7 + SYS___MAP_SERVICE = 0x5A8 + SYS___MOUNT = 0x5AA + SYS___MSGRCV_TIMED = 0x5B7 + SYS___RES = 0x5D6 + SYS___SEMOP_TIMED = 0x5B8 + SYS___SERVER_THREADS_QUERY = 0x5B4 + SYS_FPRINTF = 0x06D + SYS_FSCANF = 0x06A + SYS_PRINTF = 0x06F + SYS_SETBUF = 0x06B + SYS_SETVBUF = 0x06C + SYS_SSCANF = 0x06E + SYS___CATGETS_A = 0x6C0 + SYS___CHAUDIT_A = 0x6F4 + SYS___CHMOD_A = 0x6E8 + SYS___COLLATE_INIT_A = 0x6AC + SYS___CREAT_A = 0x6F6 + SYS___CTYPE_INIT_A = 0x6AF + SYS___DLLLOAD_A = 0x6DF + SYS___DLLQUERYFN_A = 0x6E0 + SYS___DLLQUERYVAR_A = 0x6E1 + SYS___E2A_L = 0x6E3 + SYS___EXECLE_A = 0x6A0 + SYS___EXECLP_A = 0x6A4 + SYS___EXECVE_A = 0x6C1 + SYS___EXECVP_A = 0x6C2 + SYS___EXECV_A = 0x6B1 + SYS___FPRINTF_A = 0x6FA + SYS___GETADDRINFO_A = 0x6BF + SYS___GETNAMEINFO_A = 0x6C4 + SYS___GET_WCTYPE_STD_A = 0x6AE + SYS___ICONV_OPEN_A = 0x6DE + SYS___IF_INDEXTONAME_A = 0x6DC + SYS___IF_NAMETOINDEX_A = 0x6DB + SYS___ISWCTYPE_A = 0x6B0 + SYS___IS_WCTYPE_STD_A = 0x6B2 + SYS___LOCALECONV_A = 0x6B8 + SYS___LOCALECONV_STD_A = 0x6B9 + SYS___LOCALE_INIT_A = 0x6B7 + SYS___LSTAT_A = 0x6EE + SYS___LSTAT_O_A = 0x6EF + SYS___MKDIR_A = 0x6E9 + SYS___MKFIFO_A = 0x6EC + SYS___MKNOD_A = 0x6F0 + SYS___MONETARY_INIT_A = 0x6BC + SYS___MOUNT_A = 0x6F1 + SYS___NL_CSINFO_A = 0x6D6 + SYS___NL_LANGINFO_A = 0x6BA + SYS___NL_LNAGINFO_STD_A = 0x6BB + 
SYS___NL_MONINFO_A = 0x6D7 + SYS___NL_NUMINFO_A = 0x6D8 + SYS___NL_RESPINFO_A = 0x6D9 + SYS___NL_TIMINFO_A = 0x6DA + SYS___NUMERIC_INIT_A = 0x6C6 + SYS___OPEN_A = 0x6F7 + SYS___PRINTF_A = 0x6DD + SYS___RESP_INIT_A = 0x6C7 + SYS___RPMATCH_A = 0x6C8 + SYS___RPMATCH_C_A = 0x6C9 + SYS___RPMATCH_STD_A = 0x6CA + SYS___SETLOCALE_A = 0x6F9 + SYS___SPAWNP_A = 0x6C5 + SYS___SPAWN_A = 0x6C3 + SYS___SPRINTF_A = 0x6FB + SYS___STAT_A = 0x6EA + SYS___STAT_O_A = 0x6EB + SYS___STRCOLL_STD_A = 0x6A1 + SYS___STRFMON_A = 0x6BD + SYS___STRFMON_STD_A = 0x6BE + SYS___STRFTIME_A = 0x6CC + SYS___STRFTIME_STD_A = 0x6CD + SYS___STRPTIME_A = 0x6CE + SYS___STRPTIME_STD_A = 0x6CF + SYS___STRXFRM_A = 0x6A2 + SYS___STRXFRM_C_A = 0x6A3 + SYS___STRXFRM_STD_A = 0x6A5 + SYS___SYNTAX_INIT_A = 0x6D4 + SYS___TIME_INIT_A = 0x6CB + SYS___TOD_INIT_A = 0x6D5 + SYS___TOWLOWER_A = 0x6B3 + SYS___TOWLOWER_STD_A = 0x6B4 + SYS___TOWUPPER_A = 0x6B5 + SYS___TOWUPPER_STD_A = 0x6B6 + SYS___UMOUNT_A = 0x6F2 + SYS___VFPRINTF_A = 0x6FC + SYS___VPRINTF_A = 0x6FD + SYS___VSPRINTF_A = 0x6FE + SYS___VSWPRINTF_A = 0x6FF + SYS___WCSCOLL_A = 0x6A6 + SYS___WCSCOLL_C_A = 0x6A7 + SYS___WCSCOLL_STD_A = 0x6A8 + SYS___WCSFTIME_A = 0x6D0 + SYS___WCSFTIME_STD_A = 0x6D1 + SYS___WCSXFRM_A = 0x6A9 + SYS___WCSXFRM_C_A = 0x6AA + SYS___WCSXFRM_STD_A = 0x6AB + SYS___WCTYPE_A = 0x6AD + SYS___W_GETMNTENT_A = 0x6F5 + SYS_____CCSIDTYPE_A = 0x6E6 + SYS_____CHATTR_A = 0x6E2 + SYS_____CSNAMETYPE_A = 0x6E7 + SYS_____OPEN_STAT_A = 0x6ED + SYS_____SPAWN2_A = 0x6D2 + SYS_____SPAWNP2_A = 0x6D3 + SYS_____TOCCSID_A = 0x6E4 + SYS_____TOCSNAME_A = 0x6E5 + SYS_ACL_FREE = 0x7FF + SYS_ACL_INIT = 0x7FE + SYS_FWIDE = 0x7DF + SYS_FWPRINTF = 0x7D1 + SYS_FWRITE = 0x07E + SYS_FWSCANF = 0x7D5 + SYS_GETCHAR = 0x07B + SYS_GETS = 0x07C + SYS_M_CREATE_LAYOUT = 0x7C9 + SYS_M_DESTROY_LAYOUT = 0x7CA + SYS_M_GETVALUES_LAYOUT = 0x7CB + SYS_M_SETVALUES_LAYOUT = 0x7CC + SYS_M_TRANSFORM_LAYOUT = 0x7CD + SYS_M_WTRANSFORM_LAYOUT = 0x7CE + SYS_PREAD = 0x7C7 + SYS_PUTC = 0x07D + 
SYS_PUTCHAR = 0x07A + SYS_PUTS = 0x07F + SYS_PWRITE = 0x7C8 + SYS_TOWCTRAN = 0x7D8 + SYS_TOWCTRANS = 0x7D8 + SYS_UNATEXIT = 0x7B5 + SYS_VFWPRINT = 0x7D3 + SYS_VFWPRINTF = 0x7D3 + SYS_VWPRINTF = 0x7D4 + SYS_WCTRANS = 0x7D7 + SYS_WPRINTF = 0x7D2 + SYS_WSCANF = 0x7D6 + SYS___ASCTIME_R_A = 0x7A1 + SYS___BASENAME_A = 0x7DC + SYS___BTOWC_A = 0x7E4 + SYS___CDUMP_A = 0x7B7 + SYS___CEE3DMP_A = 0x7B6 + SYS___CEILF_H = 0x7F4 + SYS___CEILL_H = 0x7F5 + SYS___CEIL_H = 0x7EA + SYS___CRYPT_A = 0x7BE + SYS___CSNAP_A = 0x7B8 + SYS___CTEST_A = 0x7B9 + SYS___CTIME_R_A = 0x7A2 + SYS___CTRACE_A = 0x7BA + SYS___DBM_OPEN_A = 0x7E6 + SYS___DIRNAME_A = 0x7DD + SYS___FABSF_H = 0x7FA + SYS___FABSL_H = 0x7FB + SYS___FABS_H = 0x7ED + SYS___FGETWC_A = 0x7AA + SYS___FGETWS_A = 0x7AD + SYS___FLOORF_H = 0x7F6 + SYS___FLOORL_H = 0x7F7 + SYS___FLOOR_H = 0x7EB + SYS___FPUTWC_A = 0x7A5 + SYS___FPUTWS_A = 0x7A8 + SYS___GETTIMEOFDAY_A = 0x7AE + SYS___GETWCHAR_A = 0x7AC + SYS___GETWC_A = 0x7AB + SYS___GLOB_A = 0x7DE + SYS___GMTIME_A = 0x7AF + SYS___GMTIME_R_A = 0x7B0 + SYS___INET_PTON_A = 0x7BC + SYS___J0_H = 0x7EE + SYS___J1_H = 0x7EF + SYS___JN_H = 0x7F0 + SYS___LOCALTIME_A = 0x7B1 + SYS___LOCALTIME_R_A = 0x7B2 + SYS___MALLOC24 = 0x7FC + SYS___MALLOC31 = 0x7FD + SYS___MKTIME_A = 0x7B3 + SYS___MODFF_H = 0x7F8 + SYS___MODFL_H = 0x7F9 + SYS___MODF_H = 0x7EC + SYS___OPENDIR_A = 0x7C2 + SYS___OSNAME = 0x7E0 + SYS___PUTWCHAR_A = 0x7A7 + SYS___PUTWC_A = 0x7A6 + SYS___READDIR_A = 0x7C3 + SYS___STRTOLL_A = 0x7A3 + SYS___STRTOULL_A = 0x7A4 + SYS___SYSLOG_A = 0x7BD + SYS___TZZNA = 0x7B4 + SYS___UNGETWC_A = 0x7A9 + SYS___UTIME_A = 0x7A0 + SYS___VFPRINTF2_A = 0x7E7 + SYS___VPRINTF2_A = 0x7E8 + SYS___VSPRINTF2_A = 0x7E9 + SYS___VSWPRNTF2_A = 0x7BB + SYS___WCSTOD_A = 0x7D9 + SYS___WCSTOL_A = 0x7DA + SYS___WCSTOUL_A = 0x7DB + SYS___WCTOB_A = 0x7E5 + SYS___Y0_H = 0x7F1 + SYS___Y1_H = 0x7F2 + SYS___YN_H = 0x7F3 + SYS_____OPENDIR2_A = 0x7BF + SYS_____OSNAME_A = 0x7E1 + SYS_____READDIR2_A = 0x7C0 + SYS_DLCLOSE = 0x8DF + 
SYS_DLERROR = 0x8E0 + SYS_DLOPEN = 0x8DD + SYS_DLSYM = 0x8DE + SYS_FLOCKFILE = 0x8D3 + SYS_FTRYLOCKFILE = 0x8D4 + SYS_FUNLOCKFILE = 0x8D5 + SYS_GETCHAR_UNLOCKED = 0x8D7 + SYS_GETC_UNLOCKED = 0x8D6 + SYS_PUTCHAR_UNLOCKED = 0x8D9 + SYS_PUTC_UNLOCKED = 0x8D8 + SYS_SNPRINTF = 0x8DA + SYS_VSNPRINTF = 0x8DB + SYS_WCSCSPN = 0x08B + SYS_WCSLEN = 0x08C + SYS_WCSNCAT = 0x08D + SYS_WCSNCMP = 0x08A + SYS_WCSNCPY = 0x08F + SYS_WCSSPN = 0x08E + SYS___ABSF_H = 0x8E7 + SYS___ABSL_H = 0x8E8 + SYS___ABS_H = 0x8E6 + SYS___ACOSF_H = 0x8EA + SYS___ACOSH_H = 0x8EC + SYS___ACOSL_H = 0x8EB + SYS___ACOS_H = 0x8E9 + SYS___ASINF_H = 0x8EE + SYS___ASINH_H = 0x8F0 + SYS___ASINL_H = 0x8EF + SYS___ASIN_H = 0x8ED + SYS___ATAN2F_H = 0x8F8 + SYS___ATAN2L_H = 0x8F9 + SYS___ATAN2_H = 0x8F7 + SYS___ATANF_H = 0x8F2 + SYS___ATANHF_H = 0x8F5 + SYS___ATANHL_H = 0x8F6 + SYS___ATANH_H = 0x8F4 + SYS___ATANL_H = 0x8F3 + SYS___ATAN_H = 0x8F1 + SYS___CBRT_H = 0x8FA + SYS___COPYSIGNF_H = 0x8FB + SYS___COPYSIGNL_H = 0x8FC + SYS___COSF_H = 0x8FE + SYS___COSL_H = 0x8FF + SYS___COS_H = 0x8FD + SYS___DLERROR_A = 0x8D2 + SYS___DLOPEN_A = 0x8D0 + SYS___DLSYM_A = 0x8D1 + SYS___GETUTXENT_A = 0x8C6 + SYS___GETUTXID_A = 0x8C7 + SYS___GETUTXLINE_A = 0x8C8 + SYS___ITOA = 0x8AA + SYS___ITOA_A = 0x8B0 + SYS___LE_CONDITION_TOKEN_BUILD = 0x8A5 + SYS___LE_MSG_ADD_INSERT = 0x8A6 + SYS___LE_MSG_GET = 0x8A7 + SYS___LE_MSG_GET_AND_WRITE = 0x8A8 + SYS___LE_MSG_WRITE = 0x8A9 + SYS___LLTOA = 0x8AE + SYS___LLTOA_A = 0x8B4 + SYS___LTOA = 0x8AC + SYS___LTOA_A = 0x8B2 + SYS___PUTCHAR_UNLOCKED_A = 0x8CC + SYS___PUTC_UNLOCKED_A = 0x8CB + SYS___PUTUTXLINE_A = 0x8C9 + SYS___RESET_EXCEPTION_HANDLER = 0x8E3 + SYS___REXEC_A = 0x8C4 + SYS___REXEC_AF_A = 0x8C5 + SYS___SET_EXCEPTION_HANDLER = 0x8E2 + SYS___SNPRINTF_A = 0x8CD + SYS___SUPERKILL = 0x8A4 + SYS___TCGETATTR_A = 0x8A1 + SYS___TCSETATTR_A = 0x8A2 + SYS___ULLTOA = 0x8AF + SYS___ULLTOA_A = 0x8B5 + SYS___ULTOA = 0x8AD + SYS___ULTOA_A = 0x8B3 + SYS___UTOA = 0x8AB + SYS___UTOA_A = 0x8B1 + 
SYS___VHM_EVENT = 0x8E4 + SYS___VSNPRINTF_A = 0x8CE + SYS_____GETENV_A = 0x8C3 + SYS_____UTMPXNAME_A = 0x8CA + SYS_CACOSH = 0x9A0 + SYS_CACOSHF = 0x9A3 + SYS_CACOSHL = 0x9A6 + SYS_CARG = 0x9A9 + SYS_CARGF = 0x9AC + SYS_CARGL = 0x9AF + SYS_CASIN = 0x9B2 + SYS_CASINF = 0x9B5 + SYS_CASINH = 0x9BB + SYS_CASINHF = 0x9BE + SYS_CASINHL = 0x9C1 + SYS_CASINL = 0x9B8 + SYS_CATAN = 0x9C4 + SYS_CATANF = 0x9C7 + SYS_CATANH = 0x9CD + SYS_CATANHF = 0x9D0 + SYS_CATANHL = 0x9D3 + SYS_CATANL = 0x9CA + SYS_CCOS = 0x9D6 + SYS_CCOSF = 0x9D9 + SYS_CCOSH = 0x9DF + SYS_CCOSHF = 0x9E2 + SYS_CCOSHL = 0x9E5 + SYS_CCOSL = 0x9DC + SYS_CEXP = 0x9E8 + SYS_CEXPF = 0x9EB + SYS_CEXPL = 0x9EE + SYS_CIMAG = 0x9F1 + SYS_CIMAGF = 0x9F4 + SYS_CIMAGL = 0x9F7 + SYS_CLOGF = 0x9FD + SYS_MEMCHR = 0x09B + SYS_MEMCMP = 0x09A + SYS_STRCOLL = 0x09C + SYS_STRNCMP = 0x09D + SYS_STRRCHR = 0x09F + SYS_STRXFRM = 0x09E + SYS___CACOSHF_B = 0x9A4 + SYS___CACOSHF_H = 0x9A5 + SYS___CACOSHL_B = 0x9A7 + SYS___CACOSHL_H = 0x9A8 + SYS___CACOSH_B = 0x9A1 + SYS___CACOSH_H = 0x9A2 + SYS___CARGF_B = 0x9AD + SYS___CARGF_H = 0x9AE + SYS___CARGL_B = 0x9B0 + SYS___CARGL_H = 0x9B1 + SYS___CARG_B = 0x9AA + SYS___CARG_H = 0x9AB + SYS___CASINF_B = 0x9B6 + SYS___CASINF_H = 0x9B7 + SYS___CASINHF_B = 0x9BF + SYS___CASINHF_H = 0x9C0 + SYS___CASINHL_B = 0x9C2 + SYS___CASINHL_H = 0x9C3 + SYS___CASINH_B = 0x9BC + SYS___CASINH_H = 0x9BD + SYS___CASINL_B = 0x9B9 + SYS___CASINL_H = 0x9BA + SYS___CASIN_B = 0x9B3 + SYS___CASIN_H = 0x9B4 + SYS___CATANF_B = 0x9C8 + SYS___CATANF_H = 0x9C9 + SYS___CATANHF_B = 0x9D1 + SYS___CATANHF_H = 0x9D2 + SYS___CATANHL_B = 0x9D4 + SYS___CATANHL_H = 0x9D5 + SYS___CATANH_B = 0x9CE + SYS___CATANH_H = 0x9CF + SYS___CATANL_B = 0x9CB + SYS___CATANL_H = 0x9CC + SYS___CATAN_B = 0x9C5 + SYS___CATAN_H = 0x9C6 + SYS___CCOSF_B = 0x9DA + SYS___CCOSF_H = 0x9DB + SYS___CCOSHF_B = 0x9E3 + SYS___CCOSHF_H = 0x9E4 + SYS___CCOSHL_B = 0x9E6 + SYS___CCOSHL_H = 0x9E7 + SYS___CCOSH_B = 0x9E0 + SYS___CCOSH_H = 0x9E1 + SYS___CCOSL_B = 0x9DD 
+ SYS___CCOSL_H = 0x9DE + SYS___CCOS_B = 0x9D7 + SYS___CCOS_H = 0x9D8 + SYS___CEXPF_B = 0x9EC + SYS___CEXPF_H = 0x9ED + SYS___CEXPL_B = 0x9EF + SYS___CEXPL_H = 0x9F0 + SYS___CEXP_B = 0x9E9 + SYS___CEXP_H = 0x9EA + SYS___CIMAGF_B = 0x9F5 + SYS___CIMAGF_H = 0x9F6 + SYS___CIMAGL_B = 0x9F8 + SYS___CIMAGL_H = 0x9F9 + SYS___CIMAG_B = 0x9F2 + SYS___CIMAG_H = 0x9F3 + SYS___CLOG = 0x9FA + SYS___CLOGF_B = 0x9FE + SYS___CLOGF_H = 0x9FF + SYS___CLOG_B = 0x9FB + SYS___CLOG_H = 0x9FC + SYS_ISWCTYPE = 0x10C + SYS_ISWXDIGI = 0x10A + SYS_ISWXDIGIT = 0x10A + SYS_MBSINIT = 0x10F + SYS_TOWLOWER = 0x10D + SYS_TOWUPPER = 0x10E + SYS_WCTYPE = 0x10B + SYS_WCSSTR = 0x11B + SYS___RPMTCH = 0x11A + SYS_WCSTOD = 0x12E + SYS_WCSTOK = 0x12C + SYS_WCSTOL = 0x12D + SYS_WCSTOUL = 0x12F + SYS_FGETWC = 0x13C + SYS_FGETWS = 0x13D + SYS_FPUTWC = 0x13E + SYS_FPUTWS = 0x13F + SYS_REGERROR = 0x13B + SYS_REGFREE = 0x13A + SYS_COLLEQUIV = 0x14F + SYS_COLLTOSTR = 0x14E + SYS_ISMCCOLLEL = 0x14C + SYS_STRTOCOLL = 0x14D + SYS_DLLFREE = 0x16F + SYS_DLLQUERYFN = 0x16D + SYS_DLLQUERYVAR = 0x16E + SYS_GETMCCOLL = 0x16A + SYS_GETWMCCOLL = 0x16B + SYS___ERR2AD = 0x16C + SYS_CFSETOSPEED = 0x17A + SYS_CHDIR = 0x17B + SYS_CHMOD = 0x17C + SYS_CHOWN = 0x17D + SYS_CLOSE = 0x17E + SYS_CLOSEDIR = 0x17F + SYS_LOG = 0x017 + SYS_COSH = 0x018 + SYS_FCHMOD = 0x18A + SYS_FCHOWN = 0x18B + SYS_FCNTL = 0x18C + SYS_FILENO = 0x18D + SYS_FORK = 0x18E + SYS_FPATHCONF = 0x18F + SYS_GETLOGIN = 0x19A + SYS_GETPGRP = 0x19C + SYS_GETPID = 0x19D + SYS_GETPPID = 0x19E + SYS_GETPWNAM = 0x19F + SYS_TANH = 0x019 + SYS_W_GETMNTENT = 0x19B + SYS_POW = 0x020 + SYS_PTHREAD_SELF = 0x20A + SYS_PTHREAD_SETINTR = 0x20B + SYS_PTHREAD_SETINTRTYPE = 0x20C + SYS_PTHREAD_SETSPECIFIC = 0x20D + SYS_PTHREAD_TESTINTR = 0x20E + SYS_PTHREAD_YIELD = 0x20F + SYS_SQRT = 0x021 + SYS_FLOOR = 0x022 + SYS_J1 = 0x023 + SYS_WCSPBRK = 0x23F + SYS_BSEARCH = 0x24C + SYS_FABS = 0x024 + SYS_GETENV = 0x24A + SYS_LDIV = 0x24D + SYS_SYSTEM = 0x24B + SYS_FMOD = 0x025 + SYS___RETHROW 
= 0x25F + SYS___THROW = 0x25E + SYS_J0 = 0x026 + SYS_PUTENV = 0x26A + SYS___GETENV = 0x26F + SYS_SEMCTL = 0x27A + SYS_SEMGET = 0x27B + SYS_SEMOP = 0x27C + SYS_SHMAT = 0x27D + SYS_SHMCTL = 0x27E + SYS_SHMDT = 0x27F + SYS_YN = 0x027 + SYS_JN = 0x028 + SYS_SIGALTSTACK = 0x28A + SYS_SIGHOLD = 0x28B + SYS_SIGIGNORE = 0x28C + SYS_SIGINTERRUPT = 0x28D + SYS_SIGPAUSE = 0x28E + SYS_SIGRELSE = 0x28F + SYS_GETOPT = 0x29A + SYS_GETSUBOPT = 0x29D + SYS_LCHOWN = 0x29B + SYS_SETPGRP = 0x29E + SYS_TRUNCATE = 0x29C + SYS_Y0 = 0x029 + SYS___GDERR = 0x29F + SYS_ISALPHA = 0x030 + SYS_VFORK = 0x30F + SYS__LONGJMP = 0x30D + SYS__SETJMP = 0x30E + SYS_GLOB = 0x31A + SYS_GLOBFREE = 0x31B + SYS_ISALNUM = 0x031 + SYS_PUTW = 0x31C + SYS_SEEKDIR = 0x31D + SYS_TELLDIR = 0x31E + SYS_TEMPNAM = 0x31F + SYS_GETTIMEOFDAY_R = 0x32E + SYS_ISLOWER = 0x032 + SYS_LGAMMA = 0x32C + SYS_REMAINDER = 0x32A + SYS_SCALB = 0x32B + SYS_SYNC = 0x32F + SYS_TTYSLOT = 0x32D + SYS_ENDPROTOENT = 0x33A + SYS_ENDSERVENT = 0x33B + SYS_GETHOSTBYADDR = 0x33D + SYS_GETHOSTBYADDR_R = 0x33C + SYS_GETHOSTBYNAME = 0x33F + SYS_GETHOSTBYNAME_R = 0x33E + SYS_ISCNTRL = 0x033 + SYS_GETSERVBYNAME = 0x34A + SYS_GETSERVBYPORT = 0x34B + SYS_GETSERVENT = 0x34C + SYS_GETSOCKNAME = 0x34D + SYS_GETSOCKOPT = 0x34E + SYS_INET_ADDR = 0x34F + SYS_ISDIGIT = 0x034 + SYS_ISGRAPH = 0x035 + SYS_SELECT = 0x35B + SYS_SELECTEX = 0x35C + SYS_SEND = 0x35D + SYS_SENDTO = 0x35F + SYS_CHROOT = 0x36A + SYS_ISNAN = 0x36D + SYS_ISUPPER = 0x036 + SYS_ULIMIT = 0x36C + SYS_UTIMES = 0x36E + SYS_W_STATVFS = 0x36B + SYS___H_ERRNO = 0x36F + SYS_GRANTPT = 0x37A + SYS_ISPRINT = 0x037 + SYS_TCGETSID = 0x37C + SYS_UNLOCKPT = 0x37B + SYS___TCGETCP = 0x37D + SYS___TCSETCP = 0x37E + SYS___TCSETTABLES = 0x37F + SYS_ISPUNCT = 0x038 + SYS_NLIST = 0x38C + SYS___IPDBCS = 0x38D + SYS___IPDSPX = 0x38E + SYS___IPMSGC = 0x38F + SYS___STHOSTENT = 0x38B + SYS___STSERVENT = 0x38A + SYS_ISSPACE = 0x039 + SYS_COS = 0x040 + SYS_T_ALLOC = 0x40A + SYS_T_BIND = 0x40B + SYS_T_CLOSE = 0x40C + 
SYS_T_CONNECT = 0x40D + SYS_T_ERROR = 0x40E + SYS_T_FREE = 0x40F + SYS_TAN = 0x041 + SYS_T_RCVREL = 0x41A + SYS_T_RCVUDATA = 0x41B + SYS_T_RCVUDERR = 0x41C + SYS_T_SND = 0x41D + SYS_T_SNDDIS = 0x41E + SYS_T_SNDREL = 0x41F + SYS_GETPMSG = 0x42A + SYS_ISASTREAM = 0x42B + SYS_PUTMSG = 0x42C + SYS_PUTPMSG = 0x42D + SYS_SINH = 0x042 + SYS___ISPOSIXON = 0x42E + SYS___OPENMVSREL = 0x42F + SYS_ACOS = 0x043 + SYS_ATAN = 0x044 + SYS_ATAN2 = 0x045 + SYS_FTELL = 0x046 + SYS_FGETPOS = 0x047 + SYS_SOCK_DEBUG = 0x47A + SYS_SOCK_DO_TESTSTOR = 0x47D + SYS_TAKESOCKET = 0x47E + SYS___SERVER_INIT = 0x47F + SYS_FSEEK = 0x048 + SYS___IPHOST = 0x48B + SYS___IPNODE = 0x48C + SYS___SERVER_CLASSIFY_CREATE = 0x48D + SYS___SERVER_CLASSIFY_DESTROY = 0x48E + SYS___SERVER_CLASSIFY_RESET = 0x48F + SYS___SMF_RECORD = 0x48A + SYS_FSETPOS = 0x049 + SYS___FNWSA = 0x49B + SYS___SPAWN2 = 0x49D + SYS___SPAWNP2 = 0x49E + SYS_ATOF = 0x050 + SYS_PTHREAD_MUTEXATTR_GETPSHARED = 0x50A + SYS_PTHREAD_MUTEXATTR_SETPSHARED = 0x50B + SYS_PTHREAD_RWLOCK_DESTROY = 0x50C + SYS_PTHREAD_RWLOCK_INIT = 0x50D + SYS_PTHREAD_RWLOCK_RDLOCK = 0x50E + SYS_PTHREAD_RWLOCK_TRYRDLOCK = 0x50F + SYS_ATOI = 0x051 + SYS___FP_CLASS = 0x51D + SYS___FP_CLR_FLAG = 0x51A + SYS___FP_FINITE = 0x51E + SYS___FP_ISNAN = 0x51F + SYS___FP_RAISE_XCP = 0x51C + SYS___FP_READ_FLAG = 0x51B + SYS_RAND = 0x052 + SYS_SIGTIMEDWAIT = 0x52D + SYS_SIGWAITINFO = 0x52E + SYS___CHKBFP = 0x52F + SYS___FPC_RS = 0x52C + SYS___FPC_RW = 0x52A + SYS___FPC_SM = 0x52B + SYS_STRTOD = 0x053 + SYS_STRTOL = 0x054 + SYS_STRTOUL = 0x055 + SYS_MALLOC = 0x056 + SYS_SRAND = 0x057 + SYS_CALLOC = 0x058 + SYS_FREE = 0x059 + SYS___OSENV = 0x59F + SYS___W_PIOCTL = 0x59E + SYS_LONGJMP = 0x060 + SYS___FLOORF_B = 0x60A + SYS___FLOORL_B = 0x60B + SYS___FREXPF_B = 0x60C + SYS___FREXPL_B = 0x60D + SYS___LDEXPF_B = 0x60E + SYS___LDEXPL_B = 0x60F + SYS_SIGNAL = 0x061 + SYS___ATAN2F_B = 0x61A + SYS___ATAN2L_B = 0x61B + SYS___COSHF_B = 0x61C + SYS___COSHL_B = 0x61D + SYS___EXPF_B = 0x61E + 
SYS___EXPL_B = 0x61F + SYS_TMPNAM = 0x062 + SYS___ABSF_B = 0x62A + SYS___ABSL_B = 0x62C + SYS___ABS_B = 0x62B + SYS___FMODF_B = 0x62D + SYS___FMODL_B = 0x62E + SYS___MODFF_B = 0x62F + SYS_ATANL = 0x63A + SYS_CEILF = 0x63B + SYS_CEILL = 0x63C + SYS_COSF = 0x63D + SYS_COSHF = 0x63F + SYS_COSL = 0x63E + SYS_REMOVE = 0x063 + SYS_POWL = 0x64A + SYS_RENAME = 0x064 + SYS_SINF = 0x64B + SYS_SINHF = 0x64F + SYS_SINL = 0x64C + SYS_SQRTF = 0x64D + SYS_SQRTL = 0x64E + SYS_BTOWC = 0x65F + SYS_FREXPL = 0x65A + SYS_LDEXPF = 0x65B + SYS_LDEXPL = 0x65C + SYS_MODFF = 0x65D + SYS_MODFL = 0x65E + SYS_TMPFILE = 0x065 + SYS_FREOPEN = 0x066 + SYS___CHARMAP_INIT_A = 0x66E + SYS___GETHOSTBYADDR_R_A = 0x66C + SYS___GETHOSTBYNAME_A = 0x66A + SYS___GETHOSTBYNAME_R_A = 0x66D + SYS___MBLEN_A = 0x66F + SYS___RES_INIT_A = 0x66B + SYS_FCLOSE = 0x067 + SYS___GETGRGID_R_A = 0x67D + SYS___WCSTOMBS_A = 0x67A + SYS___WCSTOMBS_STD_A = 0x67B + SYS___WCSWIDTH_A = 0x67C + SYS___WCSWIDTH_ASIA = 0x67F + SYS___WCSWIDTH_STD_A = 0x67E + SYS_FFLUSH = 0x068 + SYS___GETLOGIN_R_A = 0x68E + SYS___GETPWNAM_R_A = 0x68C + SYS___GETPWUID_R_A = 0x68D + SYS___TTYNAME_R_A = 0x68F + SYS___WCWIDTH_ASIA = 0x68B + SYS___WCWIDTH_STD_A = 0x68A + SYS_FOPEN = 0x069 + SYS___REGEXEC_A = 0x69A + SYS___REGEXEC_STD_A = 0x69B + SYS___REGFREE_A = 0x69C + SYS___REGFREE_STD_A = 0x69D + SYS___STRCOLL_A = 0x69E + SYS___STRCOLL_C_A = 0x69F + SYS_SCANF = 0x070 + SYS___A64L_A = 0x70C + SYS___ECVT_A = 0x70D + SYS___FCVT_A = 0x70E + SYS___GCVT_A = 0x70F + SYS___STRTOUL_A = 0x70A + SYS_____AE_CORRESTBL_QUERY_A = 0x70B + SYS_SPRINTF = 0x071 + SYS___ACCESS_A = 0x71F + SYS___CATOPEN_A = 0x71E + SYS___GETOPT_A = 0x71D + SYS___REALPATH_A = 0x71A + SYS___SETENV_A = 0x71B + SYS___SYSTEM_A = 0x71C + SYS_FGETC = 0x072 + SYS___GAI_STRERROR_A = 0x72F + SYS___RMDIR_A = 0x72A + SYS___STATVFS_A = 0x72B + SYS___SYMLINK_A = 0x72C + SYS___TRUNCATE_A = 0x72D + SYS___UNLINK_A = 0x72E + SYS_VFPRINTF = 0x073 + SYS___ISSPACE_A = 0x73A + SYS___ISUPPER_A = 0x73B + 
SYS___ISWALNUM_A = 0x73F + SYS___ISXDIGIT_A = 0x73C + SYS___TOLOWER_A = 0x73D + SYS___TOUPPER_A = 0x73E + SYS_VPRINTF = 0x074 + SYS___CONFSTR_A = 0x74B + SYS___FDOPEN_A = 0x74E + SYS___FLDATA_A = 0x74F + SYS___FTOK_A = 0x74C + SYS___ISWXDIGIT_A = 0x74A + SYS___MKTEMP_A = 0x74D + SYS_VSPRINTF = 0x075 + SYS___GETGRGID_A = 0x75A + SYS___GETGRNAM_A = 0x75B + SYS___GETGROUPSBYNAME_A = 0x75C + SYS___GETHOSTENT_A = 0x75D + SYS___GETHOSTNAME_A = 0x75E + SYS___GETLOGIN_A = 0x75F + SYS_GETC = 0x076 + SYS___CREATEWORKUNIT_A = 0x76A + SYS___CTERMID_A = 0x76B + SYS___FMTMSG_A = 0x76C + SYS___INITGROUPS_A = 0x76D + SYS___MSGRCV_A = 0x76F + SYS_____LOGIN_A = 0x76E + SYS_FGETS = 0x077 + SYS___STRCASECMP_A = 0x77B + SYS___STRNCASECMP_A = 0x77C + SYS___TTYNAME_A = 0x77D + SYS___UNAME_A = 0x77E + SYS___UTIMES_A = 0x77F + SYS_____SERVER_PWU_A = 0x77A + SYS_FPUTC = 0x078 + SYS___CREAT_O_A = 0x78E + SYS___ENVNA = 0x78F + SYS___FREAD_A = 0x78A + SYS___FWRITE_A = 0x78B + SYS___ISASCII = 0x78D + SYS___OPEN_O_A = 0x78C + SYS_FPUTS = 0x079 + SYS___ASCTIME_A = 0x79C + SYS___CTIME_A = 0x79D + SYS___GETDATE_A = 0x79E + SYS___GETSERVBYPORT_A = 0x79A + SYS___GETSERVENT_A = 0x79B + SYS___TZSET_A = 0x79F + SYS_ACL_FROM_TEXT = 0x80C + SYS_ACL_SET_FD = 0x80A + SYS_ACL_SET_FILE = 0x80B + SYS_ACL_SORT = 0x80E + SYS_ACL_TO_TEXT = 0x80D + SYS_UNGETC = 0x080 + SYS___SHUTDOWN_REGISTRATION = 0x80F + SYS_FREAD = 0x081 + SYS_FREEADDRINFO = 0x81A + SYS_GAI_STRERROR = 0x81B + SYS_REXEC_AF = 0x81C + SYS___DYNALLOC_A = 0x81F + SYS___POE = 0x81D + SYS_WCSTOMBS = 0x082 + SYS___INET_ADDR_A = 0x82F + SYS___NLIST_A = 0x82A + SYS_____TCGETCP_A = 0x82B + SYS_____TCSETCP_A = 0x82C + SYS_____W_PIOCTL_A = 0x82E + SYS_MBTOWC = 0x083 + SYS___CABEND = 0x83D + SYS___LE_CIB_GET = 0x83E + SYS___RECVMSG_A = 0x83B + SYS___SENDMSG_A = 0x83A + SYS___SET_LAA_FOR_JIT = 0x83F + SYS_____LCHATTR_A = 0x83C + SYS_WCTOMB = 0x084 + SYS___CBRTL_B = 0x84A + SYS___COPYSIGNF_B = 0x84B + SYS___COPYSIGNL_B = 0x84C + SYS___COTANF_B = 0x84D + 
SYS___COTANL_B = 0x84F + SYS___COTAN_B = 0x84E + SYS_MBSTOWCS = 0x085 + SYS___LOG1PL_B = 0x85A + SYS___LOG2F_B = 0x85B + SYS___LOG2L_B = 0x85D + SYS___LOG2_B = 0x85C + SYS___REMAINDERF_B = 0x85E + SYS___REMAINDERL_B = 0x85F + SYS_ACOSHF = 0x86E + SYS_ACOSHL = 0x86F + SYS_WCSCPY = 0x086 + SYS___ERFCF_B = 0x86D + SYS___ERFF_B = 0x86C + SYS___LROUNDF_B = 0x86A + SYS___LROUND_B = 0x86B + SYS_COTANL = 0x87A + SYS_EXP2F = 0x87B + SYS_EXP2L = 0x87C + SYS_EXPM1F = 0x87D + SYS_EXPM1L = 0x87E + SYS_FDIMF = 0x87F + SYS_WCSCAT = 0x087 + SYS___COTANL = 0x87A + SYS_REMAINDERF = 0x88A + SYS_REMAINDERL = 0x88B + SYS_REMAINDF = 0x88A + SYS_REMAINDL = 0x88B + SYS_REMQUO = 0x88D + SYS_REMQUOF = 0x88C + SYS_REMQUOL = 0x88E + SYS_TGAMMAF = 0x88F + SYS_WCSCHR = 0x088 + SYS_ERFCF = 0x89B + SYS_ERFCL = 0x89C + SYS_ERFL = 0x89A + SYS_EXP2 = 0x89E + SYS_WCSCMP = 0x089 + SYS___EXP2_B = 0x89D + SYS___FAR_JUMP = 0x89F + SYS_ABS = 0x090 + SYS___ERFCL_H = 0x90A + SYS___EXPF_H = 0x90C + SYS___EXPL_H = 0x90D + SYS___EXPM1_H = 0x90E + SYS___EXP_H = 0x90B + SYS___FDIM_H = 0x90F + SYS_DIV = 0x091 + SYS___LOG2F_H = 0x91F + SYS___LOG2_H = 0x91E + SYS___LOGB_H = 0x91D + SYS___LOGF_H = 0x91B + SYS___LOGL_H = 0x91C + SYS___LOG_H = 0x91A + SYS_LABS = 0x092 + SYS___POWL_H = 0x92A + SYS___REMAINDER_H = 0x92B + SYS___RINT_H = 0x92C + SYS___SCALB_H = 0x92D + SYS___SINF_H = 0x92F + SYS___SIN_H = 0x92E + SYS_STRNCPY = 0x093 + SYS___TANHF_H = 0x93B + SYS___TANHL_H = 0x93C + SYS___TANH_H = 0x93A + SYS___TGAMMAF_H = 0x93E + SYS___TGAMMA_H = 0x93D + SYS___TRUNC_H = 0x93F + SYS_MEMCPY = 0x094 + SYS_VFWSCANF = 0x94A + SYS_VSWSCANF = 0x94E + SYS_VWSCANF = 0x94C + SYS_INET6_RTH_ADD = 0x95D + SYS_INET6_RTH_INIT = 0x95C + SYS_INET6_RTH_REVERSE = 0x95E + SYS_INET6_RTH_SEGMENTS = 0x95F + SYS_INET6_RTH_SPACE = 0x95B + SYS_MEMMOVE = 0x095 + SYS_WCSTOLD = 0x95A + SYS_STRCPY = 0x096 + SYS_STRCMP = 0x097 + SYS_CABS = 0x98E + SYS_STRCAT = 0x098 + SYS___CABS_B = 0x98F + SYS___POW_II = 0x98A + SYS___POW_II_B = 0x98B + 
SYS___POW_II_H = 0x98C + SYS_CACOSF = 0x99A + SYS_CACOSL = 0x99D + SYS_STRNCAT = 0x099 + SYS___CACOSF_B = 0x99B + SYS___CACOSF_H = 0x99C + SYS___CACOSL_B = 0x99E + SYS___CACOSL_H = 0x99F + SYS_ISWALPHA = 0x100 + SYS_ISWBLANK = 0x101 + SYS___ISWBLK = 0x101 + SYS_ISWCNTRL = 0x102 + SYS_ISWDIGIT = 0x103 + SYS_ISWGRAPH = 0x104 + SYS_ISWLOWER = 0x105 + SYS_ISWPRINT = 0x106 + SYS_ISWPUNCT = 0x107 + SYS_ISWSPACE = 0x108 + SYS_ISWUPPER = 0x109 + SYS_WCTOB = 0x110 + SYS_MBRLEN = 0x111 + SYS_MBRTOWC = 0x112 + SYS_MBSRTOWC = 0x113 + SYS_MBSRTOWCS = 0x113 + SYS_WCRTOMB = 0x114 + SYS_WCSRTOMB = 0x115 + SYS_WCSRTOMBS = 0x115 + SYS___CSID = 0x116 + SYS___WCSID = 0x117 + SYS_STRPTIME = 0x118 + SYS___STRPTM = 0x118 + SYS_STRFMON = 0x119 + SYS_WCSCOLL = 0x130 + SYS_WCSXFRM = 0x131 + SYS_WCSWIDTH = 0x132 + SYS_WCWIDTH = 0x133 + SYS_WCSFTIME = 0x134 + SYS_SWPRINTF = 0x135 + SYS_VSWPRINT = 0x136 + SYS_VSWPRINTF = 0x136 + SYS_SWSCANF = 0x137 + SYS_REGCOMP = 0x138 + SYS_REGEXEC = 0x139 + SYS_GETWC = 0x140 + SYS_GETWCHAR = 0x141 + SYS_PUTWC = 0x142 + SYS_PUTWCHAR = 0x143 + SYS_UNGETWC = 0x144 + SYS_ICONV_OPEN = 0x145 + SYS_ICONV = 0x146 + SYS_ICONV_CLOSE = 0x147 + SYS_COLLRANGE = 0x150 + SYS_CCLASS = 0x151 + SYS_COLLORDER = 0x152 + SYS___DEMANGLE = 0x154 + SYS_FDOPEN = 0x155 + SYS___ERRNO = 0x156 + SYS___ERRNO2 = 0x157 + SYS___TERROR = 0x158 + SYS_MAXCOLL = 0x169 + SYS_DLLLOAD = 0x170 + SYS__EXIT = 0x174 + SYS_ACCESS = 0x175 + SYS_ALARM = 0x176 + SYS_CFGETISPEED = 0x177 + SYS_CFGETOSPEED = 0x178 + SYS_CFSETISPEED = 0x179 + SYS_CREAT = 0x180 + SYS_CTERMID = 0x181 + SYS_DUP = 0x182 + SYS_DUP2 = 0x183 + SYS_EXECL = 0x184 + SYS_EXECLE = 0x185 + SYS_EXECLP = 0x186 + SYS_EXECV = 0x187 + SYS_EXECVE = 0x188 + SYS_EXECVP = 0x189 + SYS_FSTAT = 0x190 + SYS_FSYNC = 0x191 + SYS_FTRUNCATE = 0x192 + SYS_GETCWD = 0x193 + SYS_GETEGID = 0x194 + SYS_GETEUID = 0x195 + SYS_GETGID = 0x196 + SYS_GETGRGID = 0x197 + SYS_GETGRNAM = 0x198 + SYS_GETGROUPS = 0x199 + SYS_PTHREAD_MUTEXATTR_DESTROY = 0x200 + 
SYS_PTHREAD_MUTEXATTR_SETKIND_NP = 0x201 + SYS_PTHREAD_MUTEXATTR_GETKIND_NP = 0x202 + SYS_PTHREAD_MUTEX_INIT = 0x203 + SYS_PTHREAD_MUTEX_DESTROY = 0x204 + SYS_PTHREAD_MUTEX_LOCK = 0x205 + SYS_PTHREAD_MUTEX_TRYLOCK = 0x206 + SYS_PTHREAD_MUTEX_UNLOCK = 0x207 + SYS_PTHREAD_ONCE = 0x209 + SYS_TW_OPEN = 0x210 + SYS_TW_FCNTL = 0x211 + SYS_PTHREAD_JOIN_D4_NP = 0x212 + SYS_PTHREAD_CONDATTR_SETKIND_NP = 0x213 + SYS_PTHREAD_CONDATTR_GETKIND_NP = 0x214 + SYS_EXTLINK_NP = 0x215 + SYS___PASSWD = 0x216 + SYS_SETGROUPS = 0x217 + SYS_INITGROUPS = 0x218 + SYS_WCSRCHR = 0x240 + SYS_SVC99 = 0x241 + SYS___SVC99 = 0x241 + SYS_WCSWCS = 0x242 + SYS_LOCALECO = 0x243 + SYS_LOCALECONV = 0x243 + SYS___LIBREL = 0x244 + SYS_RELEASE = 0x245 + SYS___RLSE = 0x245 + SYS_FLOCATE = 0x246 + SYS___FLOCT = 0x246 + SYS_FDELREC = 0x247 + SYS___FDLREC = 0x247 + SYS_FETCH = 0x248 + SYS___FETCH = 0x248 + SYS_QSORT = 0x249 + SYS___CLEANUPCATCH = 0x260 + SYS___CATCHMATCH = 0x261 + SYS___CLEAN2UPCATCH = 0x262 + SYS_GETPRIORITY = 0x270 + SYS_NICE = 0x271 + SYS_SETPRIORITY = 0x272 + SYS_GETITIMER = 0x273 + SYS_SETITIMER = 0x274 + SYS_MSGCTL = 0x275 + SYS_MSGGET = 0x276 + SYS_MSGRCV = 0x277 + SYS_MSGSND = 0x278 + SYS_MSGXRCV = 0x279 + SYS___MSGXR = 0x279 + SYS_SHMGET = 0x280 + SYS___GETIPC = 0x281 + SYS_SETGRENT = 0x282 + SYS_GETGRENT = 0x283 + SYS_ENDGRENT = 0x284 + SYS_SETPWENT = 0x285 + SYS_GETPWENT = 0x286 + SYS_ENDPWENT = 0x287 + SYS_BSD_SIGNAL = 0x288 + SYS_KILLPG = 0x289 + SYS_SIGSET = 0x290 + SYS_SIGSTACK = 0x291 + SYS_GETRLIMIT = 0x292 + SYS_SETRLIMIT = 0x293 + SYS_GETRUSAGE = 0x294 + SYS_MMAP = 0x295 + SYS_MPROTECT = 0x296 + SYS_MSYNC = 0x297 + SYS_MUNMAP = 0x298 + SYS_CONFSTR = 0x299 + SYS___NDMTRM = 0x300 + SYS_FTOK = 0x301 + SYS_BASENAME = 0x302 + SYS_DIRNAME = 0x303 + SYS_GETDTABLESIZE = 0x304 + SYS_MKSTEMP = 0x305 + SYS_MKTEMP = 0x306 + SYS_NFTW = 0x307 + SYS_GETWD = 0x308 + SYS_LOCKF = 0x309 + SYS_WORDEXP = 0x310 + SYS_WORDFREE = 0x311 + SYS_GETPGID = 0x312 + SYS_GETSID = 0x313 + SYS___UTMPXNAME = 
0x314 + SYS_CUSERID = 0x315 + SYS_GETPASS = 0x316 + SYS_FNMATCH = 0x317 + SYS_FTW = 0x318 + SYS_GETW = 0x319 + SYS_ACOSH = 0x320 + SYS_ASINH = 0x321 + SYS_ATANH = 0x322 + SYS_CBRT = 0x323 + SYS_EXPM1 = 0x324 + SYS_ILOGB = 0x325 + SYS_LOGB = 0x326 + SYS_LOG1P = 0x327 + SYS_NEXTAFTER = 0x328 + SYS_RINT = 0x329 + SYS_SPAWN = 0x330 + SYS_SPAWNP = 0x331 + SYS_GETLOGIN_UU = 0x332 + SYS_ECVT = 0x333 + SYS_FCVT = 0x334 + SYS_GCVT = 0x335 + SYS_ACCEPT = 0x336 + SYS_BIND = 0x337 + SYS_CONNECT = 0x338 + SYS_ENDHOSTENT = 0x339 + SYS_GETHOSTENT = 0x340 + SYS_GETHOSTID = 0x341 + SYS_GETHOSTNAME = 0x342 + SYS_GETNETBYADDR = 0x343 + SYS_GETNETBYNAME = 0x344 + SYS_GETNETENT = 0x345 + SYS_GETPEERNAME = 0x346 + SYS_GETPROTOBYNAME = 0x347 + SYS_GETPROTOBYNUMBER = 0x348 + SYS_GETPROTOENT = 0x349 + SYS_INET_LNAOF = 0x350 + SYS_INET_MAKEADDR = 0x351 + SYS_INET_NETOF = 0x352 + SYS_INET_NETWORK = 0x353 + SYS_INET_NTOA = 0x354 + SYS_IOCTL = 0x355 + SYS_LISTEN = 0x356 + SYS_READV = 0x357 + SYS_RECV = 0x358 + SYS_RECVFROM = 0x359 + SYS_SETHOSTENT = 0x360 + SYS_SETNETENT = 0x361 + SYS_SETPEER = 0x362 + SYS_SETPROTOENT = 0x363 + SYS_SETSERVENT = 0x364 + SYS_SETSOCKOPT = 0x365 + SYS_SHUTDOWN = 0x366 + SYS_SOCKET = 0x367 + SYS_SOCKETPAIR = 0x368 + SYS_WRITEV = 0x369 + SYS_ENDNETENT = 0x370 + SYS_CLOSELOG = 0x371 + SYS_OPENLOG = 0x372 + SYS_SETLOGMASK = 0x373 + SYS_SYSLOG = 0x374 + SYS_PTSNAME = 0x375 + SYS_SETREUID = 0x376 + SYS_SETREGID = 0x377 + SYS_REALPATH = 0x378 + SYS___SIGNGAM = 0x379 + SYS_POLL = 0x380 + SYS_REXEC = 0x381 + SYS___ISASCII2 = 0x382 + SYS___TOASCII2 = 0x383 + SYS_CHPRIORITY = 0x384 + SYS_PTHREAD_ATTR_SETSYNCTYPE_NP = 0x385 + SYS_PTHREAD_ATTR_GETSYNCTYPE_NP = 0x386 + SYS_PTHREAD_SET_LIMIT_NP = 0x387 + SYS___STNETENT = 0x388 + SYS___STPROTOENT = 0x389 + SYS___SELECT1 = 0x390 + SYS_PTHREAD_SECURITY_NP = 0x391 + SYS___CHECK_RESOURCE_AUTH_NP = 0x392 + SYS___CONVERT_ID_NP = 0x393 + SYS___OPENVMREL = 0x394 + SYS_WMEMCHR = 0x395 + SYS_WMEMCMP = 0x396 + SYS_WMEMCPY = 0x397 + 
SYS_WMEMMOVE = 0x398 + SYS_WMEMSET = 0x399 + SYS___FPUTWC = 0x400 + SYS___PUTWC = 0x401 + SYS___PWCHAR = 0x402 + SYS___WCSFTM = 0x403 + SYS___WCSTOK = 0x404 + SYS___WCWDTH = 0x405 + SYS_T_ACCEPT = 0x409 + SYS_T_GETINFO = 0x410 + SYS_T_GETPROTADDR = 0x411 + SYS_T_GETSTATE = 0x412 + SYS_T_LISTEN = 0x413 + SYS_T_LOOK = 0x414 + SYS_T_OPEN = 0x415 + SYS_T_OPTMGMT = 0x416 + SYS_T_RCV = 0x417 + SYS_T_RCVCONNECT = 0x418 + SYS_T_RCVDIS = 0x419 + SYS_T_SNDUDATA = 0x420 + SYS_T_STRERROR = 0x421 + SYS_T_SYNC = 0x422 + SYS_T_UNBIND = 0x423 + SYS___T_ERRNO = 0x424 + SYS___RECVMSG2 = 0x425 + SYS___SENDMSG2 = 0x426 + SYS_FATTACH = 0x427 + SYS_FDETACH = 0x428 + SYS_GETMSG = 0x429 + SYS_GETCONTEXT = 0x430 + SYS_SETCONTEXT = 0x431 + SYS_MAKECONTEXT = 0x432 + SYS_SWAPCONTEXT = 0x433 + SYS_PTHREAD_GETSPECIFIC_D8_NP = 0x434 + SYS_GETCLIENTID = 0x470 + SYS___GETCLIENTID = 0x471 + SYS_GETSTABLESIZE = 0x472 + SYS_GETIBMOPT = 0x473 + SYS_GETIBMSOCKOPT = 0x474 + SYS_GIVESOCKET = 0x475 + SYS_IBMSFLUSH = 0x476 + SYS_MAXDESC = 0x477 + SYS_SETIBMOPT = 0x478 + SYS_SETIBMSOCKOPT = 0x479 + SYS___SERVER_PWU = 0x480 + SYS_PTHREAD_TAG_NP = 0x481 + SYS___CONSOLE = 0x482 + SYS___WSINIT = 0x483 + SYS___IPTCPN = 0x489 + SYS___SERVER_CLASSIFY = 0x490 + SYS___HEAPRPT = 0x496 + SYS___ISBFP = 0x500 + SYS___FP_CAST = 0x501 + SYS___CERTIFICATE = 0x502 + SYS_SEND_FILE = 0x503 + SYS_AIO_CANCEL = 0x504 + SYS_AIO_ERROR = 0x505 + SYS_AIO_READ = 0x506 + SYS_AIO_RETURN = 0x507 + SYS_AIO_SUSPEND = 0x508 + SYS_AIO_WRITE = 0x509 + SYS_PTHREAD_RWLOCK_TRYWRLOCK = 0x510 + SYS_PTHREAD_RWLOCK_UNLOCK = 0x511 + SYS_PTHREAD_RWLOCK_WRLOCK = 0x512 + SYS_PTHREAD_RWLOCKATTR_GETPSHARED = 0x513 + SYS_PTHREAD_RWLOCKATTR_SETPSHARED = 0x514 + SYS_PTHREAD_RWLOCKATTR_INIT = 0x515 + SYS_PTHREAD_RWLOCKATTR_DESTROY = 0x516 + SYS___CTTBL = 0x517 + SYS_PTHREAD_MUTEXATTR_SETTYPE = 0x518 + SYS_PTHREAD_MUTEXATTR_GETTYPE = 0x519 + SYS___FP_UNORDERED = 0x520 + SYS___FP_READ_RND = 0x521 + SYS___FP_READ_RND_B = 0x522 + SYS___FP_SWAP_RND = 0x523 + 
SYS___FP_SWAP_RND_B = 0x524 + SYS___FP_LEVEL = 0x525 + SYS___FP_BTOH = 0x526 + SYS___FP_HTOB = 0x527 + SYS___FPC_RD = 0x528 + SYS___FPC_WR = 0x529 + SYS_PTHREAD_SETCANCELTYPE = 0x600 + SYS_PTHREAD_TESTCANCEL = 0x601 + SYS___ATANF_B = 0x602 + SYS___ATANL_B = 0x603 + SYS___CEILF_B = 0x604 + SYS___CEILL_B = 0x605 + SYS___COSF_B = 0x606 + SYS___COSL_B = 0x607 + SYS___FABSF_B = 0x608 + SYS___FABSL_B = 0x609 + SYS___SINF_B = 0x610 + SYS___SINL_B = 0x611 + SYS___TANF_B = 0x612 + SYS___TANL_B = 0x613 + SYS___TANHF_B = 0x614 + SYS___TANHL_B = 0x615 + SYS___ACOSF_B = 0x616 + SYS___ACOSL_B = 0x617 + SYS___ASINF_B = 0x618 + SYS___ASINL_B = 0x619 + SYS___LOGF_B = 0x620 + SYS___LOGL_B = 0x621 + SYS___LOG10F_B = 0x622 + SYS___LOG10L_B = 0x623 + SYS___POWF_B = 0x624 + SYS___POWL_B = 0x625 + SYS___SINHF_B = 0x626 + SYS___SINHL_B = 0x627 + SYS___SQRTF_B = 0x628 + SYS___SQRTL_B = 0x629 + SYS___MODFL_B = 0x630 + SYS_ABSF = 0x631 + SYS_ABSL = 0x632 + SYS_ACOSF = 0x633 + SYS_ACOSL = 0x634 + SYS_ASINF = 0x635 + SYS_ASINL = 0x636 + SYS_ATAN2F = 0x637 + SYS_ATAN2L = 0x638 + SYS_ATANF = 0x639 + SYS_COSHL = 0x640 + SYS_EXPF = 0x641 + SYS_EXPL = 0x642 + SYS_TANHF = 0x643 + SYS_TANHL = 0x644 + SYS_LOG10F = 0x645 + SYS_LOG10L = 0x646 + SYS_LOGF = 0x647 + SYS_LOGL = 0x648 + SYS_POWF = 0x649 + SYS_SINHL = 0x650 + SYS_TANF = 0x651 + SYS_TANL = 0x652 + SYS_FABSF = 0x653 + SYS_FABSL = 0x654 + SYS_FLOORF = 0x655 + SYS_FLOORL = 0x656 + SYS_FMODF = 0x657 + SYS_FMODL = 0x658 + SYS_FREXPF = 0x659 + SYS___CHATTR = 0x660 + SYS___FCHATTR = 0x661 + SYS___TOCCSID = 0x662 + SYS___CSNAMETYPE = 0x663 + SYS___TOCSNAME = 0x664 + SYS___CCSIDTYPE = 0x665 + SYS___AE_CORRESTBL_QUERY = 0x666 + SYS___AE_AUTOCONVERT_STATE = 0x667 + SYS_DN_FIND = 0x668 + SYS___GETHOSTBYADDR_A = 0x669 + SYS___MBLEN_SB_A = 0x670 + SYS___MBLEN_STD_A = 0x671 + SYS___MBLEN_UTF = 0x672 + SYS___MBSTOWCS_A = 0x673 + SYS___MBSTOWCS_STD_A = 0x674 + SYS___MBTOWC_A = 0x675 + SYS___MBTOWC_ISO1 = 0x676 + SYS___MBTOWC_SBCS = 0x677 + SYS___MBTOWC_MBCS = 
0x678 + SYS___MBTOWC_UTF = 0x679 + SYS___CSID_A = 0x680 + SYS___CSID_STD_A = 0x681 + SYS___WCSID_A = 0x682 + SYS___WCSID_STD_A = 0x683 + SYS___WCTOMB_A = 0x684 + SYS___WCTOMB_ISO1 = 0x685 + SYS___WCTOMB_STD_A = 0x686 + SYS___WCTOMB_UTF = 0x687 + SYS___WCWIDTH_A = 0x688 + SYS___GETGRNAM_R_A = 0x689 + SYS___READDIR_R_A = 0x690 + SYS___E2A_S = 0x691 + SYS___FNMATCH_A = 0x692 + SYS___FNMATCH_C_A = 0x693 + SYS___EXECL_A = 0x694 + SYS___FNMATCH_STD_A = 0x695 + SYS___REGCOMP_A = 0x696 + SYS___REGCOMP_STD_A = 0x697 + SYS___REGERROR_A = 0x698 + SYS___REGERROR_STD_A = 0x699 + SYS___SWPRINTF_A = 0x700 + SYS___FSCANF_A = 0x701 + SYS___SCANF_A = 0x702 + SYS___SSCANF_A = 0x703 + SYS___SWSCANF_A = 0x704 + SYS___ATOF_A = 0x705 + SYS___ATOI_A = 0x706 + SYS___ATOL_A = 0x707 + SYS___STRTOD_A = 0x708 + SYS___STRTOL_A = 0x709 + SYS___L64A_A = 0x710 + SYS___STRERROR_A = 0x711 + SYS___PERROR_A = 0x712 + SYS___FETCH_A = 0x713 + SYS___GETENV_A = 0x714 + SYS___MKSTEMP_A = 0x717 + SYS___PTSNAME_A = 0x718 + SYS___PUTENV_A = 0x719 + SYS___CHDIR_A = 0x720 + SYS___CHOWN_A = 0x721 + SYS___CHROOT_A = 0x722 + SYS___GETCWD_A = 0x723 + SYS___GETWD_A = 0x724 + SYS___LCHOWN_A = 0x725 + SYS___LINK_A = 0x726 + SYS___PATHCONF_A = 0x727 + SYS___IF_NAMEINDEX_A = 0x728 + SYS___READLINK_A = 0x729 + SYS___EXTLINK_NP_A = 0x730 + SYS___ISALNUM_A = 0x731 + SYS___ISALPHA_A = 0x732 + SYS___A2E_S = 0x733 + SYS___ISCNTRL_A = 0x734 + SYS___ISDIGIT_A = 0x735 + SYS___ISGRAPH_A = 0x736 + SYS___ISLOWER_A = 0x737 + SYS___ISPRINT_A = 0x738 + SYS___ISPUNCT_A = 0x739 + SYS___ISWALPHA_A = 0x740 + SYS___A2E_L = 0x741 + SYS___ISWCNTRL_A = 0x742 + SYS___ISWDIGIT_A = 0x743 + SYS___ISWGRAPH_A = 0x744 + SYS___ISWLOWER_A = 0x745 + SYS___ISWPRINT_A = 0x746 + SYS___ISWPUNCT_A = 0x747 + SYS___ISWSPACE_A = 0x748 + SYS___ISWUPPER_A = 0x749 + SYS___REMOVE_A = 0x750 + SYS___RENAME_A = 0x751 + SYS___TMPNAM_A = 0x752 + SYS___FOPEN_A = 0x753 + SYS___FREOPEN_A = 0x754 + SYS___CUSERID_A = 0x755 + SYS___POPEN_A = 0x756 + SYS___TEMPNAM_A = 0x757 + 
SYS___FTW_A = 0x758 + SYS___GETGRENT_A = 0x759 + SYS___INET_NTOP_A = 0x760 + SYS___GETPASS_A = 0x761 + SYS___GETPWENT_A = 0x762 + SYS___GETPWNAM_A = 0x763 + SYS___GETPWUID_A = 0x764 + SYS_____CHECK_RESOURCE_AUTH_NP_A = 0x765 + SYS___CHECKSCHENV_A = 0x766 + SYS___CONNECTSERVER_A = 0x767 + SYS___CONNECTWORKMGR_A = 0x768 + SYS_____CONSOLE_A = 0x769 + SYS___MSGSND_A = 0x770 + SYS___MSGXRCV_A = 0x771 + SYS___NFTW_A = 0x772 + SYS_____PASSWD_A = 0x773 + SYS___PTHREAD_SECURITY_NP_A = 0x774 + SYS___QUERYMETRICS_A = 0x775 + SYS___QUERYSCHENV = 0x776 + SYS___READV_A = 0x777 + SYS_____SERVER_CLASSIFY_A = 0x778 + SYS_____SERVER_INIT_A = 0x779 + SYS___W_GETPSENT_A = 0x780 + SYS___WRITEV_A = 0x781 + SYS___W_STATFS_A = 0x782 + SYS___W_STATVFS_A = 0x783 + SYS___FPUTC_A = 0x784 + SYS___PUTCHAR_A = 0x785 + SYS___PUTS_A = 0x786 + SYS___FGETS_A = 0x787 + SYS___GETS_A = 0x788 + SYS___FPUTS_A = 0x789 + SYS___PUTC_A = 0x790 + SYS___AE_THREAD_SETMODE = 0x791 + SYS___AE_THREAD_SWAPMODE = 0x792 + SYS___GETNETBYADDR_A = 0x793 + SYS___GETNETBYNAME_A = 0x794 + SYS___GETNETENT_A = 0x795 + SYS___GETPROTOBYNAME_A = 0x796 + SYS___GETPROTOBYNUMBER_A = 0x797 + SYS___GETPROTOENT_A = 0x798 + SYS___GETSERVBYNAME_A = 0x799 + SYS_ACL_FIRST_ENTRY = 0x800 + SYS_ACL_GET_ENTRY = 0x801 + SYS_ACL_VALID = 0x802 + SYS_ACL_CREATE_ENTRY = 0x803 + SYS_ACL_DELETE_ENTRY = 0x804 + SYS_ACL_UPDATE_ENTRY = 0x805 + SYS_ACL_DELETE_FD = 0x806 + SYS_ACL_DELETE_FILE = 0x807 + SYS_ACL_GET_FD = 0x808 + SYS_ACL_GET_FILE = 0x809 + SYS___ERFL_B = 0x810 + SYS___ERFCL_B = 0x811 + SYS___LGAMMAL_B = 0x812 + SYS___SETHOOKEVENTS = 0x813 + SYS_IF_NAMETOINDEX = 0x814 + SYS_IF_INDEXTONAME = 0x815 + SYS_IF_NAMEINDEX = 0x816 + SYS_IF_FREENAMEINDEX = 0x817 + SYS_GETADDRINFO = 0x818 + SYS_GETNAMEINFO = 0x819 + SYS___DYNFREE_A = 0x820 + SYS___RES_QUERY_A = 0x821 + SYS___RES_SEARCH_A = 0x822 + SYS___RES_QUERYDOMAIN_A = 0x823 + SYS___RES_MKQUERY_A = 0x824 + SYS___RES_SEND_A = 0x825 + SYS___DN_EXPAND_A = 0x826 + SYS___DN_SKIPNAME_A = 0x827 + 
SYS___DN_COMP_A = 0x828 + SYS___DN_FIND_A = 0x829 + SYS___INET_NTOA_A = 0x830 + SYS___INET_NETWORK_A = 0x831 + SYS___ACCEPT_A = 0x832 + SYS___ACCEPT_AND_RECV_A = 0x833 + SYS___BIND_A = 0x834 + SYS___CONNECT_A = 0x835 + SYS___GETPEERNAME_A = 0x836 + SYS___GETSOCKNAME_A = 0x837 + SYS___RECVFROM_A = 0x838 + SYS___SENDTO_A = 0x839 + SYS___LCHATTR = 0x840 + SYS___WRITEDOWN = 0x841 + SYS_PTHREAD_MUTEX_INIT2 = 0x842 + SYS___ACOSHF_B = 0x843 + SYS___ACOSHL_B = 0x844 + SYS___ASINHF_B = 0x845 + SYS___ASINHL_B = 0x846 + SYS___ATANHF_B = 0x847 + SYS___ATANHL_B = 0x848 + SYS___CBRTF_B = 0x849 + SYS___EXP2F_B = 0x850 + SYS___EXP2L_B = 0x851 + SYS___EXPM1F_B = 0x852 + SYS___EXPM1L_B = 0x853 + SYS___FDIMF_B = 0x854 + SYS___FDIM_B = 0x855 + SYS___FDIML_B = 0x856 + SYS___HYPOTF_B = 0x857 + SYS___HYPOTL_B = 0x858 + SYS___LOG1PF_B = 0x859 + SYS___REMQUOF_B = 0x860 + SYS___REMQUO_B = 0x861 + SYS___REMQUOL_B = 0x862 + SYS___TGAMMAF_B = 0x863 + SYS___TGAMMA_B = 0x864 + SYS___TGAMMAL_B = 0x865 + SYS___TRUNCF_B = 0x866 + SYS___TRUNC_B = 0x867 + SYS___TRUNCL_B = 0x868 + SYS___LGAMMAF_B = 0x869 + SYS_ASINHF = 0x870 + SYS_ASINHL = 0x871 + SYS_ATANHF = 0x872 + SYS_ATANHL = 0x873 + SYS_CBRTF = 0x874 + SYS_CBRTL = 0x875 + SYS_COPYSIGNF = 0x876 + SYS_CPYSIGNF = 0x876 + SYS_COPYSIGNL = 0x877 + SYS_CPYSIGNL = 0x877 + SYS_COTANF = 0x878 + SYS___COTANF = 0x878 + SYS_COTAN = 0x879 + SYS___COTAN = 0x879 + SYS_FDIM = 0x881 + SYS_FDIML = 0x882 + SYS_HYPOTF = 0x883 + SYS_HYPOTL = 0x884 + SYS_LOG1PF = 0x885 + SYS_LOG1PL = 0x886 + SYS_LOG2F = 0x887 + SYS_LOG2 = 0x888 + SYS_LOG2L = 0x889 + SYS_TGAMMA = 0x890 + SYS_TGAMMAL = 0x891 + SYS_TRUNCF = 0x892 + SYS_TRUNC = 0x893 + SYS_TRUNCL = 0x894 + SYS_LGAMMAF = 0x895 + SYS_LGAMMAL = 0x896 + SYS_LROUNDF = 0x897 + SYS_LROUND = 0x898 + SYS_ERFF = 0x899 + SYS___COSHF_H = 0x900 + SYS___COSHL_H = 0x901 + SYS___COTAN_H = 0x902 + SYS___COTANF_H = 0x903 + SYS___COTANL_H = 0x904 + SYS___ERF_H = 0x905 + SYS___ERFF_H = 0x906 + SYS___ERFL_H = 0x907 + SYS___ERFC_H = 0x908 + 
SYS___ERFCF_H = 0x909 + SYS___FDIMF_H = 0x910 + SYS___FDIML_H = 0x911 + SYS___FMOD_H = 0x912 + SYS___FMODF_H = 0x913 + SYS___FMODL_H = 0x914 + SYS___GAMMA_H = 0x915 + SYS___HYPOT_H = 0x916 + SYS___ILOGB_H = 0x917 + SYS___LGAMMA_H = 0x918 + SYS___LGAMMAF_H = 0x919 + SYS___LOG2L_H = 0x920 + SYS___LOG1P_H = 0x921 + SYS___LOG10_H = 0x922 + SYS___LOG10F_H = 0x923 + SYS___LOG10L_H = 0x924 + SYS___LROUND_H = 0x925 + SYS___LROUNDF_H = 0x926 + SYS___NEXTAFTER_H = 0x927 + SYS___POW_H = 0x928 + SYS___POWF_H = 0x929 + SYS___SINL_H = 0x930 + SYS___SINH_H = 0x931 + SYS___SINHF_H = 0x932 + SYS___SINHL_H = 0x933 + SYS___SQRT_H = 0x934 + SYS___SQRTF_H = 0x935 + SYS___SQRTL_H = 0x936 + SYS___TAN_H = 0x937 + SYS___TANF_H = 0x938 + SYS___TANL_H = 0x939 + SYS___TRUNCF_H = 0x940 + SYS___TRUNCL_H = 0x941 + SYS___COSH_H = 0x942 + SYS___LE_DEBUG_SET_RESUME_MCH = 0x943 + SYS_VFSCANF = 0x944 + SYS_VSCANF = 0x946 + SYS_VSSCANF = 0x948 + SYS_IMAXABS = 0x950 + SYS_IMAXDIV = 0x951 + SYS_STRTOIMAX = 0x952 + SYS_STRTOUMAX = 0x953 + SYS_WCSTOIMAX = 0x954 + SYS_WCSTOUMAX = 0x955 + SYS_ATOLL = 0x956 + SYS_STRTOF = 0x957 + SYS_STRTOLD = 0x958 + SYS_WCSTOF = 0x959 + SYS_INET6_RTH_GETADDR = 0x960 + SYS_INET6_OPT_INIT = 0x961 + SYS_INET6_OPT_APPEND = 0x962 + SYS_INET6_OPT_FINISH = 0x963 + SYS_INET6_OPT_SET_VAL = 0x964 + SYS_INET6_OPT_NEXT = 0x965 + SYS_INET6_OPT_FIND = 0x966 + SYS_INET6_OPT_GET_VAL = 0x967 + SYS___POW_I = 0x987 + SYS___POW_I_B = 0x988 + SYS___POW_I_H = 0x989 + SYS___CABS_H = 0x990 + SYS_CABSF = 0x991 + SYS___CABSF_B = 0x992 + SYS___CABSF_H = 0x993 + SYS_CABSL = 0x994 + SYS___CABSL_B = 0x995 + SYS___CABSL_H = 0x996 + SYS_CACOS = 0x997 + SYS___CACOS_B = 0x998 + SYS___CACOS_H = 0x999 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go b/vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go index 295859c50..7a8161c1d 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go +++ b/vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go @@ -1,6 +1,7 @@ // cgo -godefs types_aix.go | go run 
mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build ppc && aix // +build ppc,aix package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go index a9ee0ffd4..07ed733c5 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go @@ -1,6 +1,7 @@ // cgo -godefs types_aix.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build ppc64 && aix // +build ppc64,aix package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_386.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_386.go deleted file mode 100644 index 725b4bee2..000000000 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_386.go +++ /dev/null @@ -1,516 +0,0 @@ -// cgo -godefs types_darwin.go | go run mkpost.go -// Code generated by the command above; see README.md. DO NOT EDIT. - -// +build 386,darwin - -package unix - -const ( - SizeofPtr = 0x4 - SizeofShort = 0x2 - SizeofInt = 0x4 - SizeofLong = 0x4 - SizeofLongLong = 0x8 -) - -type ( - _C_short int16 - _C_int int32 - _C_long int32 - _C_long_long int64 -) - -type Timespec struct { - Sec int32 - Nsec int32 -} - -type Timeval struct { - Sec int32 - Usec int32 -} - -type Timeval32 struct{} - -type Rusage struct { - Utime Timeval - Stime Timeval - Maxrss int32 - Ixrss int32 - Idrss int32 - Isrss int32 - Minflt int32 - Majflt int32 - Nswap int32 - Inblock int32 - Oublock int32 - Msgsnd int32 - Msgrcv int32 - Nsignals int32 - Nvcsw int32 - Nivcsw int32 -} - -type Rlimit struct { - Cur uint64 - Max uint64 -} - -type _Gid_t uint32 - -type Stat_t struct { - Dev int32 - Mode uint16 - Nlink uint16 - Ino uint64 - Uid uint32 - Gid uint32 - Rdev int32 - Atim Timespec - Mtim Timespec - Ctim Timespec - Btim Timespec - Size int64 - Blocks int64 - Blksize int32 - Flags uint32 - Gen uint32 - Lspare int32 - Qspare [2]int64 -} - -type Statfs_t struct { - Bsize uint32 - 
Iosize int32 - Blocks uint64 - Bfree uint64 - Bavail uint64 - Files uint64 - Ffree uint64 - Fsid Fsid - Owner uint32 - Type uint32 - Flags uint32 - Fssubtype uint32 - Fstypename [16]byte - Mntonname [1024]byte - Mntfromname [1024]byte - Reserved [8]uint32 -} - -type Flock_t struct { - Start int64 - Len int64 - Pid int32 - Type int16 - Whence int16 -} - -type Fstore_t struct { - Flags uint32 - Posmode int32 - Offset int64 - Length int64 - Bytesalloc int64 -} - -type Radvisory_t struct { - Offset int64 - Count int32 -} - -type Fbootstraptransfer_t struct { - Offset int64 - Length uint32 - Buffer *byte -} - -type Log2phys_t struct { - Flags uint32 - Contigbytes int64 - Devoffset int64 -} - -type Fsid struct { - Val [2]int32 -} - -type Dirent struct { - Ino uint64 - Seekoff uint64 - Reclen uint16 - Namlen uint16 - Type uint8 - Name [1024]int8 - _ [3]byte -} - -const ( - PathMax = 0x400 -) - -type RawSockaddrInet4 struct { - Len uint8 - Family uint8 - Port uint16 - Addr [4]byte /* in_addr */ - Zero [8]int8 -} - -type RawSockaddrInet6 struct { - Len uint8 - Family uint8 - Port uint16 - Flowinfo uint32 - Addr [16]byte /* in6_addr */ - Scope_id uint32 -} - -type RawSockaddrUnix struct { - Len uint8 - Family uint8 - Path [104]int8 -} - -type RawSockaddrDatalink struct { - Len uint8 - Family uint8 - Index uint16 - Type uint8 - Nlen uint8 - Alen uint8 - Slen uint8 - Data [12]int8 -} - -type RawSockaddr struct { - Len uint8 - Family uint8 - Data [14]int8 -} - -type RawSockaddrAny struct { - Addr RawSockaddr - Pad [92]int8 -} - -type RawSockaddrCtl struct { - Sc_len uint8 - Sc_family uint8 - Ss_sysaddr uint16 - Sc_id uint32 - Sc_unit uint32 - Sc_reserved [5]uint32 -} - -type _Socklen uint32 - -type Linger struct { - Onoff int32 - Linger int32 -} - -type Iovec struct { - Base *byte - Len uint32 -} - -type IPMreq struct { - Multiaddr [4]byte /* in_addr */ - Interface [4]byte /* in_addr */ -} - -type IPv6Mreq struct { - Multiaddr [16]byte /* in6_addr */ - Interface uint32 -} - 
-type Msghdr struct { - Name *byte - Namelen uint32 - Iov *Iovec - Iovlen int32 - Control *byte - Controllen uint32 - Flags int32 -} - -type Cmsghdr struct { - Len uint32 - Level int32 - Type int32 -} - -type Inet4Pktinfo struct { - Ifindex uint32 - Spec_dst [4]byte /* in_addr */ - Addr [4]byte /* in_addr */ -} - -type Inet6Pktinfo struct { - Addr [16]byte /* in6_addr */ - Ifindex uint32 -} - -type IPv6MTUInfo struct { - Addr RawSockaddrInet6 - Mtu uint32 -} - -type ICMPv6Filter struct { - Filt [8]uint32 -} - -const ( - SizeofSockaddrInet4 = 0x10 - SizeofSockaddrInet6 = 0x1c - SizeofSockaddrAny = 0x6c - SizeofSockaddrUnix = 0x6a - SizeofSockaddrDatalink = 0x14 - SizeofSockaddrCtl = 0x20 - SizeofLinger = 0x8 - SizeofIovec = 0x8 - SizeofIPMreq = 0x8 - SizeofIPv6Mreq = 0x14 - SizeofMsghdr = 0x1c - SizeofCmsghdr = 0xc - SizeofInet4Pktinfo = 0xc - SizeofInet6Pktinfo = 0x14 - SizeofIPv6MTUInfo = 0x20 - SizeofICMPv6Filter = 0x20 -) - -const ( - PTRACE_TRACEME = 0x0 - PTRACE_CONT = 0x7 - PTRACE_KILL = 0x8 -) - -type Kevent_t struct { - Ident uint32 - Filter int16 - Flags uint16 - Fflags uint32 - Data int32 - Udata *byte -} - -type FdSet struct { - Bits [32]int32 -} - -const ( - SizeofIfMsghdr = 0x70 - SizeofIfData = 0x60 - SizeofIfaMsghdr = 0x14 - SizeofIfmaMsghdr = 0x10 - SizeofIfmaMsghdr2 = 0x14 - SizeofRtMsghdr = 0x5c - SizeofRtMetrics = 0x38 -) - -type IfMsghdr struct { - Msglen uint16 - Version uint8 - Type uint8 - Addrs int32 - Flags int32 - Index uint16 - Data IfData -} - -type IfData struct { - Type uint8 - Typelen uint8 - Physical uint8 - Addrlen uint8 - Hdrlen uint8 - Recvquota uint8 - Xmitquota uint8 - Unused1 uint8 - Mtu uint32 - Metric uint32 - Baudrate uint32 - Ipackets uint32 - Ierrors uint32 - Opackets uint32 - Oerrors uint32 - Collisions uint32 - Ibytes uint32 - Obytes uint32 - Imcasts uint32 - Omcasts uint32 - Iqdrops uint32 - Noproto uint32 - Recvtiming uint32 - Xmittiming uint32 - Lastchange Timeval - Unused2 uint32 - Hwassist uint32 - Reserved1 uint32 
- Reserved2 uint32 -} - -type IfaMsghdr struct { - Msglen uint16 - Version uint8 - Type uint8 - Addrs int32 - Flags int32 - Index uint16 - Metric int32 -} - -type IfmaMsghdr struct { - Msglen uint16 - Version uint8 - Type uint8 - Addrs int32 - Flags int32 - Index uint16 - _ [2]byte -} - -type IfmaMsghdr2 struct { - Msglen uint16 - Version uint8 - Type uint8 - Addrs int32 - Flags int32 - Index uint16 - Refcount int32 -} - -type RtMsghdr struct { - Msglen uint16 - Version uint8 - Type uint8 - Index uint16 - Flags int32 - Addrs int32 - Pid int32 - Seq int32 - Errno int32 - Use int32 - Inits uint32 - Rmx RtMetrics -} - -type RtMetrics struct { - Locks uint32 - Mtu uint32 - Hopcount uint32 - Expire int32 - Recvpipe uint32 - Sendpipe uint32 - Ssthresh uint32 - Rtt uint32 - Rttvar uint32 - Pksent uint32 - State uint32 - Filler [3]uint32 -} - -const ( - SizeofBpfVersion = 0x4 - SizeofBpfStat = 0x8 - SizeofBpfProgram = 0x8 - SizeofBpfInsn = 0x8 - SizeofBpfHdr = 0x14 -) - -type BpfVersion struct { - Major uint16 - Minor uint16 -} - -type BpfStat struct { - Recv uint32 - Drop uint32 -} - -type BpfProgram struct { - Len uint32 - Insns *BpfInsn -} - -type BpfInsn struct { - Code uint16 - Jt uint8 - Jf uint8 - K uint32 -} - -type BpfHdr struct { - Tstamp Timeval - Caplen uint32 - Datalen uint32 - Hdrlen uint16 - _ [2]byte -} - -type Termios struct { - Iflag uint32 - Oflag uint32 - Cflag uint32 - Lflag uint32 - Cc [20]uint8 - Ispeed uint32 - Ospeed uint32 -} - -type Winsize struct { - Row uint16 - Col uint16 - Xpixel uint16 - Ypixel uint16 -} - -const ( - AT_FDCWD = -0x2 - AT_REMOVEDIR = 0x80 - AT_SYMLINK_FOLLOW = 0x40 - AT_SYMLINK_NOFOLLOW = 0x20 -) - -type PollFd struct { - Fd int32 - Events int16 - Revents int16 -} - -const ( - POLLERR = 0x8 - POLLHUP = 0x10 - POLLIN = 0x1 - POLLNVAL = 0x20 - POLLOUT = 0x4 - POLLPRI = 0x2 - POLLRDBAND = 0x80 - POLLRDNORM = 0x40 - POLLWRBAND = 0x100 - POLLWRNORM = 0x4 -) - -type Utsname struct { - Sysname [256]byte - Nodename [256]byte - 
Release [256]byte - Version [256]byte - Machine [256]byte -} - -const SizeofClockinfo = 0x14 - -type Clockinfo struct { - Hz int32 - Tick int32 - Tickadj int32 - Stathz int32 - Profhz int32 -} - -type CtlInfo struct { - Id uint32 - Name [96]byte -} diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go index 080ffce32..2673e6c59 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go @@ -1,6 +1,7 @@ // cgo -godefs types_darwin.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build amd64 && darwin // +build amd64,darwin package unix @@ -210,6 +211,13 @@ type RawSockaddrCtl struct { type _Socklen uint32 +type Xucred struct { + Version uint32 + Uid uint32 + Ngroups int16 + Groups [16]uint32 +} + type Linger struct { Onoff int32 Linger int32 @@ -225,6 +233,12 @@ type IPMreq struct { Interface [4]byte /* in_addr */ } +type IPMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + type IPv6Mreq struct { Multiaddr [16]byte /* in6_addr */ Interface uint32 @@ -273,9 +287,11 @@ const ( SizeofSockaddrUnix = 0x6a SizeofSockaddrDatalink = 0x14 SizeofSockaddrCtl = 0x20 + SizeofXucred = 0x4c SizeofLinger = 0x8 SizeofIovec = 0x10 SizeofIPMreq = 0x8 + SizeofIPMreqn = 0xc SizeofIPv6Mreq = 0x14 SizeofMsghdr = 0x30 SizeofCmsghdr = 0xc diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go deleted file mode 100644 index f2a77bc4e..000000000 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go +++ /dev/null @@ -1,516 +0,0 @@ -// cgo -godefs types_darwin.go | go run mkpost.go -// Code generated by the command above; see README.md. DO NOT EDIT. 
- -// +build arm,darwin - -package unix - -const ( - SizeofPtr = 0x4 - SizeofShort = 0x2 - SizeofInt = 0x4 - SizeofLong = 0x4 - SizeofLongLong = 0x8 -) - -type ( - _C_short int16 - _C_int int32 - _C_long int32 - _C_long_long int64 -) - -type Timespec struct { - Sec int32 - Nsec int32 -} - -type Timeval struct { - Sec int32 - Usec int32 -} - -type Timeval32 struct{} - -type Rusage struct { - Utime Timeval - Stime Timeval - Maxrss int32 - Ixrss int32 - Idrss int32 - Isrss int32 - Minflt int32 - Majflt int32 - Nswap int32 - Inblock int32 - Oublock int32 - Msgsnd int32 - Msgrcv int32 - Nsignals int32 - Nvcsw int32 - Nivcsw int32 -} - -type Rlimit struct { - Cur uint64 - Max uint64 -} - -type _Gid_t uint32 - -type Stat_t struct { - Dev int32 - Mode uint16 - Nlink uint16 - Ino uint64 - Uid uint32 - Gid uint32 - Rdev int32 - Atim Timespec - Mtim Timespec - Ctim Timespec - Btim Timespec - Size int64 - Blocks int64 - Blksize int32 - Flags uint32 - Gen uint32 - Lspare int32 - Qspare [2]int64 -} - -type Statfs_t struct { - Bsize uint32 - Iosize int32 - Blocks uint64 - Bfree uint64 - Bavail uint64 - Files uint64 - Ffree uint64 - Fsid Fsid - Owner uint32 - Type uint32 - Flags uint32 - Fssubtype uint32 - Fstypename [16]byte - Mntonname [1024]byte - Mntfromname [1024]byte - Reserved [8]uint32 -} - -type Flock_t struct { - Start int64 - Len int64 - Pid int32 - Type int16 - Whence int16 -} - -type Fstore_t struct { - Flags uint32 - Posmode int32 - Offset int64 - Length int64 - Bytesalloc int64 -} - -type Radvisory_t struct { - Offset int64 - Count int32 -} - -type Fbootstraptransfer_t struct { - Offset int64 - Length uint32 - Buffer *byte -} - -type Log2phys_t struct { - Flags uint32 - Contigbytes int64 - Devoffset int64 -} - -type Fsid struct { - Val [2]int32 -} - -type Dirent struct { - Ino uint64 - Seekoff uint64 - Reclen uint16 - Namlen uint16 - Type uint8 - Name [1024]int8 - _ [3]byte -} - -const ( - PathMax = 0x400 -) - -type RawSockaddrInet4 struct { - Len uint8 - Family 
uint8 - Port uint16 - Addr [4]byte /* in_addr */ - Zero [8]int8 -} - -type RawSockaddrInet6 struct { - Len uint8 - Family uint8 - Port uint16 - Flowinfo uint32 - Addr [16]byte /* in6_addr */ - Scope_id uint32 -} - -type RawSockaddrUnix struct { - Len uint8 - Family uint8 - Path [104]int8 -} - -type RawSockaddrDatalink struct { - Len uint8 - Family uint8 - Index uint16 - Type uint8 - Nlen uint8 - Alen uint8 - Slen uint8 - Data [12]int8 -} - -type RawSockaddr struct { - Len uint8 - Family uint8 - Data [14]int8 -} - -type RawSockaddrAny struct { - Addr RawSockaddr - Pad [92]int8 -} - -type RawSockaddrCtl struct { - Sc_len uint8 - Sc_family uint8 - Ss_sysaddr uint16 - Sc_id uint32 - Sc_unit uint32 - Sc_reserved [5]uint32 -} - -type _Socklen uint32 - -type Linger struct { - Onoff int32 - Linger int32 -} - -type Iovec struct { - Base *byte - Len uint32 -} - -type IPMreq struct { - Multiaddr [4]byte /* in_addr */ - Interface [4]byte /* in_addr */ -} - -type IPv6Mreq struct { - Multiaddr [16]byte /* in6_addr */ - Interface uint32 -} - -type Msghdr struct { - Name *byte - Namelen uint32 - Iov *Iovec - Iovlen int32 - Control *byte - Controllen uint32 - Flags int32 -} - -type Cmsghdr struct { - Len uint32 - Level int32 - Type int32 -} - -type Inet4Pktinfo struct { - Ifindex uint32 - Spec_dst [4]byte /* in_addr */ - Addr [4]byte /* in_addr */ -} - -type Inet6Pktinfo struct { - Addr [16]byte /* in6_addr */ - Ifindex uint32 -} - -type IPv6MTUInfo struct { - Addr RawSockaddrInet6 - Mtu uint32 -} - -type ICMPv6Filter struct { - Filt [8]uint32 -} - -const ( - SizeofSockaddrInet4 = 0x10 - SizeofSockaddrInet6 = 0x1c - SizeofSockaddrAny = 0x6c - SizeofSockaddrUnix = 0x6a - SizeofSockaddrDatalink = 0x14 - SizeofSockaddrCtl = 0x20 - SizeofLinger = 0x8 - SizeofIovec = 0x8 - SizeofIPMreq = 0x8 - SizeofIPv6Mreq = 0x14 - SizeofMsghdr = 0x1c - SizeofCmsghdr = 0xc - SizeofInet4Pktinfo = 0xc - SizeofInet6Pktinfo = 0x14 - SizeofIPv6MTUInfo = 0x20 - SizeofICMPv6Filter = 0x20 -) - -const ( - 
PTRACE_TRACEME = 0x0 - PTRACE_CONT = 0x7 - PTRACE_KILL = 0x8 -) - -type Kevent_t struct { - Ident uint32 - Filter int16 - Flags uint16 - Fflags uint32 - Data int32 - Udata *byte -} - -type FdSet struct { - Bits [32]int32 -} - -const ( - SizeofIfMsghdr = 0x70 - SizeofIfData = 0x60 - SizeofIfaMsghdr = 0x14 - SizeofIfmaMsghdr = 0x10 - SizeofIfmaMsghdr2 = 0x14 - SizeofRtMsghdr = 0x5c - SizeofRtMetrics = 0x38 -) - -type IfMsghdr struct { - Msglen uint16 - Version uint8 - Type uint8 - Addrs int32 - Flags int32 - Index uint16 - Data IfData -} - -type IfData struct { - Type uint8 - Typelen uint8 - Physical uint8 - Addrlen uint8 - Hdrlen uint8 - Recvquota uint8 - Xmitquota uint8 - Unused1 uint8 - Mtu uint32 - Metric uint32 - Baudrate uint32 - Ipackets uint32 - Ierrors uint32 - Opackets uint32 - Oerrors uint32 - Collisions uint32 - Ibytes uint32 - Obytes uint32 - Imcasts uint32 - Omcasts uint32 - Iqdrops uint32 - Noproto uint32 - Recvtiming uint32 - Xmittiming uint32 - Lastchange Timeval - Unused2 uint32 - Hwassist uint32 - Reserved1 uint32 - Reserved2 uint32 -} - -type IfaMsghdr struct { - Msglen uint16 - Version uint8 - Type uint8 - Addrs int32 - Flags int32 - Index uint16 - Metric int32 -} - -type IfmaMsghdr struct { - Msglen uint16 - Version uint8 - Type uint8 - Addrs int32 - Flags int32 - Index uint16 - _ [2]byte -} - -type IfmaMsghdr2 struct { - Msglen uint16 - Version uint8 - Type uint8 - Addrs int32 - Flags int32 - Index uint16 - Refcount int32 -} - -type RtMsghdr struct { - Msglen uint16 - Version uint8 - Type uint8 - Index uint16 - Flags int32 - Addrs int32 - Pid int32 - Seq int32 - Errno int32 - Use int32 - Inits uint32 - Rmx RtMetrics -} - -type RtMetrics struct { - Locks uint32 - Mtu uint32 - Hopcount uint32 - Expire int32 - Recvpipe uint32 - Sendpipe uint32 - Ssthresh uint32 - Rtt uint32 - Rttvar uint32 - Pksent uint32 - State uint32 - Filler [3]uint32 -} - -const ( - SizeofBpfVersion = 0x4 - SizeofBpfStat = 0x8 - SizeofBpfProgram = 0x8 - SizeofBpfInsn = 0x8 - 
SizeofBpfHdr = 0x14 -) - -type BpfVersion struct { - Major uint16 - Minor uint16 -} - -type BpfStat struct { - Recv uint32 - Drop uint32 -} - -type BpfProgram struct { - Len uint32 - Insns *BpfInsn -} - -type BpfInsn struct { - Code uint16 - Jt uint8 - Jf uint8 - K uint32 -} - -type BpfHdr struct { - Tstamp Timeval - Caplen uint32 - Datalen uint32 - Hdrlen uint16 - _ [2]byte -} - -type Termios struct { - Iflag uint32 - Oflag uint32 - Cflag uint32 - Lflag uint32 - Cc [20]uint8 - Ispeed uint32 - Ospeed uint32 -} - -type Winsize struct { - Row uint16 - Col uint16 - Xpixel uint16 - Ypixel uint16 -} - -const ( - AT_FDCWD = -0x2 - AT_REMOVEDIR = 0x80 - AT_SYMLINK_FOLLOW = 0x40 - AT_SYMLINK_NOFOLLOW = 0x20 -) - -type PollFd struct { - Fd int32 - Events int16 - Revents int16 -} - -const ( - POLLERR = 0x8 - POLLHUP = 0x10 - POLLIN = 0x1 - POLLNVAL = 0x20 - POLLOUT = 0x4 - POLLPRI = 0x2 - POLLRDBAND = 0x80 - POLLRDNORM = 0x40 - POLLWRBAND = 0x100 - POLLWRNORM = 0x4 -) - -type Utsname struct { - Sysname [256]byte - Nodename [256]byte - Release [256]byte - Version [256]byte - Machine [256]byte -} - -const SizeofClockinfo = 0x14 - -type Clockinfo struct { - Hz int32 - Tick int32 - Tickadj int32 - Stathz int32 - Profhz int32 -} - -type CtlInfo struct { - Id uint32 - Name [96]byte -} diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go index c9492428b..1465cbcff 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go @@ -1,6 +1,7 @@ // cgo -godefs types_darwin.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. 
+//go:build arm64 && darwin // +build arm64,darwin package unix @@ -210,6 +211,13 @@ type RawSockaddrCtl struct { type _Socklen uint32 +type Xucred struct { + Version uint32 + Uid uint32 + Ngroups int16 + Groups [16]uint32 +} + type Linger struct { Onoff int32 Linger int32 @@ -225,6 +233,12 @@ type IPMreq struct { Interface [4]byte /* in_addr */ } +type IPMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + type IPv6Mreq struct { Multiaddr [16]byte /* in6_addr */ Interface uint32 @@ -273,9 +287,11 @@ const ( SizeofSockaddrUnix = 0x6a SizeofSockaddrDatalink = 0x14 SizeofSockaddrCtl = 0x20 + SizeofXucred = 0x4c SizeofLinger = 0x8 SizeofIovec = 0x10 SizeofIPMreq = 0x8 + SizeofIPMreqn = 0xc SizeofIPv6Mreq = 0x14 SizeofMsghdr = 0x30 SizeofCmsghdr = 0xc diff --git a/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go index 85506a05d..d0ba8e9b8 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go @@ -1,6 +1,7 @@ // cgo -godefs types_dragonfly.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build amd64 && dragonfly // +build amd64,dragonfly package unix @@ -430,6 +431,9 @@ type Winsize struct { const ( AT_FDCWD = 0xfffafdcd AT_SYMLINK_NOFOLLOW = 0x1 + AT_REMOVEDIR = 0x2 + AT_EACCESS = 0x4 + AT_SYMLINK_FOLLOW = 0x8 ) type PollFd struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go index 3e9dad33e..1f99c024a 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go @@ -1,6 +1,7 @@ // cgo -godefs types_freebsd.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. 
+//go:build 386 && freebsd // +build 386,freebsd package unix @@ -250,6 +251,14 @@ type RawSockaddrAny struct { type _Socklen uint32 +type Xucred struct { + Version uint32 + Uid uint32 + Ngroups int16 + Groups [16]uint32 + _ *byte +} + type Linger struct { Onoff int32 Linger int32 @@ -312,6 +321,7 @@ const ( SizeofSockaddrAny = 0x6c SizeofSockaddrUnix = 0x6a SizeofSockaddrDatalink = 0x36 + SizeofXucred = 0x50 SizeofLinger = 0x8 SizeofIovec = 0x8 SizeofIPMreq = 0x8 @@ -662,9 +672,10 @@ type Winsize struct { const ( AT_FDCWD = -0x64 - AT_REMOVEDIR = 0x800 - AT_SYMLINK_FOLLOW = 0x400 + AT_EACCESS = 0x100 AT_SYMLINK_NOFOLLOW = 0x200 + AT_SYMLINK_FOLLOW = 0x400 + AT_REMOVEDIR = 0x800 ) type PollFd struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go index e00e61554..ddf0305a5 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go @@ -1,6 +1,7 @@ // cgo -godefs types_freebsd.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. 
+//go:build amd64 && freebsd // +build amd64,freebsd package unix @@ -246,6 +247,14 @@ type RawSockaddrAny struct { type _Socklen uint32 +type Xucred struct { + Version uint32 + Uid uint32 + Ngroups int16 + Groups [16]uint32 + _ *byte +} + type Linger struct { Onoff int32 Linger int32 @@ -308,6 +317,7 @@ const ( SizeofSockaddrAny = 0x6c SizeofSockaddrUnix = 0x6a SizeofSockaddrDatalink = 0x36 + SizeofXucred = 0x58 SizeofLinger = 0x8 SizeofIovec = 0x10 SizeofIPMreq = 0x8 @@ -665,9 +675,10 @@ type Winsize struct { const ( AT_FDCWD = -0x64 - AT_REMOVEDIR = 0x800 - AT_SYMLINK_FOLLOW = 0x400 + AT_EACCESS = 0x100 AT_SYMLINK_NOFOLLOW = 0x200 + AT_SYMLINK_FOLLOW = 0x400 + AT_REMOVEDIR = 0x800 ) type PollFd struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go index 5da13c871..dce0a5c80 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go @@ -1,6 +1,7 @@ // cgo -godefs -- -fsigned-char types_freebsd.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. 
+//go:build arm && freebsd // +build arm,freebsd package unix @@ -248,6 +249,14 @@ type RawSockaddrAny struct { type _Socklen uint32 +type Xucred struct { + Version uint32 + Uid uint32 + Ngroups int16 + Groups [16]uint32 + _ *byte +} + type Linger struct { Onoff int32 Linger int32 @@ -310,6 +319,7 @@ const ( SizeofSockaddrAny = 0x6c SizeofSockaddrUnix = 0x6a SizeofSockaddrDatalink = 0x36 + SizeofXucred = 0x50 SizeofLinger = 0x8 SizeofIovec = 0x8 SizeofIPMreq = 0x8 @@ -646,9 +656,10 @@ type Winsize struct { const ( AT_FDCWD = -0x64 - AT_REMOVEDIR = 0x800 - AT_SYMLINK_FOLLOW = 0x400 + AT_EACCESS = 0x100 AT_SYMLINK_NOFOLLOW = 0x200 + AT_SYMLINK_FOLLOW = 0x400 + AT_REMOVEDIR = 0x800 ) type PollFd struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go index 995ecf9d4..e23244702 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go @@ -1,6 +1,7 @@ // cgo -godefs -- -fsigned-char types_freebsd.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. 
+//go:build arm64 && freebsd // +build arm64,freebsd package unix @@ -246,6 +247,14 @@ type RawSockaddrAny struct { type _Socklen uint32 +type Xucred struct { + Version uint32 + Uid uint32 + Ngroups int16 + Groups [16]uint32 + _ *byte +} + type Linger struct { Onoff int32 Linger int32 @@ -308,6 +317,7 @@ const ( SizeofSockaddrAny = 0x6c SizeofSockaddrUnix = 0x6a SizeofSockaddrDatalink = 0x36 + SizeofXucred = 0x58 SizeofLinger = 0x8 SizeofIovec = 0x10 SizeofIPMreq = 0x8 @@ -643,9 +653,10 @@ type Winsize struct { const ( AT_FDCWD = -0x64 - AT_REMOVEDIR = 0x800 - AT_SYMLINK_FOLLOW = 0x400 + AT_EACCESS = 0x100 AT_SYMLINK_NOFOLLOW = 0x200 + AT_SYMLINK_FOLLOW = 0x400 + AT_REMOVEDIR = 0x800 ) type PollFd struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_illumos_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_illumos_amd64.go new file mode 100644 index 000000000..236f37ef6 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/ztypes_illumos_amd64.go @@ -0,0 +1,40 @@ +// cgo -godefs types_illumos.go | go run mkpost.go +// Code generated by the command above; see README.md. DO NOT EDIT. + +//go:build amd64 && illumos +// +build amd64,illumos + +package unix + +const ( + TUNNEWPPA = 0x540001 + TUNSETPPA = 0x540002 + + I_STR = 0x5308 + I_POP = 0x5303 + I_PUSH = 0x5302 + I_PLINK = 0x5316 + I_PUNLINK = 0x5317 + + IF_UNITSEL = -0x7ffb8cca +) + +type strbuf struct { + Maxlen int32 + Len int32 + Buf *int8 +} + +type Strioctl struct { + Cmd int32 + Timout int32 + Len int32 + Dp *int8 +} + +type Lifreq struct { + Name [32]int8 + Lifru1 [4]byte + Type uint32 + Lifru [336]byte +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index 9f3b1a4e5..72887abe5 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -1,5 +1,6 @@ // Code generated by mkmerge.go; DO NOT EDIT. 
+//go:build linux // +build linux package unix @@ -83,7 +84,7 @@ type FileCloneRange struct { Dest_offset uint64 } -type FileDedupeRange struct { +type RawFileDedupeRange struct { Src_offset uint64 Src_length uint64 Dest_count uint16 @@ -91,6 +92,21 @@ type FileDedupeRange struct { Reserved2 uint32 } +type RawFileDedupeRangeInfo struct { + Dest_fd int64 + Dest_offset uint64 + Bytes_deduped uint64 + Status int32 + Reserved uint32 +} + +const ( + SizeofRawFileDedupeRange = 0x18 + SizeofRawFileDedupeRangeInfo = 0x20 + FILE_DEDUPE_RANGE_SAME = 0x0 + FILE_DEDUPE_RANGE_DIFFERS = 0x1 +) + type FscryptPolicy struct { Version uint8 Contents_encryption_mode uint8 @@ -288,7 +304,8 @@ type RawSockaddrVM struct { Reserved1 uint16 Port uint32 Cid uint32 - Zero [4]uint8 + Flags uint8 + Zero [3]uint8 } type RawSockaddrXDP struct { @@ -334,6 +351,13 @@ type RawSockaddrIUCV struct { Name [8]int8 } +type RawSockaddrNFC struct { + Sa_family uint16 + Dev_idx uint32 + Target_idx uint32 + Nfc_protocol uint32 +} + type _Socklen uint32 type Linger struct { @@ -447,6 +471,7 @@ const ( SizeofSockaddrL2TPIP = 0x10 SizeofSockaddrL2TPIP6 = 0x20 SizeofSockaddrIUCV = 0x20 + SizeofSockaddrNFC = 0x10 SizeofLinger = 0x8 SizeofIPMreq = 0x8 SizeofIPMreqn = 0xc @@ -999,7 +1024,10 @@ const ( PERF_SAMPLE_PHYS_ADDR = 0x80000 PERF_SAMPLE_AUX = 0x100000 PERF_SAMPLE_CGROUP = 0x200000 - PERF_SAMPLE_MAX = 0x400000 + PERF_SAMPLE_DATA_PAGE_SIZE = 0x400000 + PERF_SAMPLE_CODE_PAGE_SIZE = 0x800000 + PERF_SAMPLE_WEIGHT_STRUCT = 0x1000000 + PERF_SAMPLE_MAX = 0x2000000 PERF_SAMPLE_BRANCH_USER_SHIFT = 0x0 PERF_SAMPLE_BRANCH_KERNEL_SHIFT = 0x1 PERF_SAMPLE_BRANCH_HV_SHIFT = 0x2 @@ -3109,7 +3137,8 @@ const ( DEVLINK_ATTR_REMOTE_RELOAD_STATS = 0xa1 DEVLINK_ATTR_RELOAD_ACTION_INFO = 0xa2 DEVLINK_ATTR_RELOAD_ACTION_STATS = 0xa3 - DEVLINK_ATTR_MAX = 0xa3 + DEVLINK_ATTR_PORT_PCI_SF_NUMBER = 0xa4 + DEVLINK_ATTR_MAX = 0xa4 DEVLINK_DPIPE_FIELD_MAPPING_TYPE_NONE = 0x0 DEVLINK_DPIPE_FIELD_MAPPING_TYPE_IFINDEX = 0x1 
DEVLINK_DPIPE_MATCH_TYPE_FIELD_EXACT = 0x0 @@ -3123,7 +3152,9 @@ const ( DEVLINK_RESOURCE_UNIT_ENTRY = 0x0 DEVLINK_PORT_FUNCTION_ATTR_UNSPEC = 0x0 DEVLINK_PORT_FUNCTION_ATTR_HW_ADDR = 0x1 - DEVLINK_PORT_FUNCTION_ATTR_MAX = 0x1 + DEVLINK_PORT_FN_ATTR_STATE = 0x2 + DEVLINK_PORT_FN_ATTR_OPSTATE = 0x3 + DEVLINK_PORT_FUNCTION_ATTR_MAX = 0x3 ) type FsverityDigest struct { @@ -3492,7 +3523,8 @@ const ( ETHTOOL_A_LINKMODES_DUPLEX = 0x6 ETHTOOL_A_LINKMODES_MASTER_SLAVE_CFG = 0x7 ETHTOOL_A_LINKMODES_MASTER_SLAVE_STATE = 0x8 - ETHTOOL_A_LINKMODES_MAX = 0x8 + ETHTOOL_A_LINKMODES_LANES = 0x9 + ETHTOOL_A_LINKMODES_MAX = 0x9 ETHTOOL_A_LINKSTATE_UNSPEC = 0x0 ETHTOOL_A_LINKSTATE_HEADER = 0x1 ETHTOOL_A_LINKSTATE_LINK = 0x2 @@ -3680,3 +3712,196 @@ const ( ETHTOOL_A_TUNNEL_INFO_UDP_PORTS = 0x2 ETHTOOL_A_TUNNEL_INFO_MAX = 0x2 ) + +type EthtoolDrvinfo struct { + Cmd uint32 + Driver [32]byte + Version [32]byte + Fw_version [32]byte + Bus_info [32]byte + Erom_version [32]byte + Reserved2 [12]byte + N_priv_flags uint32 + N_stats uint32 + Testinfo_len uint32 + Eedump_len uint32 + Regdump_len uint32 +} + +type ( + HIDRawReportDescriptor struct { + Size uint32 + Value [4096]uint8 + } + HIDRawDevInfo struct { + Bustype uint32 + Vendor int16 + Product int16 + } +) + +const ( + CLOSE_RANGE_UNSHARE = 0x2 + CLOSE_RANGE_CLOEXEC = 0x4 +) + +const ( + NLMSGERR_ATTR_MSG = 0x1 + NLMSGERR_ATTR_OFFS = 0x2 + NLMSGERR_ATTR_COOKIE = 0x3 +) + +type ( + EraseInfo struct { + Start uint32 + Length uint32 + } + EraseInfo64 struct { + Start uint64 + Length uint64 + } + MtdOobBuf struct { + Start uint32 + Length uint32 + Ptr *uint8 + } + MtdOobBuf64 struct { + Start uint64 + Pad uint32 + Length uint32 + Ptr uint64 + } + MtdWriteReq struct { + Start uint64 + Len uint64 + Ooblen uint64 + Data uint64 + Oob uint64 + Mode uint8 + _ [7]uint8 + } + MtdInfo struct { + Type uint8 + Flags uint32 + Size uint32 + Erasesize uint32 + Writesize uint32 + Oobsize uint32 + _ uint64 + } + RegionInfo struct { + Offset uint32 + 
Erasesize uint32 + Numblocks uint32 + Regionindex uint32 + } + OtpInfo struct { + Start uint32 + Length uint32 + Locked uint32 + } + NandOobinfo struct { + Useecc uint32 + Eccbytes uint32 + Oobfree [8][2]uint32 + Eccpos [32]uint32 + } + NandOobfree struct { + Offset uint32 + Length uint32 + } + NandEcclayout struct { + Eccbytes uint32 + Eccpos [64]uint32 + Oobavail uint32 + Oobfree [8]NandOobfree + } + MtdEccStats struct { + Corrected uint32 + Failed uint32 + Badblocks uint32 + Bbtblocks uint32 + } +) + +const ( + MTD_OPS_PLACE_OOB = 0x0 + MTD_OPS_AUTO_OOB = 0x1 + MTD_OPS_RAW = 0x2 +) + +const ( + MTD_FILE_MODE_NORMAL = 0x0 + MTD_FILE_MODE_OTP_FACTORY = 0x1 + MTD_FILE_MODE_OTP_USER = 0x2 + MTD_FILE_MODE_RAW = 0x3 +) + +const ( + NFC_CMD_UNSPEC = 0x0 + NFC_CMD_GET_DEVICE = 0x1 + NFC_CMD_DEV_UP = 0x2 + NFC_CMD_DEV_DOWN = 0x3 + NFC_CMD_DEP_LINK_UP = 0x4 + NFC_CMD_DEP_LINK_DOWN = 0x5 + NFC_CMD_START_POLL = 0x6 + NFC_CMD_STOP_POLL = 0x7 + NFC_CMD_GET_TARGET = 0x8 + NFC_EVENT_TARGETS_FOUND = 0x9 + NFC_EVENT_DEVICE_ADDED = 0xa + NFC_EVENT_DEVICE_REMOVED = 0xb + NFC_EVENT_TARGET_LOST = 0xc + NFC_EVENT_TM_ACTIVATED = 0xd + NFC_EVENT_TM_DEACTIVATED = 0xe + NFC_CMD_LLC_GET_PARAMS = 0xf + NFC_CMD_LLC_SET_PARAMS = 0x10 + NFC_CMD_ENABLE_SE = 0x11 + NFC_CMD_DISABLE_SE = 0x12 + NFC_CMD_LLC_SDREQ = 0x13 + NFC_EVENT_LLC_SDRES = 0x14 + NFC_CMD_FW_DOWNLOAD = 0x15 + NFC_EVENT_SE_ADDED = 0x16 + NFC_EVENT_SE_REMOVED = 0x17 + NFC_EVENT_SE_CONNECTIVITY = 0x18 + NFC_EVENT_SE_TRANSACTION = 0x19 + NFC_CMD_GET_SE = 0x1a + NFC_CMD_SE_IO = 0x1b + NFC_CMD_ACTIVATE_TARGET = 0x1c + NFC_CMD_VENDOR = 0x1d + NFC_CMD_DEACTIVATE_TARGET = 0x1e + NFC_ATTR_UNSPEC = 0x0 + NFC_ATTR_DEVICE_INDEX = 0x1 + NFC_ATTR_DEVICE_NAME = 0x2 + NFC_ATTR_PROTOCOLS = 0x3 + NFC_ATTR_TARGET_INDEX = 0x4 + NFC_ATTR_TARGET_SENS_RES = 0x5 + NFC_ATTR_TARGET_SEL_RES = 0x6 + NFC_ATTR_TARGET_NFCID1 = 0x7 + NFC_ATTR_TARGET_SENSB_RES = 0x8 + NFC_ATTR_TARGET_SENSF_RES = 0x9 + NFC_ATTR_COMM_MODE = 0xa + NFC_ATTR_RF_MODE = 0xb + 
NFC_ATTR_DEVICE_POWERED = 0xc + NFC_ATTR_IM_PROTOCOLS = 0xd + NFC_ATTR_TM_PROTOCOLS = 0xe + NFC_ATTR_LLC_PARAM_LTO = 0xf + NFC_ATTR_LLC_PARAM_RW = 0x10 + NFC_ATTR_LLC_PARAM_MIUX = 0x11 + NFC_ATTR_SE = 0x12 + NFC_ATTR_LLC_SDP = 0x13 + NFC_ATTR_FIRMWARE_NAME = 0x14 + NFC_ATTR_SE_INDEX = 0x15 + NFC_ATTR_SE_TYPE = 0x16 + NFC_ATTR_SE_AID = 0x17 + NFC_ATTR_FIRMWARE_DOWNLOAD_STATUS = 0x18 + NFC_ATTR_SE_APDU = 0x19 + NFC_ATTR_TARGET_ISO15693_DSFID = 0x1a + NFC_ATTR_TARGET_ISO15693_UID = 0x1b + NFC_ATTR_SE_PARAMS = 0x1c + NFC_ATTR_VENDOR_ID = 0x1d + NFC_ATTR_VENDOR_SUBCMD = 0x1e + NFC_ATTR_VENDOR_DATA = 0x1f + NFC_SDP_ATTR_UNSPEC = 0x0 + NFC_SDP_ATTR_URI = 0x1 + NFC_SDP_ATTR_SAP = 0x2 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go index 088bd77e3..235c62e46 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go @@ -1,6 +1,7 @@ // cgo -godefs -- -Wall -Werror -static -I/tmp/include -m32 /build/linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. 
+//go:build 386 && linux // +build 386,linux package unix @@ -127,6 +128,17 @@ const ( FADV_NOREUSE = 0x5 ) +type RawSockaddrNFCLLCP struct { + Sa_family uint16 + Dev_idx uint32 + Target_idx uint32 + Nfc_protocol uint32 + Dsap uint8 + Ssap uint8 + Service_name [63]uint8 + Service_name_len uint32 +} + type RawSockaddr struct { Family uint16 Data [14]int8 @@ -159,9 +171,10 @@ type Cmsghdr struct { } const ( - SizeofIovec = 0x8 - SizeofMsghdr = 0x1c - SizeofCmsghdr = 0xc + SizeofSockaddrNFCLLCP = 0x58 + SizeofIovec = 0x8 + SizeofMsghdr = 0x1c + SizeofCmsghdr = 0xc ) const ( diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go index 078d958ec..99b1e5b6a 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go @@ -1,6 +1,7 @@ // cgo -godefs -- -Wall -Werror -static -I/tmp/include -m64 /build/linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. 
+//go:build amd64 && linux // +build amd64,linux package unix @@ -129,6 +130,17 @@ const ( FADV_NOREUSE = 0x5 ) +type RawSockaddrNFCLLCP struct { + Sa_family uint16 + Dev_idx uint32 + Target_idx uint32 + Nfc_protocol uint32 + Dsap uint8 + Ssap uint8 + Service_name [63]uint8 + Service_name_len uint64 +} + type RawSockaddr struct { Family uint16 Data [14]int8 @@ -162,9 +174,10 @@ type Cmsghdr struct { } const ( - SizeofIovec = 0x10 - SizeofMsghdr = 0x38 - SizeofCmsghdr = 0x10 + SizeofSockaddrNFCLLCP = 0x60 + SizeofIovec = 0x10 + SizeofMsghdr = 0x38 + SizeofCmsghdr = 0x10 ) const ( diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go index 2d39122f4..cc8bba791 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go @@ -1,6 +1,7 @@ // cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. 
+//go:build arm && linux // +build arm,linux package unix @@ -133,6 +134,17 @@ const ( FADV_NOREUSE = 0x5 ) +type RawSockaddrNFCLLCP struct { + Sa_family uint16 + Dev_idx uint32 + Target_idx uint32 + Nfc_protocol uint32 + Dsap uint8 + Ssap uint8 + Service_name [63]uint8 + Service_name_len uint32 +} + type RawSockaddr struct { Family uint16 Data [14]uint8 @@ -165,9 +177,10 @@ type Cmsghdr struct { } const ( - SizeofIovec = 0x8 - SizeofMsghdr = 0x1c - SizeofCmsghdr = 0xc + SizeofSockaddrNFCLLCP = 0x58 + SizeofIovec = 0x8 + SizeofMsghdr = 0x1c + SizeofCmsghdr = 0xc ) const ( diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go index 304cbd045..fa8fe3a75 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go @@ -1,6 +1,7 @@ // cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char /build/linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. 
+//go:build arm64 && linux // +build arm64,linux package unix @@ -130,6 +131,17 @@ const ( FADV_NOREUSE = 0x5 ) +type RawSockaddrNFCLLCP struct { + Sa_family uint16 + Dev_idx uint32 + Target_idx uint32 + Nfc_protocol uint32 + Dsap uint8 + Ssap uint8 + Service_name [63]uint8 + Service_name_len uint64 +} + type RawSockaddr struct { Family uint16 Data [14]int8 @@ -163,9 +175,10 @@ type Cmsghdr struct { } const ( - SizeofIovec = 0x10 - SizeofMsghdr = 0x38 - SizeofCmsghdr = 0x10 + SizeofSockaddrNFCLLCP = 0x60 + SizeofIovec = 0x10 + SizeofMsghdr = 0x38 + SizeofCmsghdr = 0x10 ) const ( diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go index 7d9d57006..e7fb8d9b7 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go @@ -1,6 +1,7 @@ // cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. 
+//go:build mips && linux // +build mips,linux package unix @@ -132,6 +133,17 @@ const ( FADV_NOREUSE = 0x5 ) +type RawSockaddrNFCLLCP struct { + Sa_family uint16 + Dev_idx uint32 + Target_idx uint32 + Nfc_protocol uint32 + Dsap uint8 + Ssap uint8 + Service_name [63]uint8 + Service_name_len uint32 +} + type RawSockaddr struct { Family uint16 Data [14]int8 @@ -164,9 +176,10 @@ type Cmsghdr struct { } const ( - SizeofIovec = 0x8 - SizeofMsghdr = 0x1c - SizeofCmsghdr = 0xc + SizeofSockaddrNFCLLCP = 0x58 + SizeofIovec = 0x8 + SizeofMsghdr = 0x1c + SizeofCmsghdr = 0xc ) const ( diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go index a1eb2577b..2fa61d593 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go @@ -1,6 +1,7 @@ // cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. 
+//go:build mips64 && linux // +build mips64,linux package unix @@ -130,6 +131,17 @@ const ( FADV_NOREUSE = 0x5 ) +type RawSockaddrNFCLLCP struct { + Sa_family uint16 + Dev_idx uint32 + Target_idx uint32 + Nfc_protocol uint32 + Dsap uint8 + Ssap uint8 + Service_name [63]uint8 + Service_name_len uint64 +} + type RawSockaddr struct { Family uint16 Data [14]int8 @@ -163,9 +175,10 @@ type Cmsghdr struct { } const ( - SizeofIovec = 0x10 - SizeofMsghdr = 0x38 - SizeofCmsghdr = 0x10 + SizeofSockaddrNFCLLCP = 0x60 + SizeofIovec = 0x10 + SizeofMsghdr = 0x38 + SizeofCmsghdr = 0x10 ) const ( diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go index 2e5ce3b6a..7f3639933 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go @@ -1,6 +1,7 @@ // cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. 
+//go:build mips64le && linux // +build mips64le,linux package unix @@ -130,6 +131,17 @@ const ( FADV_NOREUSE = 0x5 ) +type RawSockaddrNFCLLCP struct { + Sa_family uint16 + Dev_idx uint32 + Target_idx uint32 + Nfc_protocol uint32 + Dsap uint8 + Ssap uint8 + Service_name [63]uint8 + Service_name_len uint64 +} + type RawSockaddr struct { Family uint16 Data [14]int8 @@ -163,9 +175,10 @@ type Cmsghdr struct { } const ( - SizeofIovec = 0x10 - SizeofMsghdr = 0x38 - SizeofCmsghdr = 0x10 + SizeofSockaddrNFCLLCP = 0x60 + SizeofIovec = 0x10 + SizeofMsghdr = 0x38 + SizeofCmsghdr = 0x10 ) const ( diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go index bbaa1200b..f3c20cb86 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go @@ -1,6 +1,7 @@ // cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. 
+//go:build mipsle && linux // +build mipsle,linux package unix @@ -132,6 +133,17 @@ const ( FADV_NOREUSE = 0x5 ) +type RawSockaddrNFCLLCP struct { + Sa_family uint16 + Dev_idx uint32 + Target_idx uint32 + Nfc_protocol uint32 + Dsap uint8 + Ssap uint8 + Service_name [63]uint8 + Service_name_len uint32 +} + type RawSockaddr struct { Family uint16 Data [14]int8 @@ -164,9 +176,10 @@ type Cmsghdr struct { } const ( - SizeofIovec = 0x8 - SizeofMsghdr = 0x1c - SizeofCmsghdr = 0xc + SizeofSockaddrNFCLLCP = 0x58 + SizeofIovec = 0x8 + SizeofMsghdr = 0x1c + SizeofCmsghdr = 0xc ) const ( diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go new file mode 100644 index 000000000..885d27950 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go @@ -0,0 +1,639 @@ +// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/linux/types.go | go run mkpost.go +// Code generated by the command above; see README.md. DO NOT EDIT. + +//go:build ppc && linux +// +build ppc,linux + +package unix + +const ( + SizeofPtr = 0x4 + SizeofLong = 0x4 +) + +type ( + _C_long int32 +) + +type Timespec struct { + Sec int32 + Nsec int32 +} + +type Timeval struct { + Sec int32 + Usec int32 +} + +type Timex struct { + Modes uint32 + Offset int32 + Freq int32 + Maxerror int32 + Esterror int32 + Status int32 + Constant int32 + Precision int32 + Tolerance int32 + Time Timeval + Tick int32 + Ppsfreq int32 + Jitter int32 + Shift int32 + Stabil int32 + Jitcnt int32 + Calcnt int32 + Errcnt int32 + Stbcnt int32 + Tai int32 + _ [44]byte +} + +type Time_t int32 + +type Tms struct { + Utime int32 + Stime int32 + Cutime int32 + Cstime int32 +} + +type Utimbuf struct { + Actime int32 + Modtime int32 +} + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int32 + Ixrss int32 + Idrss int32 + Isrss int32 + Minflt int32 + Majflt int32 + Nswap int32 + Inblock int32 + Oublock int32 + Msgsnd int32 + Msgrcv int32 + Nsignals int32 + Nvcsw 
int32 + Nivcsw int32 +} + +type Stat_t struct { + Dev uint64 + Ino uint64 + Mode uint32 + Nlink uint32 + Uid uint32 + Gid uint32 + Rdev uint64 + _ uint16 + _ [4]byte + Size int64 + Blksize int32 + _ [4]byte + Blocks int64 + Atim Timespec + Mtim Timespec + Ctim Timespec + _ uint32 + _ uint32 +} + +type Dirent struct { + Ino uint64 + Off int64 + Reclen uint16 + Type uint8 + Name [256]uint8 + _ [5]byte +} + +type Flock_t struct { + Type int16 + Whence int16 + _ [4]byte + Start int64 + Len int64 + Pid int32 + _ [4]byte +} + +type DmNameList struct { + Dev uint64 + Next uint32 + Name [0]byte + _ [4]byte +} + +const ( + FADV_DONTNEED = 0x4 + FADV_NOREUSE = 0x5 +) + +type RawSockaddrNFCLLCP struct { + Sa_family uint16 + Dev_idx uint32 + Target_idx uint32 + Nfc_protocol uint32 + Dsap uint8 + Ssap uint8 + Service_name [63]uint8 + Service_name_len uint32 +} + +type RawSockaddr struct { + Family uint16 + Data [14]uint8 +} + +type RawSockaddrAny struct { + Addr RawSockaddr + Pad [96]uint8 +} + +type Iovec struct { + Base *byte + Len uint32 +} + +type Msghdr struct { + Name *byte + Namelen uint32 + Iov *Iovec + Iovlen uint32 + Control *byte + Controllen uint32 + Flags int32 +} + +type Cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +const ( + SizeofSockaddrNFCLLCP = 0x58 + SizeofIovec = 0x8 + SizeofMsghdr = 0x1c + SizeofCmsghdr = 0xc +) + +const ( + SizeofSockFprog = 0x8 +) + +type PtraceRegs struct { + Gpr [32]uint32 + Nip uint32 + Msr uint32 + Orig_gpr3 uint32 + Ctr uint32 + Link uint32 + Xer uint32 + Ccr uint32 + Mq uint32 + Trap uint32 + Dar uint32 + Dsisr uint32 + Result uint32 +} + +type FdSet struct { + Bits [32]int32 +} + +type Sysinfo_t struct { + Uptime int32 + Loads [3]uint32 + Totalram uint32 + Freeram uint32 + Sharedram uint32 + Bufferram uint32 + Totalswap uint32 + Freeswap uint32 + Procs uint16 + Pad uint16 + Totalhigh uint32 + Freehigh uint32 + Unit uint32 + _ [8]uint8 +} + +type Ustat_t struct { + Tfree int32 + Tinode uint32 + Fname [6]uint8 + 
Fpack [6]uint8 +} + +type EpollEvent struct { + Events uint32 + _ int32 + Fd int32 + Pad int32 +} + +const ( + POLLRDHUP = 0x2000 +) + +type Sigset_t struct { + Val [32]uint32 +} + +const _C__NSIG = 0x41 + +type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Cc [19]uint8 + Line uint8 + Ispeed uint32 + Ospeed uint32 +} + +type Taskstats struct { + Version uint16 + Ac_exitcode uint32 + Ac_flag uint8 + Ac_nice uint8 + _ [4]byte + Cpu_count uint64 + Cpu_delay_total uint64 + Blkio_count uint64 + Blkio_delay_total uint64 + Swapin_count uint64 + Swapin_delay_total uint64 + Cpu_run_real_total uint64 + Cpu_run_virtual_total uint64 + Ac_comm [32]uint8 + Ac_sched uint8 + Ac_pad [3]uint8 + _ [4]byte + Ac_uid uint32 + Ac_gid uint32 + Ac_pid uint32 + Ac_ppid uint32 + Ac_btime uint32 + _ [4]byte + Ac_etime uint64 + Ac_utime uint64 + Ac_stime uint64 + Ac_minflt uint64 + Ac_majflt uint64 + Coremem uint64 + Virtmem uint64 + Hiwater_rss uint64 + Hiwater_vm uint64 + Read_char uint64 + Write_char uint64 + Read_syscalls uint64 + Write_syscalls uint64 + Read_bytes uint64 + Write_bytes uint64 + Cancelled_write_bytes uint64 + Nvcsw uint64 + Nivcsw uint64 + Ac_utimescaled uint64 + Ac_stimescaled uint64 + Cpu_scaled_run_real_total uint64 + Freepages_count uint64 + Freepages_delay_total uint64 + Thrashing_count uint64 + Thrashing_delay_total uint64 + Ac_btime64 uint64 +} + +type cpuMask uint32 + +const ( + _NCPUBITS = 0x20 +) + +const ( + CBitFieldMaskBit0 = 0x8000000000000000 + CBitFieldMaskBit1 = 0x4000000000000000 + CBitFieldMaskBit2 = 0x2000000000000000 + CBitFieldMaskBit3 = 0x1000000000000000 + CBitFieldMaskBit4 = 0x800000000000000 + CBitFieldMaskBit5 = 0x400000000000000 + CBitFieldMaskBit6 = 0x200000000000000 + CBitFieldMaskBit7 = 0x100000000000000 + CBitFieldMaskBit8 = 0x80000000000000 + CBitFieldMaskBit9 = 0x40000000000000 + CBitFieldMaskBit10 = 0x20000000000000 + CBitFieldMaskBit11 = 0x10000000000000 + CBitFieldMaskBit12 = 0x8000000000000 + 
CBitFieldMaskBit13 = 0x4000000000000 + CBitFieldMaskBit14 = 0x2000000000000 + CBitFieldMaskBit15 = 0x1000000000000 + CBitFieldMaskBit16 = 0x800000000000 + CBitFieldMaskBit17 = 0x400000000000 + CBitFieldMaskBit18 = 0x200000000000 + CBitFieldMaskBit19 = 0x100000000000 + CBitFieldMaskBit20 = 0x80000000000 + CBitFieldMaskBit21 = 0x40000000000 + CBitFieldMaskBit22 = 0x20000000000 + CBitFieldMaskBit23 = 0x10000000000 + CBitFieldMaskBit24 = 0x8000000000 + CBitFieldMaskBit25 = 0x4000000000 + CBitFieldMaskBit26 = 0x2000000000 + CBitFieldMaskBit27 = 0x1000000000 + CBitFieldMaskBit28 = 0x800000000 + CBitFieldMaskBit29 = 0x400000000 + CBitFieldMaskBit30 = 0x200000000 + CBitFieldMaskBit31 = 0x100000000 + CBitFieldMaskBit32 = 0x80000000 + CBitFieldMaskBit33 = 0x40000000 + CBitFieldMaskBit34 = 0x20000000 + CBitFieldMaskBit35 = 0x10000000 + CBitFieldMaskBit36 = 0x8000000 + CBitFieldMaskBit37 = 0x4000000 + CBitFieldMaskBit38 = 0x2000000 + CBitFieldMaskBit39 = 0x1000000 + CBitFieldMaskBit40 = 0x800000 + CBitFieldMaskBit41 = 0x400000 + CBitFieldMaskBit42 = 0x200000 + CBitFieldMaskBit43 = 0x100000 + CBitFieldMaskBit44 = 0x80000 + CBitFieldMaskBit45 = 0x40000 + CBitFieldMaskBit46 = 0x20000 + CBitFieldMaskBit47 = 0x10000 + CBitFieldMaskBit48 = 0x8000 + CBitFieldMaskBit49 = 0x4000 + CBitFieldMaskBit50 = 0x2000 + CBitFieldMaskBit51 = 0x1000 + CBitFieldMaskBit52 = 0x800 + CBitFieldMaskBit53 = 0x400 + CBitFieldMaskBit54 = 0x200 + CBitFieldMaskBit55 = 0x100 + CBitFieldMaskBit56 = 0x80 + CBitFieldMaskBit57 = 0x40 + CBitFieldMaskBit58 = 0x20 + CBitFieldMaskBit59 = 0x10 + CBitFieldMaskBit60 = 0x8 + CBitFieldMaskBit61 = 0x4 + CBitFieldMaskBit62 = 0x2 + CBitFieldMaskBit63 = 0x1 +) + +type SockaddrStorage struct { + Family uint16 + _ [122]uint8 + _ uint32 +} + +type HDGeometry struct { + Heads uint8 + Sectors uint8 + Cylinders uint16 + Start uint32 +} + +type Statfs_t struct { + Type int32 + Bsize int32 + Blocks uint64 + Bfree uint64 + Bavail uint64 + Files uint64 + Ffree uint64 + Fsid Fsid + 
Namelen int32 + Frsize int32 + Flags int32 + Spare [4]int32 + _ [4]byte +} + +type TpacketHdr struct { + Status uint32 + Len uint32 + Snaplen uint32 + Mac uint16 + Net uint16 + Sec uint32 + Usec uint32 +} + +const ( + SizeofTpacketHdr = 0x18 +) + +type RTCPLLInfo struct { + Ctrl int32 + Value int32 + Max int32 + Min int32 + Posmult int32 + Negmult int32 + Clock int32 +} + +type BlkpgPartition struct { + Start int64 + Length int64 + Pno int32 + Devname [64]uint8 + Volname [64]uint8 + _ [4]byte +} + +const ( + BLKPG = 0x20001269 +) + +type XDPUmemReg struct { + Addr uint64 + Len uint64 + Size uint32 + Headroom uint32 + Flags uint32 + _ [4]byte +} + +type CryptoUserAlg struct { + Name [64]uint8 + Driver_name [64]uint8 + Module_name [64]uint8 + Type uint32 + Mask uint32 + Refcnt uint32 + Flags uint32 +} + +type CryptoStatAEAD struct { + Type [64]uint8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatAKCipher struct { + Type [64]uint8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen uint64 + Verify_cnt uint64 + Sign_cnt uint64 + Err_cnt uint64 +} + +type CryptoStatCipher struct { + Type [64]uint8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatCompress struct { + Type [64]uint8 + Compress_cnt uint64 + Compress_tlen uint64 + Decompress_cnt uint64 + Decompress_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatHash struct { + Type [64]uint8 + Hash_cnt uint64 + Hash_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatKPP struct { + Type [64]uint8 + Setsecret_cnt uint64 + Generate_public_key_cnt uint64 + Compute_shared_secret_cnt uint64 + Err_cnt uint64 +} + +type CryptoStatRNG struct { + Type [64]uint8 + Generate_cnt uint64 + Generate_tlen uint64 + Seed_cnt uint64 + Err_cnt uint64 +} + +type CryptoStatLarval struct { + Type [64]uint8 +} + +type CryptoReportLarval struct { + Type [64]uint8 +} + 
+type CryptoReportHash struct { + Type [64]uint8 + Blocksize uint32 + Digestsize uint32 +} + +type CryptoReportCipher struct { + Type [64]uint8 + Blocksize uint32 + Min_keysize uint32 + Max_keysize uint32 +} + +type CryptoReportBlkCipher struct { + Type [64]uint8 + Geniv [64]uint8 + Blocksize uint32 + Min_keysize uint32 + Max_keysize uint32 + Ivsize uint32 +} + +type CryptoReportAEAD struct { + Type [64]uint8 + Geniv [64]uint8 + Blocksize uint32 + Maxauthsize uint32 + Ivsize uint32 +} + +type CryptoReportComp struct { + Type [64]uint8 +} + +type CryptoReportRNG struct { + Type [64]uint8 + Seedsize uint32 +} + +type CryptoReportAKCipher struct { + Type [64]uint8 +} + +type CryptoReportKPP struct { + Type [64]uint8 +} + +type CryptoReportAcomp struct { + Type [64]uint8 +} + +type LoopInfo struct { + Number int32 + Device uint32 + Inode uint32 + Rdevice uint32 + Offset int32 + Encrypt_type int32 + Encrypt_key_size int32 + Flags int32 + Name [64]uint8 + Encrypt_key [32]uint8 + Init [2]uint32 + Reserved [4]uint8 +} + +type TIPCSubscr struct { + Seq TIPCServiceRange + Timeout uint32 + Filter uint32 + Handle [8]uint8 +} + +type TIPCSIOCLNReq struct { + Peer uint32 + Id uint32 + Linkname [68]uint8 +} + +type TIPCSIOCNodeIDReq struct { + Peer uint32 + Id [16]uint8 +} + +type PPSKInfo struct { + Assert_sequence uint32 + Clear_sequence uint32 + Assert_tu PPSKTime + Clear_tu PPSKTime + Current_mode int32 + _ [4]byte +} + +const ( + PPS_GETPARAMS = 0x400470a1 + PPS_SETPARAMS = 0x800470a2 + PPS_GETCAP = 0x400470a3 + PPS_FETCH = 0xc00470a4 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go index 0e6e8a774..a94eb8e18 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go @@ -1,6 +1,7 @@ // cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. 
+//go:build ppc64 && linux // +build ppc64,linux package unix @@ -131,6 +132,17 @@ const ( FADV_NOREUSE = 0x5 ) +type RawSockaddrNFCLLCP struct { + Sa_family uint16 + Dev_idx uint32 + Target_idx uint32 + Nfc_protocol uint32 + Dsap uint8 + Ssap uint8 + Service_name [63]uint8 + Service_name_len uint64 +} + type RawSockaddr struct { Family uint16 Data [14]uint8 @@ -164,9 +176,10 @@ type Cmsghdr struct { } const ( - SizeofIovec = 0x10 - SizeofMsghdr = 0x38 - SizeofCmsghdr = 0x10 + SizeofSockaddrNFCLLCP = 0x60 + SizeofIovec = 0x10 + SizeofMsghdr = 0x38 + SizeofCmsghdr = 0x10 ) const ( diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go index 7382f385f..659e32ebd 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go @@ -1,6 +1,7 @@ // cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. 
+//go:build ppc64le && linux // +build ppc64le,linux package unix @@ -131,6 +132,17 @@ const ( FADV_NOREUSE = 0x5 ) +type RawSockaddrNFCLLCP struct { + Sa_family uint16 + Dev_idx uint32 + Target_idx uint32 + Nfc_protocol uint32 + Dsap uint8 + Ssap uint8 + Service_name [63]uint8 + Service_name_len uint64 +} + type RawSockaddr struct { Family uint16 Data [14]uint8 @@ -164,9 +176,10 @@ type Cmsghdr struct { } const ( - SizeofIovec = 0x10 - SizeofMsghdr = 0x38 - SizeofCmsghdr = 0x10 + SizeofSockaddrNFCLLCP = 0x60 + SizeofIovec = 0x10 + SizeofMsghdr = 0x38 + SizeofCmsghdr = 0x10 ) const ( diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go index 28d552216..ab8ec604f 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go @@ -1,6 +1,7 @@ // cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. 
+//go:build riscv64 && linux // +build riscv64,linux package unix @@ -130,6 +131,17 @@ const ( FADV_NOREUSE = 0x5 ) +type RawSockaddrNFCLLCP struct { + Sa_family uint16 + Dev_idx uint32 + Target_idx uint32 + Nfc_protocol uint32 + Dsap uint8 + Ssap uint8 + Service_name [63]uint8 + Service_name_len uint64 +} + type RawSockaddr struct { Family uint16 Data [14]uint8 @@ -163,9 +175,10 @@ type Cmsghdr struct { } const ( - SizeofIovec = 0x10 - SizeofMsghdr = 0x38 - SizeofCmsghdr = 0x10 + SizeofSockaddrNFCLLCP = 0x60 + SizeofIovec = 0x10 + SizeofMsghdr = 0x38 + SizeofCmsghdr = 0x10 ) const ( diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go index a91a7a44b..3ec08237f 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go @@ -1,6 +1,7 @@ // cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char /build/linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. 
+//go:build s390x && linux // +build s390x,linux package unix @@ -129,6 +130,17 @@ const ( FADV_NOREUSE = 0x7 ) +type RawSockaddrNFCLLCP struct { + Sa_family uint16 + Dev_idx uint32 + Target_idx uint32 + Nfc_protocol uint32 + Dsap uint8 + Ssap uint8 + Service_name [63]uint8 + Service_name_len uint64 +} + type RawSockaddr struct { Family uint16 Data [14]int8 @@ -162,9 +174,10 @@ type Cmsghdr struct { } const ( - SizeofIovec = 0x10 - SizeofMsghdr = 0x38 - SizeofCmsghdr = 0x10 + SizeofSockaddrNFCLLCP = 0x60 + SizeofIovec = 0x10 + SizeofMsghdr = 0x38 + SizeofCmsghdr = 0x10 ) const ( diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go index f824b2358..23d474470 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go @@ -1,6 +1,7 @@ // cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. 
+//go:build sparc64 && linux // +build sparc64,linux package unix @@ -133,6 +134,17 @@ const ( FADV_NOREUSE = 0x5 ) +type RawSockaddrNFCLLCP struct { + Sa_family uint16 + Dev_idx uint32 + Target_idx uint32 + Nfc_protocol uint32 + Dsap uint8 + Ssap uint8 + Service_name [63]uint8 + Service_name_len uint64 +} + type RawSockaddr struct { Family uint16 Data [14]int8 @@ -166,9 +178,10 @@ type Cmsghdr struct { } const ( - SizeofIovec = 0x10 - SizeofMsghdr = 0x38 - SizeofCmsghdr = 0x10 + SizeofSockaddrNFCLLCP = 0x60 + SizeofIovec = 0x10 + SizeofMsghdr = 0x38 + SizeofCmsghdr = 0x10 ) const ( diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go index 3f11f88e3..2fd2060e6 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go @@ -1,6 +1,7 @@ // cgo -godefs types_netbsd.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build 386 && netbsd // +build 386,netbsd package unix @@ -444,8 +445,10 @@ type Ptmget struct { const ( AT_FDCWD = -0x64 - AT_SYMLINK_FOLLOW = 0x400 + AT_EACCESS = 0x100 AT_SYMLINK_NOFOLLOW = 0x200 + AT_SYMLINK_FOLLOW = 0x400 + AT_REMOVEDIR = 0x800 ) type PollFd struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go index 0bed83af5..6a5a1a8ae 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go @@ -1,6 +1,7 @@ // cgo -godefs types_netbsd.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. 
+//go:build amd64 && netbsd // +build amd64,netbsd package unix @@ -452,8 +453,10 @@ type Ptmget struct { const ( AT_FDCWD = -0x64 - AT_SYMLINK_FOLLOW = 0x400 + AT_EACCESS = 0x100 AT_SYMLINK_NOFOLLOW = 0x200 + AT_SYMLINK_FOLLOW = 0x400 + AT_REMOVEDIR = 0x800 ) type PollFd struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go index e4e3bf736..84cc8d01e 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go @@ -1,6 +1,7 @@ // cgo -godefs types_netbsd.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build arm && netbsd // +build arm,netbsd package unix @@ -449,8 +450,10 @@ type Ptmget struct { const ( AT_FDCWD = -0x64 - AT_SYMLINK_FOLLOW = 0x400 + AT_EACCESS = 0x100 AT_SYMLINK_NOFOLLOW = 0x200 + AT_SYMLINK_FOLLOW = 0x400 + AT_REMOVEDIR = 0x800 ) type PollFd struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go index efac861bb..c844e7096 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go @@ -1,6 +1,7 @@ // cgo -godefs types_netbsd.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. 
+//go:build arm64 && netbsd // +build arm64,netbsd package unix @@ -452,8 +453,10 @@ type Ptmget struct { const ( AT_FDCWD = -0x64 - AT_SYMLINK_FOLLOW = 0x400 + AT_EACCESS = 0x100 AT_SYMLINK_NOFOLLOW = 0x200 + AT_SYMLINK_FOLLOW = 0x400 + AT_REMOVEDIR = 0x800 ) type PollFd struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go index 80fa295f1..2a8b1e6f7 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go @@ -1,6 +1,7 @@ // cgo -godefs types_openbsd.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build 386 && openbsd // +build 386,openbsd package unix @@ -437,8 +438,10 @@ type Winsize struct { const ( AT_FDCWD = -0x64 - AT_SYMLINK_FOLLOW = 0x4 + AT_EACCESS = 0x1 AT_SYMLINK_NOFOLLOW = 0x2 + AT_SYMLINK_FOLLOW = 0x4 + AT_REMOVEDIR = 0x8 ) type PollFd struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go index 560dd6d08..b1759cf70 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go @@ -1,6 +1,7 @@ // cgo -godefs types_openbsd.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. 
+//go:build amd64 && openbsd // +build amd64,openbsd package unix @@ -437,8 +438,10 @@ type Winsize struct { const ( AT_FDCWD = -0x64 - AT_SYMLINK_FOLLOW = 0x4 + AT_EACCESS = 0x1 AT_SYMLINK_NOFOLLOW = 0x2 + AT_SYMLINK_FOLLOW = 0x4 + AT_REMOVEDIR = 0x8 ) type PollFd struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go index 0c1700fa4..e807de206 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go @@ -1,6 +1,7 @@ // cgo -godefs -- -fsigned-char types_openbsd.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build arm && openbsd // +build arm,openbsd package unix @@ -438,8 +439,10 @@ type Winsize struct { const ( AT_FDCWD = -0x64 - AT_SYMLINK_FOLLOW = 0x4 + AT_EACCESS = 0x1 AT_SYMLINK_NOFOLLOW = 0x2 + AT_SYMLINK_FOLLOW = 0x4 + AT_REMOVEDIR = 0x8 ) type PollFd struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go index 5b3e46633..ff3aecaee 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go @@ -1,6 +1,7 @@ // cgo -godefs -- -fsigned-char types_openbsd.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. 
+//go:build arm64 && openbsd // +build arm64,openbsd package unix @@ -431,8 +432,10 @@ type Winsize struct { const ( AT_FDCWD = -0x64 - AT_SYMLINK_FOLLOW = 0x4 + AT_EACCESS = 0x1 AT_SYMLINK_NOFOLLOW = 0x2 + AT_SYMLINK_FOLLOW = 0x4 + AT_REMOVEDIR = 0x8 ) type PollFd struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go index 62bff1670..9ecda6917 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go @@ -1,6 +1,7 @@ // cgo -godefs -- -fsigned-char types_openbsd.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build mips64 && openbsd // +build mips64,openbsd package unix @@ -431,8 +432,10 @@ type Winsize struct { const ( AT_FDCWD = -0x64 - AT_SYMLINK_FOLLOW = 0x4 + AT_EACCESS = 0x1 AT_SYMLINK_NOFOLLOW = 0x2 + AT_SYMLINK_FOLLOW = 0x4 + AT_REMOVEDIR = 0x8 ) type PollFd struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go index ca512aff7..85effef9c 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go @@ -1,6 +1,7 @@ // cgo -godefs types_solaris.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build amd64 && solaris // +build amd64,solaris package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go new file mode 100644 index 000000000..4ab638cb9 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go @@ -0,0 +1,406 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build zos && s390x +// +build zos,s390x + +// Hand edited based on ztypes_linux_s390x.go +// TODO: auto-generate. 
+ +package unix + +const ( + SizeofPtr = 0x8 + SizeofShort = 0x2 + SizeofInt = 0x4 + SizeofLong = 0x8 + SizeofLongLong = 0x8 + PathMax = 0x1000 +) + +const ( + SizeofSockaddrAny = 128 + SizeofCmsghdr = 12 + SizeofIPMreq = 8 + SizeofIPv6Mreq = 20 + SizeofICMPv6Filter = 32 + SizeofIPv6MTUInfo = 32 + SizeofLinger = 8 + SizeofSockaddrInet4 = 16 + SizeofSockaddrInet6 = 28 + SizeofTCPInfo = 0x68 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int64 + _C_long_long int64 +) + +type Timespec struct { + Sec int64 + Nsec int64 +} + +type Timeval struct { + Sec int64 + Usec int64 +} + +type timeval_zos struct { //correct (with padding and all) + Sec int64 + _ [4]byte // pad + Usec int32 +} + +type Tms struct { //clock_t is 4-byte unsigned int in zos + Utime uint32 + Stime uint32 + Cutime uint32 + Cstime uint32 +} + +type Time_t int64 + +type Utimbuf struct { + Actime int64 + Modtime int64 +} + +type Utsname struct { + Sysname [65]byte + Nodename [65]byte + Release [65]byte + Version [65]byte + Machine [65]byte + Domainname [65]byte +} + +type RawSockaddrInet4 struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]uint8 +} + +type RawSockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type RawSockaddrUnix struct { + Len uint8 + Family uint8 + Path [108]int8 +} + +type RawSockaddr struct { + Len uint8 + Family uint8 + Data [14]uint8 +} + +type RawSockaddrAny struct { + Addr RawSockaddr + _ [112]uint8 // pad +} + +type _Socklen uint32 + +type Linger struct { + Onoff int32 + Linger int32 +} + +type Iovec struct { + Base *byte + Len uint64 +} + +type IPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type IPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type Msghdr struct { + Name *byte + Iov *Iovec + Control *byte + Flags int32 + Namelen int32 + Iovlen int32 + Controllen int32 +} + +type 
Cmsghdr struct { + Len int32 + Level int32 + Type int32 +} + +type Inet4Pktinfo struct { + Addr [4]byte /* in_addr */ + Ifindex uint32 +} + +type Inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type IPv6MTUInfo struct { + Addr RawSockaddrInet6 + Mtu uint32 +} + +type ICMPv6Filter struct { + Data [8]uint32 +} + +type TCPInfo struct { + State uint8 + Ca_state uint8 + Retransmits uint8 + Probes uint8 + Backoff uint8 + Options uint8 + Rto uint32 + Ato uint32 + Snd_mss uint32 + Rcv_mss uint32 + Unacked uint32 + Sacked uint32 + Lost uint32 + Retrans uint32 + Fackets uint32 + Last_data_sent uint32 + Last_ack_sent uint32 + Last_data_recv uint32 + Last_ack_recv uint32 + Pmtu uint32 + Rcv_ssthresh uint32 + Rtt uint32 + Rttvar uint32 + Snd_ssthresh uint32 + Snd_cwnd uint32 + Advmss uint32 + Reordering uint32 + Rcv_rtt uint32 + Rcv_space uint32 + Total_retrans uint32 +} + +type _Gid_t uint32 + +type rusage_zos struct { + Utime timeval_zos + Stime timeval_zos +} + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int64 + Ixrss int64 + Idrss int64 + Isrss int64 + Minflt int64 + Majflt int64 + Nswap int64 + Inblock int64 + Oublock int64 + Msgsnd int64 + Msgrcv int64 + Nsignals int64 + Nvcsw int64 + Nivcsw int64 +} + +type Rlimit struct { + Cur uint64 + Max uint64 +} + +// { int, short, short } in poll.h +type PollFd struct { + Fd int32 + Events int16 + Revents int16 +} + +type Stat_t struct { //Linux Definition + Dev uint64 + Ino uint64 + Nlink uint64 + Mode uint32 + Uid uint32 + Gid uint32 + _ int32 + Rdev uint64 + Size int64 + Atim Timespec + Mtim Timespec + Ctim Timespec + Blksize int64 + Blocks int64 + _ [3]int64 +} + +type Stat_LE_t struct { + _ [4]byte // eye catcher + Length uint16 + Version uint16 + Mode int32 + Ino uint32 + Dev uint32 + Nlink int32 + Uid int32 + Gid int32 + Size int64 + Atim31 [4]byte + Mtim31 [4]byte + Ctim31 [4]byte + Rdev uint32 + Auditoraudit uint32 + Useraudit uint32 + Blksize int32 + Creatim31 [4]byte + 
AuditID [16]byte + _ [4]byte // rsrvd1 + File_tag struct { + Ccsid uint16 + Txtflag uint16 // aggregating Txflag:1 deferred:1 rsvflags:14 + } + CharsetID [8]byte + Blocks int64 + Genvalue uint32 + Reftim31 [4]byte + Fid [8]byte + Filefmt byte + Fspflag2 byte + _ [2]byte // rsrvd2 + Ctimemsec int32 + Seclabel [8]byte + _ [4]byte // rsrvd3 + _ [4]byte // rsrvd4 + Atim Time_t + Mtim Time_t + Ctim Time_t + Creatim Time_t + Reftim Time_t + _ [24]byte // rsrvd5 +} + +type Statvfs_t struct { + ID [4]byte + Len int32 + Bsize uint64 + Blocks uint64 + Usedspace uint64 + Bavail uint64 + Flag uint64 + Maxfilesize int64 + _ [16]byte + Frsize uint64 + Bfree uint64 + Files uint32 + Ffree uint32 + Favail uint32 + Namemax31 uint32 + Invarsec uint32 + _ [4]byte + Fsid uint64 + Namemax uint64 +} + +type Statfs_t struct { + Type uint32 + Bsize uint64 + Blocks uint64 + Bfree uint64 + Bavail uint64 + Files uint32 + Ffree uint32 + Fsid uint64 + Namelen uint64 + Frsize uint64 + Flags uint64 +} + +type Dirent struct { + Reclen uint16 + Namlen uint16 + Ino uint32 + Extra uintptr + Name [256]byte +} + +type FdSet struct { + Bits [64]int32 +} + +// This struct is packed on z/OS so it can't be used directly. 
+type Flock_t struct { + Type int16 + Whence int16 + Start int64 + Len int64 + Pid int32 +} + +type Termios struct { + Cflag uint32 + Iflag uint32 + Lflag uint32 + Oflag uint32 + Cc [11]uint8 +} + +type Winsize struct { + Row uint16 + Col uint16 + Xpixel uint16 + Ypixel uint16 +} + +type W_Mnth struct { + Hid [4]byte + Size int32 + Cur1 int32 //32bit pointer + Cur2 int32 //^ + Devno uint32 + _ [4]byte +} + +type W_Mntent struct { + Fstype uint32 + Mode uint32 + Dev uint32 + Parentdev uint32 + Rootino uint32 + Status byte + Ddname [9]byte + Fstname [9]byte + Fsname [45]byte + Pathlen uint32 + Mountpoint [1024]byte + Jobname [8]byte + PID int32 + Parmoffset int32 + Parmlen int16 + Owner [8]byte + Quiesceowner [8]byte + _ [38]byte +} diff --git a/vendor/golang.org/x/sys/windows/empty.s b/vendor/golang.org/x/sys/windows/empty.s index 69309e4da..fdbbbcd31 100644 --- a/vendor/golang.org/x/sys/windows/empty.s +++ b/vendor/golang.org/x/sys/windows/empty.s @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !go1.12 // +build !go1.12 // This file is here to allow bodyless functions with go:linkname for Go 1.11 diff --git a/vendor/golang.org/x/sys/windows/exec_windows.go b/vendor/golang.org/x/sys/windows/exec_windows.go index 3606c3a8b..7a11e83b7 100644 --- a/vendor/golang.org/x/sys/windows/exec_windows.go +++ b/vendor/golang.org/x/sys/windows/exec_windows.go @@ -6,6 +6,13 @@ package windows +import ( + errorspkg "errors" + "unsafe" + + "golang.org/x/sys/internal/unsafeheader" +) + // EscapeArg rewrites command line argument s as prescribed // in http://msdn.microsoft.com/en-us/library/ms880421. // This function returns "" (2 double quotes) if s is empty. 
@@ -73,6 +80,40 @@ func EscapeArg(s string) string { return string(qs[:j]) } +// ComposeCommandLine escapes and joins the given arguments suitable for use as a Windows command line, +// in CreateProcess's CommandLine argument, CreateService/ChangeServiceConfig's BinaryPathName argument, +// or any program that uses CommandLineToArgv. +func ComposeCommandLine(args []string) string { + var commandLine string + for i := range args { + if i > 0 { + commandLine += " " + } + commandLine += EscapeArg(args[i]) + } + return commandLine +} + +// DecomposeCommandLine breaks apart its argument command line into unescaped parts using CommandLineToArgv, +// as gathered from GetCommandLine, QUERY_SERVICE_CONFIG's BinaryPathName argument, or elsewhere that +// command lines are passed around. +func DecomposeCommandLine(commandLine string) ([]string, error) { + if len(commandLine) == 0 { + return []string{}, nil + } + var argc int32 + argv, err := CommandLineToArgv(StringToUTF16Ptr(commandLine), &argc) + if err != nil { + return nil, err + } + defer LocalFree(Handle(unsafe.Pointer(argv))) + var args []string + for _, v := range (*argv)[:argc] { + args = append(args, UTF16ToString((*v)[:])) + } + return args, nil +} + func CloseOnExec(fd Handle) { SetHandleInformation(Handle(fd), HANDLE_FLAG_INHERIT, 0) } @@ -95,3 +136,60 @@ func FullPath(name string) (path string, err error) { } } } + +// NewProcThreadAttributeList allocates a new ProcThreadAttributeListContainer, with the requested maximum number of attributes. +func NewProcThreadAttributeList(maxAttrCount uint32) (*ProcThreadAttributeListContainer, error) { + var size uintptr + err := initializeProcThreadAttributeList(nil, maxAttrCount, 0, &size) + if err != ERROR_INSUFFICIENT_BUFFER { + if err == nil { + return nil, errorspkg.New("unable to query buffer size from InitializeProcThreadAttributeList") + } + return nil, err + } + // size is guaranteed to be ≥1 by InitializeProcThreadAttributeList. 
+ al := &ProcThreadAttributeListContainer{data: (*ProcThreadAttributeList)(unsafe.Pointer(&make([]byte, size)[0]))} + err = initializeProcThreadAttributeList(al.data, maxAttrCount, 0, &size) + if err != nil { + return nil, err + } + return al, err +} + +// Update modifies the ProcThreadAttributeList using UpdateProcThreadAttribute. +// Note that the value passed to this function will be copied into memory +// allocated by LocalAlloc, the contents of which should not contain any +// Go-managed pointers, even if the passed value itself is a Go-managed +// pointer. +func (al *ProcThreadAttributeListContainer) Update(attribute uintptr, value unsafe.Pointer, size uintptr) error { + alloc, err := LocalAlloc(LMEM_FIXED, uint32(size)) + if err != nil { + return err + } + var src, dst []byte + hdr := (*unsafeheader.Slice)(unsafe.Pointer(&src)) + hdr.Data = value + hdr.Cap = int(size) + hdr.Len = int(size) + hdr = (*unsafeheader.Slice)(unsafe.Pointer(&dst)) + hdr.Data = unsafe.Pointer(alloc) + hdr.Cap = int(size) + hdr.Len = int(size) + copy(dst, src) + al.heapAllocations = append(al.heapAllocations, alloc) + return updateProcThreadAttribute(al.data, 0, attribute, unsafe.Pointer(alloc), size, nil, nil) +} + +// Delete frees ProcThreadAttributeList's resources. +func (al *ProcThreadAttributeListContainer) Delete() { + deleteProcThreadAttributeList(al.data) + for i := range al.heapAllocations { + LocalFree(Handle(al.heapAllocations[i])) + } + al.heapAllocations = nil +} + +// List returns the actual ProcThreadAttributeList to be passed to StartupInfoEx. 
+func (al *ProcThreadAttributeListContainer) List() *ProcThreadAttributeList { + return al.data +} diff --git a/vendor/golang.org/x/sys/windows/mkerrors.bash b/vendor/golang.org/x/sys/windows/mkerrors.bash index 2163843a1..58e0188fb 100644 --- a/vendor/golang.org/x/sys/windows/mkerrors.bash +++ b/vendor/golang.org/x/sys/windows/mkerrors.bash @@ -9,6 +9,8 @@ shopt -s nullglob winerror="$(printf '%s\n' "/mnt/c/Program Files (x86)/Windows Kits/"/*/Include/*/shared/winerror.h | sort -Vr | head -n 1)" [[ -n $winerror ]] || { echo "Unable to find winerror.h" >&2; exit 1; } +ntstatus="$(printf '%s\n' "/mnt/c/Program Files (x86)/Windows Kits/"/*/Include/*/shared/ntstatus.h | sort -Vr | head -n 1)" +[[ -n $ntstatus ]] || { echo "Unable to find ntstatus.h" >&2; exit 1; } declare -A errors @@ -59,5 +61,10 @@ declare -A errors echo "$key $vtype = $value" done < "$winerror" + while read -r line; do + [[ $line =~ ^#define\ (STATUS_[^\s]+)\ +\(\(NTSTATUS\)((0x)?[0-9a-fA-F]+)L?\) ]] || continue + echo "${BASH_REMATCH[1]} NTStatus = ${BASH_REMATCH[2]}" + done < "$ntstatus" + echo ")" } | gofmt > "zerrors_windows.go" diff --git a/vendor/golang.org/x/sys/windows/security_windows.go b/vendor/golang.org/x/sys/windows/security_windows.go index 69eb462c5..111c10d3a 100644 --- a/vendor/golang.org/x/sys/windows/security_windows.go +++ b/vendor/golang.org/x/sys/windows/security_windows.go @@ -908,6 +908,19 @@ type SECURITY_DESCRIPTOR struct { dacl *ACL } +type SECURITY_QUALITY_OF_SERVICE struct { + Length uint32 + ImpersonationLevel uint32 + ContextTrackingMode byte + EffectiveOnly byte +} + +// Constants for the ContextTrackingMode field of SECURITY_QUALITY_OF_SERVICE. 
+const ( + SECURITY_STATIC_TRACKING = 0 + SECURITY_DYNAMIC_TRACKING = 1 +) + type SecurityAttributes struct { Length uint32 SecurityDescriptor *SECURITY_DESCRIPTOR @@ -1321,7 +1334,11 @@ func (absoluteSD *SECURITY_DESCRIPTOR) ToSelfRelative() (selfRelativeSD *SECURIT } func (selfRelativeSD *SECURITY_DESCRIPTOR) copySelfRelativeSecurityDescriptor() *SECURITY_DESCRIPTOR { - sdLen := (int)(selfRelativeSD.Length()) + sdLen := int(selfRelativeSD.Length()) + const min = int(unsafe.Sizeof(SECURITY_DESCRIPTOR{})) + if sdLen < min { + sdLen = min + } var src []byte h := (*unsafeheader.Slice)(unsafe.Pointer(&src)) @@ -1329,7 +1346,15 @@ func (selfRelativeSD *SECURITY_DESCRIPTOR) copySelfRelativeSecurityDescriptor() h.Len = sdLen h.Cap = sdLen - dst := make([]byte, sdLen) + const psize = int(unsafe.Sizeof(uintptr(0))) + + var dst []byte + h = (*unsafeheader.Slice)(unsafe.Pointer(&dst)) + alloc := make([]uintptr, (sdLen+psize-1)/psize) + h.Data = (*unsafeheader.Slice)(unsafe.Pointer(&alloc)).Data + h.Len = sdLen + h.Cap = sdLen + copy(dst, src) return (*SECURITY_DESCRIPTOR)(unsafe.Pointer(&dst[0])) } diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index 0197df872..1215b2ae2 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -8,6 +8,8 @@ package windows import ( errorspkg "errors" + "fmt" + "runtime" "sync" "syscall" "time" @@ -65,9 +67,8 @@ const ( LOCKFILE_FAIL_IMMEDIATELY = 0x00000001 LOCKFILE_EXCLUSIVE_LOCK = 0x00000002 - // Return values of SleepEx and other APC functions - STATUS_USER_APC = 0x000000C0 - WAIT_IO_COMPLETION = STATUS_USER_APC + // Return value of SleepEx and other APC functions + WAIT_IO_COMPLETION = 0x000000C0 ) // StringToUTF16 is deprecated. Use UTF16FromString instead. 
@@ -180,6 +181,11 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys IsWow64Process(handle Handle, isWow64 *bool) (err error) = IsWow64Process //sys IsWow64Process2(handle Handle, processMachine *uint16, nativeMachine *uint16) (err error) = IsWow64Process2? //sys CreateFile(name *uint16, access uint32, mode uint32, sa *SecurityAttributes, createmode uint32, attrs uint32, templatefile Handle) (handle Handle, err error) [failretval==InvalidHandle] = CreateFileW +//sys CreateNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *SecurityAttributes) (handle Handle, err error) [failretval==InvalidHandle] = CreateNamedPipeW +//sys ConnectNamedPipe(pipe Handle, overlapped *Overlapped) (err error) +//sys GetNamedPipeInfo(pipe Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) +//sys GetNamedPipeHandleState(pipe Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW +//sys SetNamedPipeHandleState(pipe Handle, state *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32) (err error) = SetNamedPipeHandleState //sys ReadFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) (err error) //sys WriteFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) (err error) //sys GetOverlappedResult(handle Handle, overlapped *Overlapped, done *uint32, wait bool) (err error) @@ -208,12 +214,16 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys GetSystemTimeAsFileTime(time *Filetime) //sys GetSystemTimePreciseAsFileTime(time *Filetime) //sys GetTimeZoneInformation(tzi *Timezoneinformation) (rc uint32, err error) [failretval==0xffffffff] -//sys CreateIoCompletionPort(filehandle Handle, cphandle Handle, key uint32, threadcnt uint32) (handle Handle, err error) -//sys GetQueuedCompletionStatus(cphandle 
Handle, qty *uint32, key *uint32, overlapped **Overlapped, timeout uint32) (err error) -//sys PostQueuedCompletionStatus(cphandle Handle, qty uint32, key uint32, overlapped *Overlapped) (err error) +//sys CreateIoCompletionPort(filehandle Handle, cphandle Handle, key uintptr, threadcnt uint32) (handle Handle, err error) +//sys GetQueuedCompletionStatus(cphandle Handle, qty *uint32, key *uintptr, overlapped **Overlapped, timeout uint32) (err error) +//sys PostQueuedCompletionStatus(cphandle Handle, qty uint32, key uintptr, overlapped *Overlapped) (err error) //sys CancelIo(s Handle) (err error) //sys CancelIoEx(s Handle, o *Overlapped) (err error) //sys CreateProcess(appName *uint16, commandLine *uint16, procSecurity *SecurityAttributes, threadSecurity *SecurityAttributes, inheritHandles bool, creationFlags uint32, env *uint16, currentDir *uint16, startupInfo *StartupInfo, outProcInfo *ProcessInformation) (err error) = CreateProcessW +//sys CreateProcessAsUser(token Token, appName *uint16, commandLine *uint16, procSecurity *SecurityAttributes, threadSecurity *SecurityAttributes, inheritHandles bool, creationFlags uint32, env *uint16, currentDir *uint16, startupInfo *StartupInfo, outProcInfo *ProcessInformation) (err error) = advapi32.CreateProcessAsUserW +//sys initializeProcThreadAttributeList(attrlist *ProcThreadAttributeList, attrcount uint32, flags uint32, size *uintptr) (err error) = InitializeProcThreadAttributeList +//sys deleteProcThreadAttributeList(attrlist *ProcThreadAttributeList) = DeleteProcThreadAttributeList +//sys updateProcThreadAttribute(attrlist *ProcThreadAttributeList, flags uint32, attr uintptr, value unsafe.Pointer, size uintptr, prevvalue unsafe.Pointer, returnedsize *uintptr) (err error) = UpdateProcThreadAttribute //sys OpenProcess(desiredAccess uint32, inheritHandle bool, processId uint32) (handle Handle, err error) //sys ShellExecute(hwnd Handle, verb *uint16, file *uint16, args *uint16, cwd *uint16, showCmd int32) (err error) 
[failretval<=32] = shell32.ShellExecuteW //sys GetWindowThreadProcessId(hwnd HWND, pid *uint32) (tid uint32, err error) = user32.GetWindowThreadProcessId @@ -248,13 +258,14 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys GetCommandLine() (cmd *uint16) = kernel32.GetCommandLineW //sys CommandLineToArgv(cmd *uint16, argc *int32) (argv *[8192]*[8192]uint16, err error) [failretval==nil] = shell32.CommandLineToArgvW //sys LocalFree(hmem Handle) (handle Handle, err error) [failretval!=0] +//sys LocalAlloc(flags uint32, length uint32) (ptr uintptr, err error) //sys SetHandleInformation(handle Handle, mask uint32, flags uint32) (err error) //sys FlushFileBuffers(handle Handle) (err error) //sys GetFullPathName(path *uint16, buflen uint32, buf *uint16, fname **uint16) (n uint32, err error) = kernel32.GetFullPathNameW //sys GetLongPathName(path *uint16, buf *uint16, buflen uint32) (n uint32, err error) = kernel32.GetLongPathNameW //sys GetShortPathName(longpath *uint16, shortpath *uint16, buflen uint32) (n uint32, err error) = kernel32.GetShortPathNameW //sys GetFinalPathNameByHandle(file Handle, filePath *uint16, filePathSize uint32, flags uint32) (n uint32, err error) = kernel32.GetFinalPathNameByHandleW -//sys CreateFileMapping(fhandle Handle, sa *SecurityAttributes, prot uint32, maxSizeHigh uint32, maxSizeLow uint32, name *uint16) (handle Handle, err error) = kernel32.CreateFileMappingW +//sys CreateFileMapping(fhandle Handle, sa *SecurityAttributes, prot uint32, maxSizeHigh uint32, maxSizeLow uint32, name *uint16) (handle Handle, err error) [failretval == 0 || e1 == ERROR_ALREADY_EXISTS] = kernel32.CreateFileMappingW //sys MapViewOfFile(handle Handle, access uint32, offsetHigh uint32, offsetLow uint32, length uintptr) (addr uintptr, err error) //sys UnmapViewOfFile(addr uintptr) (err error) //sys FlushViewOfFile(addr uintptr, length uintptr) (err error) @@ -283,6 +294,9 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys 
CertVerifyCertificateChainPolicy(policyOID uintptr, chain *CertChainContext, para *CertChainPolicyPara, status *CertChainPolicyStatus) (err error) = crypt32.CertVerifyCertificateChainPolicy //sys CertGetNameString(certContext *CertContext, nameType uint32, flags uint32, typePara unsafe.Pointer, name *uint16, size uint32) (chars uint32) = crypt32.CertGetNameStringW //sys CertFindExtension(objId *byte, countExtensions uint32, extensions *CertExtension) (ret *CertExtension) = crypt32.CertFindExtension +//sys CertFindCertificateInStore(store Handle, certEncodingType uint32, findFlags uint32, findType uint32, findPara unsafe.Pointer, prevCertContext *CertContext) (cert *CertContext, err error) [failretval==nil] = crypt32.CertFindCertificateInStore +//sys CertFindChainInStore(store Handle, certEncodingType uint32, findFlags uint32, findType uint32, findPara unsafe.Pointer, prevChainContext *CertChainContext) (certchain *CertChainContext, err error) [failretval==nil] = crypt32.CertFindChainInStore +//sys CryptAcquireCertificatePrivateKey(cert *CertContext, flags uint32, parameters unsafe.Pointer, cryptProvOrNCryptKey *Handle, keySpec *uint32, callerFreeProvOrNCryptKey *bool) (err error) = crypt32.CryptAcquireCertificatePrivateKey //sys CryptQueryObject(objectType uint32, object unsafe.Pointer, expectedContentTypeFlags uint32, expectedFormatTypeFlags uint32, flags uint32, msgAndCertEncodingType *uint32, contentType *uint32, formatType *uint32, certStore *Handle, msg *Handle, context *unsafe.Pointer) (err error) = crypt32.CryptQueryObject //sys CryptDecodeObject(encodingType uint32, structType *byte, encodedBytes *byte, lenEncodedBytes uint32, flags uint32, decoded unsafe.Pointer, decodedLen *uint32) (err error) = crypt32.CryptDecodeObject //sys CryptProtectData(dataIn *DataBlob, name *uint16, optionalEntropy *DataBlob, reserved uintptr, promptStruct *CryptProtectPromptStruct, flags uint32, dataOut *DataBlob) (err error) = crypt32.CryptProtectData @@ -312,14 +326,14 @@ func 
NewCallbackCDecl(fn interface{}) uintptr { //sys CreateSymbolicLink(symlinkfilename *uint16, targetfilename *uint16, flags uint32) (err error) [failretval&0xff==0] = CreateSymbolicLinkW //sys CreateHardLink(filename *uint16, existingfilename *uint16, reserved uintptr) (err error) [failretval&0xff==0] = CreateHardLinkW //sys GetCurrentThreadId() (id uint32) -//sys CreateEvent(eventAttrs *SecurityAttributes, manualReset uint32, initialState uint32, name *uint16) (handle Handle, err error) = kernel32.CreateEventW -//sys CreateEventEx(eventAttrs *SecurityAttributes, name *uint16, flags uint32, desiredAccess uint32) (handle Handle, err error) = kernel32.CreateEventExW +//sys CreateEvent(eventAttrs *SecurityAttributes, manualReset uint32, initialState uint32, name *uint16) (handle Handle, err error) [failretval == 0 || e1 == ERROR_ALREADY_EXISTS] = kernel32.CreateEventW +//sys CreateEventEx(eventAttrs *SecurityAttributes, name *uint16, flags uint32, desiredAccess uint32) (handle Handle, err error) [failretval == 0 || e1 == ERROR_ALREADY_EXISTS] = kernel32.CreateEventExW //sys OpenEvent(desiredAccess uint32, inheritHandle bool, name *uint16) (handle Handle, err error) = kernel32.OpenEventW //sys SetEvent(event Handle) (err error) = kernel32.SetEvent //sys ResetEvent(event Handle) (err error) = kernel32.ResetEvent //sys PulseEvent(event Handle) (err error) = kernel32.PulseEvent -//sys CreateMutex(mutexAttrs *SecurityAttributes, initialOwner bool, name *uint16) (handle Handle, err error) = kernel32.CreateMutexW -//sys CreateMutexEx(mutexAttrs *SecurityAttributes, name *uint16, flags uint32, desiredAccess uint32) (handle Handle, err error) = kernel32.CreateMutexExW +//sys CreateMutex(mutexAttrs *SecurityAttributes, initialOwner bool, name *uint16) (handle Handle, err error) [failretval == 0 || e1 == ERROR_ALREADY_EXISTS] = kernel32.CreateMutexW +//sys CreateMutexEx(mutexAttrs *SecurityAttributes, name *uint16, flags uint32, desiredAccess uint32) (handle Handle, err error) 
[failretval == 0 || e1 == ERROR_ALREADY_EXISTS] = kernel32.CreateMutexExW //sys OpenMutex(desiredAccess uint32, inheritHandle bool, name *uint16) (handle Handle, err error) = kernel32.OpenMutexW //sys ReleaseMutex(mutex Handle) (err error) = kernel32.ReleaseMutex //sys SleepEx(milliseconds uint32, alertable bool) (ret uint32) = kernel32.SleepEx @@ -334,10 +348,13 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys SetInformationJobObject(job Handle, JobObjectInformationClass uint32, JobObjectInformation uintptr, JobObjectInformationLength uint32) (ret int, err error) //sys GenerateConsoleCtrlEvent(ctrlEvent uint32, processGroupID uint32) (err error) //sys GetProcessId(process Handle) (id uint32, err error) +//sys QueryFullProcessImageName(proc Handle, flags uint32, exeName *uint16, size *uint32) (err error) = kernel32.QueryFullProcessImageNameW //sys OpenThread(desiredAccess uint32, inheritHandle bool, threadId uint32) (handle Handle, err error) //sys SetProcessPriorityBoost(process Handle, disable bool) (err error) = kernel32.SetProcessPriorityBoost //sys GetProcessWorkingSetSizeEx(hProcess Handle, lpMinimumWorkingSetSize *uintptr, lpMaximumWorkingSetSize *uintptr, flags *uint32) //sys SetProcessWorkingSetSizeEx(hProcess Handle, dwMinimumWorkingSetSize uintptr, dwMaximumWorkingSetSize uintptr, flags uint32) (err error) +//sys GetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) +//sys SetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) // Volume Management Functions //sys DefineDosDevice(flags uint32, deviceName *uint16, targetPath *uint16) (err error) = DefineDosDeviceW @@ -367,16 +384,36 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys stringFromGUID2(rguid *GUID, lpsz *uint16, cchMax int32) (chars int32) = ole32.StringFromGUID2 //sys coCreateGuid(pguid *GUID) (ret error) = ole32.CoCreateGuid //sys CoTaskMemFree(address unsafe.Pointer) = ole32.CoTaskMemFree -//sys rtlGetVersion(info *OsVersionInfoEx) (ret error) = 
ntdll.RtlGetVersion -//sys rtlGetNtVersionNumbers(majorVersion *uint32, minorVersion *uint32, buildNumber *uint32) = ntdll.RtlGetNtVersionNumbers +//sys CoInitializeEx(reserved uintptr, coInit uint32) (ret error) = ole32.CoInitializeEx +//sys CoUninitialize() = ole32.CoUninitialize +//sys CoGetObject(name *uint16, bindOpts *BIND_OPTS3, guid *GUID, functionTable **uintptr) (ret error) = ole32.CoGetObject //sys getProcessPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) = kernel32.GetProcessPreferredUILanguages //sys getThreadPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) = kernel32.GetThreadPreferredUILanguages //sys getUserPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) = kernel32.GetUserPreferredUILanguages //sys getSystemPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) = kernel32.GetSystemPreferredUILanguages +//sys findResource(module Handle, name uintptr, resType uintptr) (resInfo Handle, err error) = kernel32.FindResourceW +//sys SizeofResource(module Handle, resInfo Handle) (size uint32, err error) = kernel32.SizeofResource +//sys LoadResource(module Handle, resInfo Handle) (resData Handle, err error) = kernel32.LoadResource +//sys LockResource(resData Handle) (addr uintptr, err error) = kernel32.LockResource // Process Status API (PSAPI) //sys EnumProcesses(processIds []uint32, bytesReturned *uint32) (err error) = psapi.EnumProcesses +// NT Native APIs +//sys rtlNtStatusToDosErrorNoTeb(ntstatus NTStatus) (ret syscall.Errno) = ntdll.RtlNtStatusToDosErrorNoTeb +//sys rtlGetVersion(info *OsVersionInfoEx) (ntstatus error) = ntdll.RtlGetVersion +//sys rtlGetNtVersionNumbers(majorVersion *uint32, minorVersion *uint32, buildNumber *uint32) = ntdll.RtlGetNtVersionNumbers +//sys RtlGetCurrentPeb() (peb *PEB) = ntdll.RtlGetCurrentPeb +//sys 
RtlInitUnicodeString(destinationString *NTUnicodeString, sourceString *uint16) = ntdll.RtlInitUnicodeString +//sys RtlInitString(destinationString *NTString, sourceString *byte) = ntdll.RtlInitString +//sys NtCreateFile(handle *Handle, access uint32, oa *OBJECT_ATTRIBUTES, iosb *IO_STATUS_BLOCK, allocationSize *int64, attributes uint32, share uint32, disposition uint32, options uint32, eabuffer uintptr, ealength uint32) (ntstatus error) = ntdll.NtCreateFile +//sys NtCreateNamedPipeFile(pipe *Handle, access uint32, oa *OBJECT_ATTRIBUTES, iosb *IO_STATUS_BLOCK, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (ntstatus error) = ntdll.NtCreateNamedPipeFile +//sys RtlDosPathNameToNtPathName(dosName *uint16, ntName *NTUnicodeString, ntFileNamePart *uint16, relativeName *RTL_RELATIVE_NAME) (ntstatus error) = ntdll.RtlDosPathNameToNtPathName_U_WithStatus +//sys RtlDosPathNameToRelativeNtPathName(dosName *uint16, ntName *NTUnicodeString, ntFileNamePart *uint16, relativeName *RTL_RELATIVE_NAME) (ntstatus error) = ntdll.RtlDosPathNameToRelativeNtPathName_U_WithStatus +//sys RtlDefaultNpAcl(acl **ACL) (ntstatus error) = ntdll.RtlDefaultNpAcl +//sys NtQueryInformationProcess(proc Handle, procInfoClass int32, procInfo unsafe.Pointer, procInfoLen uint32, retLen *uint32) (ntstatus error) = ntdll.NtQueryInformationProcess +//sys NtSetInformationProcess(proc Handle, procInfoClass int32, procInfo unsafe.Pointer, procInfoLen uint32) (ntstatus error) = ntdll.NtSetInformationProcess + // syscall interface implementation for other packages // GetCurrentProcess returns the handle for the current process. 
@@ -770,6 +807,7 @@ const socket_error = uintptr(^uint32(0)) //sys WSASend(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, overlapped *Overlapped, croutine *byte) (err error) [failretval==socket_error] = ws2_32.WSASend //sys WSARecvFrom(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32, from *RawSockaddrAny, fromlen *int32, overlapped *Overlapped, croutine *byte) (err error) [failretval==socket_error] = ws2_32.WSARecvFrom //sys WSASendTo(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, to *RawSockaddrAny, tolen int32, overlapped *Overlapped, croutine *byte) (err error) [failretval==socket_error] = ws2_32.WSASendTo +//sys WSASocket(af int32, typ int32, protocol int32, protoInfo *WSAProtocolInfo, group uint32, flags uint32) (handle Handle, err error) [failretval==InvalidHandle] = ws2_32.WSASocketW //sys GetHostByName(name string) (h *Hostent, err error) [failretval==nil] = ws2_32.gethostbyname //sys GetServByName(name string, proto string) (s *Servent, err error) [failretval==nil] = ws2_32.getservbyname //sys Ntohs(netshort uint16) (u uint16) = ws2_32.ntohs @@ -783,6 +821,7 @@ const socket_error = uintptr(^uint32(0)) //sys GetAdaptersInfo(ai *IpAdapterInfo, ol *uint32) (errcode error) = iphlpapi.GetAdaptersInfo //sys SetFileCompletionNotificationModes(handle Handle, flags uint8) (err error) = kernel32.SetFileCompletionNotificationModes //sys WSAEnumProtocols(protocols *int32, protocolBuffer *WSAProtocolInfo, bufferLength *uint32) (n int32, err error) [failretval==-1] = ws2_32.WSAEnumProtocolsW +//sys WSAGetOverlappedResult(h Handle, o *Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) = ws2_32.WSAGetOverlappedResult //sys GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapterAddresses *IpAdapterAddresses, sizePointer *uint32) (errcode error) = iphlpapi.GetAdaptersAddresses //sys GetACP() (acp uint32) = kernel32.GetACP //sys MultiByteToWideChar(codePage uint32, dwFlags uint32, 
str *byte, nstr int32, wchar *uint16, nwchar int32) (nwrite int32, err error) = kernel32.MultiByteToWideChar @@ -1505,3 +1544,129 @@ func getUILanguages(flags uint32, f func(flags uint32, numLanguages *uint32, buf func SetConsoleCursorPosition(console Handle, position Coord) error { return setConsoleCursorPosition(console, *((*uint32)(unsafe.Pointer(&position)))) } + +func (s NTStatus) Errno() syscall.Errno { + return rtlNtStatusToDosErrorNoTeb(s) +} + +func langID(pri, sub uint16) uint32 { return uint32(sub)<<10 | uint32(pri) } + +func (s NTStatus) Error() string { + b := make([]uint16, 300) + n, err := FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM|FORMAT_MESSAGE_FROM_HMODULE|FORMAT_MESSAGE_ARGUMENT_ARRAY, modntdll.Handle(), uint32(s), langID(LANG_ENGLISH, SUBLANG_ENGLISH_US), b, nil) + if err != nil { + return fmt.Sprintf("NTSTATUS 0x%08x", uint32(s)) + } + // trim terminating \r and \n + for ; n > 0 && (b[n-1] == '\n' || b[n-1] == '\r'); n-- { + } + return string(utf16.Decode(b[:n])) +} + +// NewNTUnicodeString returns a new NTUnicodeString structure for use with native +// NT APIs that work over the NTUnicodeString type. Note that most Windows APIs +// do not use NTUnicodeString, and instead UTF16PtrFromString should be used for +// the more common *uint16 string type. +func NewNTUnicodeString(s string) (*NTUnicodeString, error) { + var u NTUnicodeString + s16, err := UTF16PtrFromString(s) + if err != nil { + return nil, err + } + RtlInitUnicodeString(&u, s16) + return &u, nil +} + +// Slice returns a uint16 slice that aliases the data in the NTUnicodeString. 
+func (s *NTUnicodeString) Slice() []uint16 { + var slice []uint16 + hdr := (*unsafeheader.Slice)(unsafe.Pointer(&slice)) + hdr.Data = unsafe.Pointer(s.Buffer) + hdr.Len = int(s.Length) + hdr.Cap = int(s.MaximumLength) + return slice +} + +func (s *NTUnicodeString) String() string { + return UTF16ToString(s.Slice()) +} + +// NewNTString returns a new NTString structure for use with native +// NT APIs that work over the NTString type. Note that most Windows APIs +// do not use NTString, and instead UTF16PtrFromString should be used for +// the more common *uint16 string type. +func NewNTString(s string) (*NTString, error) { + var nts NTString + s8, err := BytePtrFromString(s) + if err != nil { + return nil, err + } + RtlInitString(&nts, s8) + return &nts, nil +} + +// Slice returns a byte slice that aliases the data in the NTString. +func (s *NTString) Slice() []byte { + var slice []byte + hdr := (*unsafeheader.Slice)(unsafe.Pointer(&slice)) + hdr.Data = unsafe.Pointer(s.Buffer) + hdr.Len = int(s.Length) + hdr.Cap = int(s.MaximumLength) + return slice +} + +func (s *NTString) String() string { + return ByteSliceToString(s.Slice()) +} + +// FindResource resolves a resource of the given name and resource type. 
+func FindResource(module Handle, name, resType ResourceIDOrString) (Handle, error) { + var namePtr, resTypePtr uintptr + var name16, resType16 *uint16 + var err error + resolvePtr := func(i interface{}, keep **uint16) (uintptr, error) { + switch v := i.(type) { + case string: + *keep, err = UTF16PtrFromString(v) + if err != nil { + return 0, err + } + return uintptr(unsafe.Pointer(*keep)), nil + case ResourceID: + return uintptr(v), nil + } + return 0, errorspkg.New("parameter must be a ResourceID or a string") + } + namePtr, err = resolvePtr(name, &name16) + if err != nil { + return 0, err + } + resTypePtr, err = resolvePtr(resType, &resType16) + if err != nil { + return 0, err + } + resInfo, err := findResource(module, namePtr, resTypePtr) + runtime.KeepAlive(name16) + runtime.KeepAlive(resType16) + return resInfo, err +} + +func LoadResourceData(module, resInfo Handle) (data []byte, err error) { + size, err := SizeofResource(module, resInfo) + if err != nil { + return + } + resData, err := LoadResource(module, resInfo) + if err != nil { + return + } + ptr, err := LockResource(resData) + if err != nil { + return + } + h := (*unsafeheader.Slice)(unsafe.Pointer(&data)) + h.Data = unsafe.Pointer(ptr) + h.Len = int(size) + h.Cap = int(size) + return +} diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go index fd4260762..1f733398e 100644 --- a/vendor/golang.org/x/sys/windows/types_windows.go +++ b/vendor/golang.org/x/sys/windows/types_windows.go @@ -10,6 +10,10 @@ import ( "unsafe" ) +// NTStatus corresponds with NTSTATUS, error values returned by ntdll.dll and +// other native functions. +type NTStatus uint32 + const ( // Invented values to support what package os expects. 
O_RDONLY = 0x00000 @@ -215,6 +219,18 @@ const ( INHERIT_PARENT_AFFINITY = 0x00010000 ) +const ( + // attributes for ProcThreadAttributeList + PROC_THREAD_ATTRIBUTE_PARENT_PROCESS = 0x00020000 + PROC_THREAD_ATTRIBUTE_HANDLE_LIST = 0x00020002 + PROC_THREAD_ATTRIBUTE_GROUP_AFFINITY = 0x00030003 + PROC_THREAD_ATTRIBUTE_PREFERRED_NODE = 0x00020004 + PROC_THREAD_ATTRIBUTE_IDEAL_PROCESSOR = 0x00030005 + PROC_THREAD_ATTRIBUTE_MITIGATION_POLICY = 0x00020007 + PROC_THREAD_ATTRIBUTE_UMS_THREAD = 0x00030006 + PROC_THREAD_ATTRIBUTE_PROTECTION_LEVEL = 0x0002000b +) + const ( // flags for CreateToolhelp32Snapshot TH32CS_SNAPHEAPLIST = 0x01 @@ -287,6 +303,23 @@ const ( PKCS12_NO_PERSIST_KEY = 0x00008000 PKCS12_INCLUDE_EXTENDED_PROPERTIES = 0x00000010 + /* Flags for CryptAcquireCertificatePrivateKey */ + CRYPT_ACQUIRE_CACHE_FLAG = 0x00000001 + CRYPT_ACQUIRE_USE_PROV_INFO_FLAG = 0x00000002 + CRYPT_ACQUIRE_COMPARE_KEY_FLAG = 0x00000004 + CRYPT_ACQUIRE_NO_HEALING = 0x00000008 + CRYPT_ACQUIRE_SILENT_FLAG = 0x00000040 + CRYPT_ACQUIRE_WINDOW_HANDLE_FLAG = 0x00000080 + CRYPT_ACQUIRE_NCRYPT_KEY_FLAGS_MASK = 0x00070000 + CRYPT_ACQUIRE_ALLOW_NCRYPT_KEY_FLAG = 0x00010000 + CRYPT_ACQUIRE_PREFER_NCRYPT_KEY_FLAG = 0x00020000 + CRYPT_ACQUIRE_ONLY_NCRYPT_KEY_FLAG = 0x00040000 + + /* pdwKeySpec for CryptAcquireCertificatePrivateKey */ + AT_KEYEXCHANGE = 1 + AT_SIGNATURE = 2 + CERT_NCRYPT_KEY_SPEC = 0xFFFFFFFF + /* Default usage match type is AND with value zero */ USAGE_MATCH_TYPE_AND = 0 USAGE_MATCH_TYPE_OR = 1 @@ -412,6 +445,89 @@ const ( CERT_TRUST_IS_CA_TRUSTED = 0x00004000 CERT_TRUST_IS_COMPLEX_CHAIN = 0x00010000 + /* Certificate Information Flags */ + CERT_INFO_VERSION_FLAG = 1 + CERT_INFO_SERIAL_NUMBER_FLAG = 2 + CERT_INFO_SIGNATURE_ALGORITHM_FLAG = 3 + CERT_INFO_ISSUER_FLAG = 4 + CERT_INFO_NOT_BEFORE_FLAG = 5 + CERT_INFO_NOT_AFTER_FLAG = 6 + CERT_INFO_SUBJECT_FLAG = 7 + CERT_INFO_SUBJECT_PUBLIC_KEY_INFO_FLAG = 8 + CERT_INFO_ISSUER_UNIQUE_ID_FLAG = 9 + CERT_INFO_SUBJECT_UNIQUE_ID_FLAG = 10 + 
CERT_INFO_EXTENSION_FLAG = 11 + + /* dwFindType for CertFindCertificateInStore */ + CERT_COMPARE_MASK = 0xFFFF + CERT_COMPARE_SHIFT = 16 + CERT_COMPARE_ANY = 0 + CERT_COMPARE_SHA1_HASH = 1 + CERT_COMPARE_NAME = 2 + CERT_COMPARE_ATTR = 3 + CERT_COMPARE_MD5_HASH = 4 + CERT_COMPARE_PROPERTY = 5 + CERT_COMPARE_PUBLIC_KEY = 6 + CERT_COMPARE_HASH = CERT_COMPARE_SHA1_HASH + CERT_COMPARE_NAME_STR_A = 7 + CERT_COMPARE_NAME_STR_W = 8 + CERT_COMPARE_KEY_SPEC = 9 + CERT_COMPARE_ENHKEY_USAGE = 10 + CERT_COMPARE_CTL_USAGE = CERT_COMPARE_ENHKEY_USAGE + CERT_COMPARE_SUBJECT_CERT = 11 + CERT_COMPARE_ISSUER_OF = 12 + CERT_COMPARE_EXISTING = 13 + CERT_COMPARE_SIGNATURE_HASH = 14 + CERT_COMPARE_KEY_IDENTIFIER = 15 + CERT_COMPARE_CERT_ID = 16 + CERT_COMPARE_CROSS_CERT_DIST_POINTS = 17 + CERT_COMPARE_PUBKEY_MD5_HASH = 18 + CERT_COMPARE_SUBJECT_INFO_ACCESS = 19 + CERT_COMPARE_HASH_STR = 20 + CERT_COMPARE_HAS_PRIVATE_KEY = 21 + CERT_FIND_ANY = (CERT_COMPARE_ANY << CERT_COMPARE_SHIFT) + CERT_FIND_SHA1_HASH = (CERT_COMPARE_SHA1_HASH << CERT_COMPARE_SHIFT) + CERT_FIND_MD5_HASH = (CERT_COMPARE_MD5_HASH << CERT_COMPARE_SHIFT) + CERT_FIND_SIGNATURE_HASH = (CERT_COMPARE_SIGNATURE_HASH << CERT_COMPARE_SHIFT) + CERT_FIND_KEY_IDENTIFIER = (CERT_COMPARE_KEY_IDENTIFIER << CERT_COMPARE_SHIFT) + CERT_FIND_HASH = CERT_FIND_SHA1_HASH + CERT_FIND_PROPERTY = (CERT_COMPARE_PROPERTY << CERT_COMPARE_SHIFT) + CERT_FIND_PUBLIC_KEY = (CERT_COMPARE_PUBLIC_KEY << CERT_COMPARE_SHIFT) + CERT_FIND_SUBJECT_NAME = (CERT_COMPARE_NAME< backoff -> try_on_all_addresses. +// Each SubConn contains a list of addresses. // -// All SubConns start in IDLE, and will not try to connect. To trigger -// the connecting, Balancers must call Connect. -// When the connection encounters an error, it will reconnect immediately. -// When the connection becomes IDLE, it will not reconnect unless Connect is -// called. +// All SubConns start in IDLE, and will not try to connect. To trigger the +// connecting, Balancers must call Connect. 
If a connection re-enters IDLE, +// Balancers must call Connect again to trigger a new connection attempt. // -// This interface is to be implemented by gRPC. Users should not need a -// brand new implementation of this interface. For the situations like -// testing, the new implementation should embed this interface. This allows -// gRPC to add new methods to this interface. +// gRPC will try to connect to the addresses in sequence, and stop trying the +// remainder once the first connection is successful. If an attempt to connect +// to all addresses encounters an error, the SubConn will enter +// TRANSIENT_FAILURE for a backoff period, and then transition to IDLE. +// +// Once established, if a connection is lost, the SubConn will transition +// directly to IDLE. +// +// This interface is to be implemented by gRPC. Users should not need their own +// implementation of this interface. For situations like testing, any +// implementations should embed this interface. This allows gRPC to add new +// methods to this interface. type SubConn interface { // UpdateAddresses updates the addresses used in this SubConn. // gRPC checks if currently-connected address is still in the new list. @@ -326,6 +328,20 @@ type Balancer interface { Close() } +// ExitIdler is an optional interface for balancers to implement. If +// implemented, ExitIdle will be called when ClientConn.Connect is called, if +// the ClientConn is idle. If unimplemented, ClientConn.Connect will cause +// all SubConns to connect. +// +// Notice: it will be required for all balancers to implement this in a future +// release. +type ExitIdler interface { + // ExitIdle instructs the LB policy to reconnect to backends / exit the + // IDLE state, if appropriate and possible. Note that SubConns that enter + // the IDLE state will not reconnect until SubConn.Connect is called. + ExitIdle() +} + // SubConnState describes the state of a SubConn. 
type SubConnState struct { // ConnectivityState is the connectivity state of the SubConn. @@ -353,8 +369,10 @@ var ErrBadResolverState = errors.New("bad resolver state") // // It's not thread safe. type ConnectivityStateEvaluator struct { - numReady uint64 // Number of addrConns in ready state. - numConnecting uint64 // Number of addrConns in connecting state. + numReady uint64 // Number of addrConns in ready state. + numConnecting uint64 // Number of addrConns in connecting state. + numTransientFailure uint64 // Number of addrConns in transient failure state. + numIdle uint64 // Number of addrConns in idle state. } // RecordTransition records state change happening in subConn and based on that @@ -362,9 +380,11 @@ type ConnectivityStateEvaluator struct { // // - If at least one SubConn in Ready, the aggregated state is Ready; // - Else if at least one SubConn in Connecting, the aggregated state is Connecting; -// - Else the aggregated state is TransientFailure. +// - Else if at least one SubConn is TransientFailure, the aggregated state is Transient Failure; +// - Else if at least one SubConn is Idle, the aggregated state is Idle; +// - Else there are no subconns and the aggregated state is Transient Failure // -// Idle and Shutdown are not considered. +// Shutdown is not considered. func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState connectivity.State) connectivity.State { // Update counters. 
for idx, state := range []connectivity.State{oldState, newState} { @@ -374,6 +394,10 @@ func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState conne cse.numReady += updateVal case connectivity.Connecting: cse.numConnecting += updateVal + case connectivity.TransientFailure: + cse.numTransientFailure += updateVal + case connectivity.Idle: + cse.numIdle += updateVal } } @@ -384,5 +408,11 @@ func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState conne if cse.numConnecting > 0 { return connectivity.Connecting } + if cse.numTransientFailure > 0 { + return connectivity.TransientFailure + } + if cse.numIdle > 0 { + return connectivity.Idle + } return connectivity.TransientFailure } diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go index c883efa0b..8dd504299 100644 --- a/vendor/google.golang.org/grpc/balancer/base/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/base/balancer.go @@ -133,6 +133,7 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { } b.subConns[aNoAttrs] = subConnInfo{subConn: sc, attrs: a.Attributes} b.scStates[sc] = connectivity.Idle + b.csEvltr.RecordTransition(connectivity.Shutdown, connectivity.Idle) sc.Connect() } else { // Always update the subconn's address in case the attributes @@ -213,10 +214,14 @@ func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Su } return } - if oldS == connectivity.TransientFailure && s == connectivity.Connecting { - // Once a subconn enters TRANSIENT_FAILURE, ignore subsequent + if oldS == connectivity.TransientFailure && + (s == connectivity.Connecting || s == connectivity.Idle) { + // Once a subconn enters TRANSIENT_FAILURE, ignore subsequent IDLE or // CONNECTING transitions to prevent the aggregated state from being // always CONNECTING when many backends exist but are all down. 
+ if s == connectivity.Idle { + sc.Connect() + } return } b.scStates[sc] = s @@ -242,7 +247,6 @@ func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Su b.state == connectivity.TransientFailure { b.regeneratePicker() } - b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.picker}) } @@ -251,6 +255,11 @@ func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Su func (b *baseBalancer) Close() { } +// ExitIdle is a nop because the base balancer attempts to stay connected to +// all SubConns at all times. +func (b *baseBalancer) ExitIdle() { +} + // NewErrPicker returns a Picker that always returns err on Pick(). func NewErrPicker(err error) balancer.Picker { return &errPicker{err: err} diff --git a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go index 43c2a1537..274eb2f85 100644 --- a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go +++ b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go @@ -47,11 +47,11 @@ func init() { type rrPickerBuilder struct{} func (*rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker { - logger.Infof("roundrobinPicker: newPicker called with info: %v", info) + logger.Infof("roundrobinPicker: Build called with info: %v", info) if len(info.ReadySCs) == 0 { return base.NewErrPicker(balancer.ErrNoSubConnAvailable) } - var scs []balancer.SubConn + scs := make([]balancer.SubConn, 0, len(info.ReadySCs)) for sc := range info.ReadySCs { scs = append(scs, sc) } diff --git a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go index 41061d6d3..f4ea61746 100644 --- a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go +++ b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go @@ -37,14 +37,20 @@ type scStateUpdate struct { err error } +// exitIdle contains no data and is just a signal sent on the 
updateCh in +// ccBalancerWrapper to instruct the balancer to exit idle. +type exitIdle struct{} + // ccBalancerWrapper is a wrapper on top of cc for balancers. // It implements balancer.ClientConn interface. type ccBalancerWrapper struct { - cc *ClientConn - balancerMu sync.Mutex // synchronizes calls to the balancer - balancer balancer.Balancer - scBuffer *buffer.Unbounded - done *grpcsync.Event + cc *ClientConn + balancerMu sync.Mutex // synchronizes calls to the balancer + balancer balancer.Balancer + hasExitIdle bool + updateCh *buffer.Unbounded + closed *grpcsync.Event + done *grpcsync.Event mu sync.Mutex subConns map[*acBalancerWrapper]struct{} @@ -53,12 +59,14 @@ type ccBalancerWrapper struct { func newCCBalancerWrapper(cc *ClientConn, b balancer.Builder, bopts balancer.BuildOptions) *ccBalancerWrapper { ccb := &ccBalancerWrapper{ cc: cc, - scBuffer: buffer.NewUnbounded(), + updateCh: buffer.NewUnbounded(), + closed: grpcsync.NewEvent(), done: grpcsync.NewEvent(), subConns: make(map[*acBalancerWrapper]struct{}), } go ccb.watcher() ccb.balancer = b.Build(ccb, bopts) + _, ccb.hasExitIdle = ccb.balancer.(balancer.ExitIdler) return ccb } @@ -67,35 +75,72 @@ func newCCBalancerWrapper(cc *ClientConn, b balancer.Builder, bopts balancer.Bui func (ccb *ccBalancerWrapper) watcher() { for { select { - case t := <-ccb.scBuffer.Get(): - ccb.scBuffer.Load() - if ccb.done.HasFired() { + case t := <-ccb.updateCh.Get(): + ccb.updateCh.Load() + if ccb.closed.HasFired() { break } - ccb.balancerMu.Lock() - su := t.(*scStateUpdate) - ccb.balancer.UpdateSubConnState(su.sc, balancer.SubConnState{ConnectivityState: su.state, ConnectionError: su.err}) - ccb.balancerMu.Unlock() - case <-ccb.done.Done(): + switch u := t.(type) { + case *scStateUpdate: + ccb.balancerMu.Lock() + ccb.balancer.UpdateSubConnState(u.sc, balancer.SubConnState{ConnectivityState: u.state, ConnectionError: u.err}) + ccb.balancerMu.Unlock() + case *acBalancerWrapper: + ccb.mu.Lock() + if ccb.subConns != nil { + 
delete(ccb.subConns, u) + ccb.cc.removeAddrConn(u.getAddrConn(), errConnDrain) + } + ccb.mu.Unlock() + case exitIdle: + if ccb.cc.GetState() == connectivity.Idle { + if ei, ok := ccb.balancer.(balancer.ExitIdler); ok { + // We already checked that the balancer implements + // ExitIdle before pushing the event to updateCh, but + // check conditionally again as defensive programming. + ccb.balancerMu.Lock() + ei.ExitIdle() + ccb.balancerMu.Unlock() + } + } + default: + logger.Errorf("ccBalancerWrapper.watcher: unknown update %+v, type %T", t, t) + } + case <-ccb.closed.Done(): } - if ccb.done.HasFired() { + if ccb.closed.HasFired() { + ccb.balancerMu.Lock() ccb.balancer.Close() + ccb.balancerMu.Unlock() ccb.mu.Lock() scs := ccb.subConns ccb.subConns = nil ccb.mu.Unlock() + ccb.UpdateState(balancer.State{ConnectivityState: connectivity.Connecting, Picker: nil}) + ccb.done.Fire() + // Fire done before removing the addr conns. We can safely unblock + // ccb.close and allow the removeAddrConns to happen + // asynchronously. 
for acbw := range scs { ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain) } - ccb.UpdateState(balancer.State{ConnectivityState: connectivity.Connecting, Picker: nil}) return } } } func (ccb *ccBalancerWrapper) close() { - ccb.done.Fire() + ccb.closed.Fire() + <-ccb.done.Done() +} + +func (ccb *ccBalancerWrapper) exitIdle() bool { + if !ccb.hasExitIdle { + return false + } + ccb.updateCh.Put(exitIdle{}) + return true } func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State, err error) { @@ -109,7 +154,7 @@ func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s co if sc == nil { return } - ccb.scBuffer.Put(&scStateUpdate{ + ccb.updateCh.Put(&scStateUpdate{ sc: sc, state: s, err: err, @@ -124,8 +169,8 @@ func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnStat func (ccb *ccBalancerWrapper) resolverError(err error) { ccb.balancerMu.Lock() + defer ccb.balancerMu.Unlock() ccb.balancer.ResolverError(err) - ccb.balancerMu.Unlock() } func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { @@ -150,17 +195,10 @@ func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer } func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) { - acbw, ok := sc.(*acBalancerWrapper) - if !ok { - return - } - ccb.mu.Lock() - defer ccb.mu.Unlock() - if ccb.subConns == nil { - return - } - delete(ccb.subConns, acbw) - ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain) + // The RemoveSubConn() is handled in the run() goroutine, to avoid deadlock + // during switchBalancer() if the old balancer calls RemoveSubConn() in its + // Close(). 
+ ccb.updateCh.Put(sc) } func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { @@ -205,7 +243,7 @@ func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) { acbw.mu.Lock() defer acbw.mu.Unlock() if len(addrs) <= 0 { - acbw.ac.tearDown(errConnDrain) + acbw.ac.cc.removeAddrConn(acbw.ac, errConnDrain) return } if !acbw.ac.tryUpdateAddrs(addrs) { @@ -220,23 +258,23 @@ func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) { acbw.ac.acbw = nil acbw.ac.mu.Unlock() acState := acbw.ac.getState() - acbw.ac.tearDown(errConnDrain) + acbw.ac.cc.removeAddrConn(acbw.ac, errConnDrain) if acState == connectivity.Shutdown { return } - ac, err := cc.newAddrConn(addrs, opts) + newAC, err := cc.newAddrConn(addrs, opts) if err != nil { channelz.Warningf(logger, acbw.ac.channelzID, "acBalancerWrapper: UpdateAddresses: failed to newAddrConn: %v", err) return } - acbw.ac = ac - ac.mu.Lock() - ac.acbw = acbw - ac.mu.Unlock() + acbw.ac = newAC + newAC.mu.Lock() + newAC.acbw = acbw + newAC.mu.Unlock() if acState != connectivity.Idle { - ac.connect() + go newAC.connect() } } } @@ -244,7 +282,7 @@ func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) { func (acbw *acBalancerWrapper) Connect() { acbw.mu.Lock() defer acbw.mu.Unlock() - acbw.ac.connect() + go acbw.ac.connect() } func (acbw *acBalancerWrapper) getAddrConn() *addrConn { diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index 77a08fd33..34cc4c948 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -143,6 +143,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * firstResolveEvent: grpcsync.NewEvent(), } cc.retryThrottler.Store((*retryThrottler)(nil)) + cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil}) cc.ctx, cc.cancel = context.WithCancel(context.Background()) for _, opt := 
range opts { @@ -321,6 +322,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * // A blocking dial blocks until the clientConn is ready. if cc.dopts.block { for { + cc.Connect() s := cc.GetState() if s == connectivity.Ready { break @@ -538,12 +540,31 @@ func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connec // // Experimental // -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. +// Notice: This API is EXPERIMENTAL and may be changed or removed in a later +// release. func (cc *ClientConn) GetState() connectivity.State { return cc.csMgr.getState() } +// Connect causes all subchannels in the ClientConn to attempt to connect if +// the channel is idle. Does not wait for the connection attempts to begin +// before returning. +// +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a later +// release. +func (cc *ClientConn) Connect() { + cc.mu.Lock() + defer cc.mu.Unlock() + if cc.balancerWrapper != nil && cc.balancerWrapper.exitIdle() { + return + } + for ac := range cc.conns { + go ac.connect() + } +} + func (cc *ClientConn) scWatcher() { for { select { @@ -710,7 +731,12 @@ func (cc *ClientConn) switchBalancer(name string) { return } if cc.balancerWrapper != nil { + // Don't hold cc.mu while closing the balancers. The balancers may call + // methods that require cc.mu (e.g. cc.NewSubConn()). Holding the mutex + // would cause a deadlock in that case. + cc.mu.Unlock() cc.balancerWrapper.close() + cc.mu.Lock() } builder := balancer.Get(name) @@ -839,8 +865,7 @@ func (ac *addrConn) connect() error { ac.updateConnectivityState(connectivity.Connecting, nil) ac.mu.Unlock() - // Start a goroutine connecting to the server asynchronously. 
- go ac.resetTransport() + ac.resetTransport() return nil } @@ -877,6 +902,10 @@ func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool { // ac.state is Ready, try to find the connected address. var curAddrFound bool for _, a := range addrs { + // a.ServerName takes precedent over ClientConn authority, if present. + if a.ServerName == "" { + a.ServerName = ac.cc.authority + } if reflect.DeepEqual(ac.curAddr, a) { curAddrFound = true break @@ -1045,12 +1074,12 @@ func (cc *ClientConn) Close() error { cc.blockingpicker.close() - if rWrapper != nil { - rWrapper.close() - } if bWrapper != nil { bWrapper.close() } + if rWrapper != nil { + rWrapper.close() + } for ac := range conns { ac.tearDown(ErrClientConnClosing) @@ -1129,112 +1158,86 @@ func (ac *addrConn) adjustParams(r transport.GoAwayReason) { } func (ac *addrConn) resetTransport() { - for i := 0; ; i++ { - if i > 0 { - ac.cc.resolveNow(resolver.ResolveNowOptions{}) - } + ac.mu.Lock() + if ac.state == connectivity.Shutdown { + ac.mu.Unlock() + return + } + addrs := ac.addrs + backoffFor := ac.dopts.bs.Backoff(ac.backoffIdx) + // This will be the duration that dial gets to finish. + dialDuration := minConnectTimeout + if ac.dopts.minConnectTimeout != nil { + dialDuration = ac.dopts.minConnectTimeout() + } + + if dialDuration < backoffFor { + // Give dial more time as we keep failing to connect. + dialDuration = backoffFor + } + // We can potentially spend all the time trying the first address, and + // if the server accepts the connection and then hangs, the following + // addresses will never be tried. + // + // The spec doesn't mention what should be done for multiple addresses. 
+ // https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md#proposed-backoff-algorithm + connectDeadline := time.Now().Add(dialDuration) + + ac.updateConnectivityState(connectivity.Connecting, nil) + ac.mu.Unlock() + + if err := ac.tryAllAddrs(addrs, connectDeadline); err != nil { + ac.cc.resolveNow(resolver.ResolveNowOptions{}) + // After exhausting all addresses, the addrConn enters + // TRANSIENT_FAILURE. ac.mu.Lock() if ac.state == connectivity.Shutdown { ac.mu.Unlock() return } + ac.updateConnectivityState(connectivity.TransientFailure, err) - addrs := ac.addrs - backoffFor := ac.dopts.bs.Backoff(ac.backoffIdx) - // This will be the duration that dial gets to finish. - dialDuration := minConnectTimeout - if ac.dopts.minConnectTimeout != nil { - dialDuration = ac.dopts.minConnectTimeout() - } - - if dialDuration < backoffFor { - // Give dial more time as we keep failing to connect. - dialDuration = backoffFor - } - // We can potentially spend all the time trying the first address, and - // if the server accepts the connection and then hangs, the following - // addresses will never be tried. - // - // The spec doesn't mention what should be done for multiple addresses. - // https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md#proposed-backoff-algorithm - connectDeadline := time.Now().Add(dialDuration) - - ac.updateConnectivityState(connectivity.Connecting, nil) - ac.transport = nil + // Backoff. + b := ac.resetBackoff ac.mu.Unlock() - newTr, addr, reconnect, err := ac.tryAllAddrs(addrs, connectDeadline) - if err != nil { - // After exhausting all addresses, the addrConn enters - // TRANSIENT_FAILURE. + timer := time.NewTimer(backoffFor) + select { + case <-timer.C: ac.mu.Lock() - if ac.state == connectivity.Shutdown { - ac.mu.Unlock() - return - } - ac.updateConnectivityState(connectivity.TransientFailure, err) - - // Backoff. 
- b := ac.resetBackoff + ac.backoffIdx++ ac.mu.Unlock() - - timer := time.NewTimer(backoffFor) - select { - case <-timer.C: - ac.mu.Lock() - ac.backoffIdx++ - ac.mu.Unlock() - case <-b: - timer.Stop() - case <-ac.ctx.Done(): - timer.Stop() - return - } - continue + case <-b: + timer.Stop() + case <-ac.ctx.Done(): + timer.Stop() + return } ac.mu.Lock() - if ac.state == connectivity.Shutdown { - ac.mu.Unlock() - newTr.Close() - return + if ac.state != connectivity.Shutdown { + ac.updateConnectivityState(connectivity.Idle, err) } - ac.curAddr = addr - ac.transport = newTr - ac.backoffIdx = 0 - - hctx, hcancel := context.WithCancel(ac.ctx) - ac.startHealthCheck(hctx) ac.mu.Unlock() - - // Block until the created transport is down. And when this happens, - // we restart from the top of the addr list. - <-reconnect.Done() - hcancel() - // restart connecting - the top of the loop will set state to - // CONNECTING. This is against the current connectivity semantics doc, - // however it allows for graceful behavior for RPCs not yet dispatched - // - unfortunate timing would otherwise lead to the RPC failing even - // though the TRANSIENT_FAILURE state (called for by the doc) would be - // instantaneous. - // - // Ideally we should transition to Idle here and block until there is - // RPC activity that leads to the balancer requesting a reconnect of - // the associated SubConn. + return } + // Success; reset backoff. + ac.mu.Lock() + ac.backoffIdx = 0 + ac.mu.Unlock() } -// tryAllAddrs tries to creates a connection to the addresses, and stop when at the -// first successful one. It returns the transport, the address and a Event in -// the successful case. The Event fires when the returned transport disconnects. -func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.Time) (transport.ClientTransport, resolver.Address, *grpcsync.Event, error) { +// tryAllAddrs tries to creates a connection to the addresses, and stop when at +// the first successful one. 
It returns an error if no address was successfully +// connected, or updates ac appropriately with the new transport. +func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.Time) error { var firstConnErr error for _, addr := range addrs { ac.mu.Lock() if ac.state == connectivity.Shutdown { ac.mu.Unlock() - return nil, resolver.Address{}, nil, errConnClosing + return errConnClosing } ac.cc.mu.RLock() @@ -1249,9 +1252,9 @@ func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.T channelz.Infof(logger, ac.channelzID, "Subchannel picks a new address %q to connect", addr.Addr) - newTr, reconnect, err := ac.createTransport(addr, copts, connectDeadline) + err := ac.createTransport(addr, copts, connectDeadline) if err == nil { - return newTr, addr, reconnect, nil + return nil } if firstConnErr == nil { firstConnErr = err @@ -1260,57 +1263,54 @@ func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.T } // Couldn't connect to any address. - return nil, resolver.Address{}, nil, firstConnErr + return firstConnErr } -// createTransport creates a connection to addr. It returns the transport and a -// Event in the successful case. The Event fires when the returned transport -// disconnects. -func (ac *addrConn) createTransport(addr resolver.Address, copts transport.ConnectOptions, connectDeadline time.Time) (transport.ClientTransport, *grpcsync.Event, error) { - prefaceReceived := make(chan struct{}) - onCloseCalled := make(chan struct{}) - reconnect := grpcsync.NewEvent() +// createTransport creates a connection to addr. It returns an error if the +// address was not successfully connected, or updates ac appropriately with the +// new transport. +func (ac *addrConn) createTransport(addr resolver.Address, copts transport.ConnectOptions, connectDeadline time.Time) error { + // TODO: Delete prefaceReceived and move the logic to wait for it into the + // transport. 
+ prefaceReceived := grpcsync.NewEvent() + connClosed := grpcsync.NewEvent() // addr.ServerName takes precedent over ClientConn authority, if present. if addr.ServerName == "" { addr.ServerName = ac.cc.authority } - once := sync.Once{} - onGoAway := func(r transport.GoAwayReason) { - ac.mu.Lock() - ac.adjustParams(r) - once.Do(func() { - if ac.state == connectivity.Ready { - // Prevent this SubConn from being used for new RPCs by setting its - // state to Connecting. - // - // TODO: this should be Idle when grpc-go properly supports it. - ac.updateConnectivityState(connectivity.Connecting, nil) - } - }) - ac.mu.Unlock() - reconnect.Fire() - } + hctx, hcancel := context.WithCancel(ac.ctx) + hcStarted := false // protected by ac.mu onClose := func() { ac.mu.Lock() - once.Do(func() { - if ac.state == connectivity.Ready { - // Prevent this SubConn from being used for new RPCs by setting its - // state to Connecting. - // - // TODO: this should be Idle when grpc-go properly supports it. - ac.updateConnectivityState(connectivity.Connecting, nil) - } - }) - ac.mu.Unlock() - close(onCloseCalled) - reconnect.Fire() + defer ac.mu.Unlock() + defer connClosed.Fire() + if !hcStarted || hctx.Err() != nil { + // We didn't start the health check or set the state to READY, so + // no need to do anything else here. + // + // OR, we have already cancelled the health check context, meaning + // we have already called onClose once for this transport. In this + // case it would be dangerous to clear the transport and update the + // state, since there may be a new transport in this addrConn. 
+ return + } + hcancel() + ac.transport = nil + // Refresh the name resolver + ac.cc.resolveNow(resolver.ResolveNowOptions{}) + if ac.state != connectivity.Shutdown { + ac.updateConnectivityState(connectivity.Idle, nil) + } } - onPrefaceReceipt := func() { - close(prefaceReceived) + onGoAway := func(r transport.GoAwayReason) { + ac.mu.Lock() + ac.adjustParams(r) + ac.mu.Unlock() + onClose() } connectCtx, cancel := context.WithDeadline(ac.ctx, connectDeadline) @@ -1319,27 +1319,67 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne copts.ChannelzParentID = ac.channelzID } - newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, onPrefaceReceipt, onGoAway, onClose) + newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, func() { prefaceReceived.Fire() }, onGoAway, onClose) if err != nil { // newTr is either nil, or closed. - channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v. Err: %v. Reconnecting...", addr, err) - return nil, nil, err + channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v. Err: %v", addr, err) + return err } select { - case <-time.After(time.Until(connectDeadline)): + case <-connectCtx.Done(): // We didn't get the preface in time. - newTr.Close() - channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v: didn't receive server preface in time. Reconnecting...", addr) - return nil, nil, errors.New("timed out waiting for server handshake") - case <-prefaceReceived: + // The error we pass to Close() is immaterial since there are no open + // streams at this point, so no trailers with error details will be sent + // out. We just need to pass a non-nil error. 
+ newTr.Close(transport.ErrConnClosing) + if connectCtx.Err() == context.DeadlineExceeded { + err := errors.New("failed to receive server preface within timeout") + channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v: %v", addr, err) + return err + } + return nil + case <-prefaceReceived.Done(): // We got the preface - huzzah! things are good. - case <-onCloseCalled: - // The transport has already closed - noop. - return nil, nil, errors.New("connection closed") - // TODO(deklerk) this should bail on ac.ctx.Done(). Add a test and fix. + ac.mu.Lock() + defer ac.mu.Unlock() + if connClosed.HasFired() { + // onClose called first; go idle but do nothing else. + if ac.state != connectivity.Shutdown { + ac.updateConnectivityState(connectivity.Idle, nil) + } + return nil + } + if ac.state == connectivity.Shutdown { + // This can happen if the subConn was removed while in `Connecting` + // state. tearDown() would have set the state to `Shutdown`, but + // would not have closed the transport since ac.transport would not + // been set at that point. + // + // We run this in a goroutine because newTr.Close() calls onClose() + // inline, which requires locking ac.mu. + // + // The error we pass to Close() is immaterial since there are no open + // streams at this point, so no trailers with error details will be sent + // out. We just need to pass a non-nil error. + go newTr.Close(transport.ErrConnClosing) + return nil + } + ac.curAddr = addr + ac.transport = newTr + hcStarted = true + ac.startHealthCheck(hctx) // Will set state to READY if appropriate. + return nil + case <-connClosed.Done(): + // The transport has already closed. If we received the preface, too, + // this is not an error. 
+ select { + case <-prefaceReceived.Done(): + return nil + default: + return errors.New("connection closed before server preface received") + } } - return newTr, reconnect, nil } // startHealthCheck starts the health checking stream (RPC) to watch the health @@ -1423,33 +1463,20 @@ func (ac *addrConn) resetConnectBackoff() { ac.mu.Unlock() } -// getReadyTransport returns the transport if ac's state is READY. -// Otherwise it returns nil, false. -// If ac's state is IDLE, it will trigger ac to connect. -func (ac *addrConn) getReadyTransport() (transport.ClientTransport, bool) { +// getReadyTransport returns the transport if ac's state is READY or nil if not. +func (ac *addrConn) getReadyTransport() transport.ClientTransport { ac.mu.Lock() - if ac.state == connectivity.Ready && ac.transport != nil { - t := ac.transport - ac.mu.Unlock() - return t, true - } - var idle bool - if ac.state == connectivity.Idle { - idle = true - } - ac.mu.Unlock() - // Trigger idle ac to connect. - if idle { - ac.connect() + defer ac.mu.Unlock() + if ac.state == connectivity.Ready { + return ac.transport } - return nil, false + return nil } // tearDown starts to tear down the addrConn. -// TODO(zhaoq): Make this synchronous to avoid unbounded memory consumption in -// some edge cases (e.g., the caller opens and closes many addrConn's in a -// tight loop. -// tearDown doesn't remove ac from ac.cc.conns. +// +// Note that tearDown doesn't remove ac from ac.cc.conns, so the addrConn struct +// will leak. In most cases, call cc.removeAddrConn() instead. 
func (ac *addrConn) tearDown(err error) { ac.mu.Lock() if ac.state == connectivity.Shutdown { diff --git a/vendor/google.golang.org/grpc/cmd/protoc-gen-go-grpc/go.mod b/vendor/google.golang.org/grpc/cmd/protoc-gen-go-grpc/go.mod deleted file mode 100644 index d0cfd8ebf..000000000 --- a/vendor/google.golang.org/grpc/cmd/protoc-gen-go-grpc/go.mod +++ /dev/null @@ -1,5 +0,0 @@ -module google.golang.org/grpc/cmd/protoc-gen-go-grpc - -go 1.9 - -require google.golang.org/protobuf v1.23.0 diff --git a/vendor/google.golang.org/grpc/cmd/protoc-gen-go-grpc/go.sum b/vendor/google.golang.org/grpc/cmd/protoc-gen-go-grpc/go.sum deleted file mode 100644 index 92baf2631..000000000 --- a/vendor/google.golang.org/grpc/cmd/protoc-gen-go-grpc/go.sum +++ /dev/null @@ -1,18 +0,0 @@ -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= 
-google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= diff --git a/vendor/google.golang.org/grpc/connectivity/connectivity.go b/vendor/google.golang.org/grpc/connectivity/connectivity.go index 010156261..4a8992642 100644 --- a/vendor/google.golang.org/grpc/connectivity/connectivity.go +++ b/vendor/google.golang.org/grpc/connectivity/connectivity.go @@ -18,7 +18,6 @@ // Package connectivity defines connectivity semantics. // For details, see https://github.com/grpc/grpc/blob/master/doc/connectivity-semantics-and-api.md. -// All APIs in this package are experimental. package connectivity import ( @@ -45,7 +44,7 @@ func (s State) String() string { return "SHUTDOWN" default: logger.Errorf("unknown connectivity state: %d", s) - return "Invalid-State" + return "INVALID_STATE" } } @@ -61,3 +60,35 @@ const ( // Shutdown indicates the ClientConn has started shutting down. Shutdown ) + +// ServingMode indicates the current mode of operation of the server. +// +// Only xDS enabled gRPC servers currently report their serving mode. +type ServingMode int + +const ( + // ServingModeStarting indicates that the server is starting up. + ServingModeStarting ServingMode = iota + // ServingModeServing indicates that the server contains all required + // configuration and is serving RPCs. + ServingModeServing + // ServingModeNotServing indicates that the server is not accepting new + // connections. 
Existing connections will be closed gracefully, allowing + // in-progress RPCs to complete. A server enters this mode when it does not + // contain the required configuration to serve RPCs. + ServingModeNotServing +) + +func (s ServingMode) String() string { + switch s { + case ServingModeStarting: + return "STARTING" + case ServingModeServing: + return "SERVING" + case ServingModeNotServing: + return "NOT_SERVING" + default: + logger.Errorf("unknown serving mode: %d", s) + return "INVALID_MODE" + } +} diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go index e69562e78..7eee7e4ec 100644 --- a/vendor/google.golang.org/grpc/credentials/credentials.go +++ b/vendor/google.golang.org/grpc/credentials/credentials.go @@ -30,7 +30,7 @@ import ( "github.com/golang/protobuf/proto" "google.golang.org/grpc/attributes" - "google.golang.org/grpc/internal" + icredentials "google.golang.org/grpc/internal/credentials" ) // PerRPCCredentials defines the common interface for the credentials which need to @@ -188,15 +188,12 @@ type RequestInfo struct { AuthInfo AuthInfo } -// requestInfoKey is a struct to be used as the key when attaching a RequestInfo to a context object. -type requestInfoKey struct{} - // RequestInfoFromContext extracts the RequestInfo from the context if it exists. // // This API is experimental. func RequestInfoFromContext(ctx context.Context) (ri RequestInfo, ok bool) { - ri, ok = ctx.Value(requestInfoKey{}).(RequestInfo) - return + ri, ok = icredentials.RequestInfoFromContext(ctx).(RequestInfo) + return ri, ok } // ClientHandshakeInfo holds data to be passed to ClientHandshake. This makes @@ -211,16 +208,12 @@ type ClientHandshakeInfo struct { Attributes *attributes.Attributes } -// clientHandshakeInfoKey is a struct used as the key to store -// ClientHandshakeInfo in a context. 
-type clientHandshakeInfoKey struct{} - // ClientHandshakeInfoFromContext returns the ClientHandshakeInfo struct stored // in ctx. // // This API is experimental. func ClientHandshakeInfoFromContext(ctx context.Context) ClientHandshakeInfo { - chi, _ := ctx.Value(clientHandshakeInfoKey{}).(ClientHandshakeInfo) + chi, _ := icredentials.ClientHandshakeInfoFromContext(ctx).(ClientHandshakeInfo) return chi } @@ -249,15 +242,6 @@ func CheckSecurityLevel(ai AuthInfo, level SecurityLevel) error { return nil } -func init() { - internal.NewRequestInfoContext = func(ctx context.Context, ri RequestInfo) context.Context { - return context.WithValue(ctx, requestInfoKey{}, ri) - } - internal.NewClientHandshakeInfoContext = func(ctx context.Context, chi ClientHandshakeInfo) context.Context { - return context.WithValue(ctx, clientHandshakeInfoKey{}, chi) - } -} - // ChannelzSecurityInfo defines the interface that security protocols should implement // in order to provide security info to channelz. // diff --git a/vendor/google.golang.org/grpc/credentials/go12.go b/vendor/google.golang.org/grpc/credentials/go12.go deleted file mode 100644 index ccbf35b33..000000000 --- a/vendor/google.golang.org/grpc/credentials/go12.go +++ /dev/null @@ -1,30 +0,0 @@ -// +build go1.12 - -/* - * - * Copyright 2019 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package credentials - -import "crypto/tls" - -// This init function adds cipher suite constants only defined in Go 1.12. -func init() { - cipherSuiteLookup[tls.TLS_AES_128_GCM_SHA256] = "TLS_AES_128_GCM_SHA256" - cipherSuiteLookup[tls.TLS_AES_256_GCM_SHA384] = "TLS_AES_256_GCM_SHA384" - cipherSuiteLookup[tls.TLS_CHACHA20_POLY1305_SHA256] = "TLS_CHACHA20_POLY1305_SHA256" -} diff --git a/vendor/google.golang.org/grpc/credentials/tls.go b/vendor/google.golang.org/grpc/credentials/tls.go index 8ee7124f2..784822d05 100644 --- a/vendor/google.golang.org/grpc/credentials/tls.go +++ b/vendor/google.golang.org/grpc/credentials/tls.go @@ -230,4 +230,7 @@ var cipherSuiteLookup = map[uint16]string{ tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", + tls.TLS_AES_128_GCM_SHA256: "TLS_AES_128_GCM_SHA256", + tls.TLS_AES_256_GCM_SHA384: "TLS_AES_256_GCM_SHA384", + tls.TLS_CHACHA20_POLY1305_SHA256: "TLS_CHACHA20_POLY1305_SHA256", } diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go index e7f86e6d7..7a497237b 100644 --- a/vendor/google.golang.org/grpc/dialoptions.go +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -66,11 +66,7 @@ type dialOptions struct { minConnectTimeout func() time.Duration defaultServiceConfig *ServiceConfig // defaultServiceConfig is parsed from defaultServiceConfigRawJSON. defaultServiceConfigRawJSON *string - // This is used by ccResolverWrapper to backoff between successive calls to - // resolver.ResolveNow(). The user will have no need to configure this, but - // we need to be able to configure this in tests. - resolveNowBackoff func(int) time.Duration - resolvers []resolver.Builder + resolvers []resolver.Builder } // DialOption configures how we set up the connection. 
@@ -596,7 +592,6 @@ func defaultDialOptions() dialOptions { ReadBufferSize: defaultReadBufSize, UseProxy: true, }, - resolveNowBackoff: internalbackoff.DefaultExponential.Backoff, } } @@ -611,16 +606,6 @@ func withMinConnectDeadline(f func() time.Duration) DialOption { }) } -// withResolveNowBackoff specifies the function that clientconn uses to backoff -// between successive calls to resolver.ResolveNow(). -// -// For testing purpose only. -func withResolveNowBackoff(f func(int) time.Duration) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.resolveNowBackoff = f - }) -} - // WithResolvers allows a list of resolver implementations to be registered // locally with the ClientConn without needing to be globally registered via // resolver.Register. They will be matched against the scheme used for the diff --git a/vendor/google.golang.org/grpc/go.mod b/vendor/google.golang.org/grpc/go.mod deleted file mode 100644 index b177cfa66..000000000 --- a/vendor/google.golang.org/grpc/go.mod +++ /dev/null @@ -1,17 +0,0 @@ -module google.golang.org/grpc - -go 1.11 - -require ( - github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403 - github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d - github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b - github.com/golang/protobuf v1.4.2 - github.com/google/go-cmp v0.5.0 - github.com/google/uuid v1.1.2 - golang.org/x/net v0.0.0-20190311183353-d8887717615a - golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be - golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a - google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 - google.golang.org/protobuf v1.25.0 -) diff --git a/vendor/google.golang.org/grpc/go.sum b/vendor/google.golang.org/grpc/go.sum deleted file mode 100644 index bb25cd491..000000000 --- a/vendor/google.golang.org/grpc/go.sum +++ /dev/null @@ -1,99 +0,0 @@ -cloud.google.com/go v0.26.0 h1:e0WKqKTd5BnrG8aKH3J3h+QvEIQtSUcf2n5UZ5ZgLtQ= -cloud.google.com/go v0.26.0/go.mod 
h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/census-instrumentation/opencensus-proto v0.2.1 h1:glEXhBS5PSLLv4IXzLA5yPRVX4bilULVyxxbrfOtDAk= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403 h1:cqQfy1jclcSy/FwLjemeg3SR1yaINm74aQyupQ0Bl8M= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d h1:QyzYnTnPE15SQyUeqU6qLbWxMkwyAyu+vGksa0b7j00= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/protoc-gen-validate v0.1.0 h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/mock v1.1.1/go.mod 
h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0 h1:/QaMHBdZ26BB3SSst0Iwl10Epc+xhTquomWX0oZEB6w= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod 
h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be h1:vEDujvNQGv4jgYKudGeI/+DAX4Jffq6hpD55MmoEvKs= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= 
-google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/vendor/google.golang.org/grpc/install_gae.sh b/vendor/google.golang.org/grpc/install_gae.sh 
deleted file mode 100644 index 15ff9facd..000000000 --- a/vendor/google.golang.org/grpc/install_gae.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/bash - -TMP=$(mktemp -d /tmp/sdk.XXX) \ -&& curl -o $TMP.zip "https://storage.googleapis.com/appengine-sdks/featured/go_appengine_sdk_linux_amd64-1.9.68.zip" \ -&& unzip -q $TMP.zip -d $TMP \ -&& export PATH="$PATH:$TMP/go_appengine" \ No newline at end of file diff --git a/vendor/google.golang.org/grpc/internal/binarylog/sink.go b/vendor/google.golang.org/grpc/internal/binarylog/sink.go index 7d7a3056b..c2fdd58b3 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/sink.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/sink.go @@ -69,7 +69,8 @@ type writerSink struct { func (ws *writerSink) Write(e *pb.GrpcLogEntry) error { b, err := proto.Marshal(e) if err != nil { - grpclogLogger.Infof("binary logging: failed to marshal proto message: %v", err) + grpclogLogger.Errorf("binary logging: failed to marshal proto message: %v", err) + return err } hdr := make([]byte, 4) binary.BigEndian.PutUint32(hdr, uint32(len(b))) @@ -85,24 +86,27 @@ func (ws *writerSink) Write(e *pb.GrpcLogEntry) error { func (ws *writerSink) Close() error { return nil } type bufferedSink struct { - mu sync.Mutex - closer io.Closer - out Sink // out is built on buf. - buf *bufio.Writer // buf is kept for flush. - - writeStartOnce sync.Once - writeTicker *time.Ticker + mu sync.Mutex + closer io.Closer + out Sink // out is built on buf. + buf *bufio.Writer // buf is kept for flush. + flusherStarted bool + + writeTicker *time.Ticker + done chan struct{} } func (fs *bufferedSink) Write(e *pb.GrpcLogEntry) error { - // Start the write loop when Write is called. - fs.writeStartOnce.Do(fs.startFlushGoroutine) fs.mu.Lock() + defer fs.mu.Unlock() + if !fs.flusherStarted { + // Start the write loop when Write is called. 
+ fs.startFlushGoroutine() + fs.flusherStarted = true + } if err := fs.out.Write(e); err != nil { - fs.mu.Unlock() return err } - fs.mu.Unlock() return nil } @@ -113,7 +117,12 @@ const ( func (fs *bufferedSink) startFlushGoroutine() { fs.writeTicker = time.NewTicker(bufFlushDuration) go func() { - for range fs.writeTicker.C { + for { + select { + case <-fs.done: + return + case <-fs.writeTicker.C: + } fs.mu.Lock() if err := fs.buf.Flush(); err != nil { grpclogLogger.Warningf("failed to flush to Sink: %v", err) @@ -124,10 +133,12 @@ func (fs *bufferedSink) startFlushGoroutine() { } func (fs *bufferedSink) Close() error { + fs.mu.Lock() + defer fs.mu.Unlock() if fs.writeTicker != nil { fs.writeTicker.Stop() } - fs.mu.Lock() + close(fs.done) if err := fs.buf.Flush(); err != nil { grpclogLogger.Warningf("failed to flush to Sink: %v", err) } @@ -137,7 +148,6 @@ func (fs *bufferedSink) Close() error { if err := fs.out.Close(); err != nil { grpclogLogger.Warningf("failed to close the Sink: %v", err) } - fs.mu.Unlock() return nil } @@ -155,5 +165,6 @@ func NewBufferedSink(o io.WriteCloser) Sink { closer: o, out: newWriterSink(bufW), buf: bufW, + done: make(chan struct{}), } } diff --git a/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/vendor/google.golang.org/grpc/internal/channelz/funcs.go index f73141393..6d5760d95 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/funcs.go +++ b/vendor/google.golang.org/grpc/internal/channelz/funcs.go @@ -630,7 +630,7 @@ func (c *channelMap) GetServerSockets(id int64, startID int64, maxResults int64) if count == 0 { end = true } - var s []*SocketMetric + s := make([]*SocketMetric, 0, len(sks)) for _, ns := range sks { sm := &SocketMetric{} sm.SocketData = ns.s.ChannelzMetric() diff --git a/vendor/google.golang.org/grpc/internal/channelz/types_linux.go b/vendor/google.golang.org/grpc/internal/channelz/types_linux.go index 692dd6181..1b1c4cce3 100644 --- 
a/vendor/google.golang.org/grpc/internal/channelz/types_linux.go +++ b/vendor/google.golang.org/grpc/internal/channelz/types_linux.go @@ -1,5 +1,3 @@ -// +build !appengine - /* * * Copyright 2018 gRPC authors. diff --git a/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go index 19c2fc521..8b06eed1a 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go +++ b/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go @@ -1,4 +1,5 @@ -// +build !linux appengine +//go:build !linux +// +build !linux /* * @@ -37,6 +38,6 @@ type SocketOptionData struct { // Windows OS doesn't support Socket Option func (s *SocketOptionData) Getsockopt(fd uintptr) { once.Do(func() { - logger.Warning("Channelz: socket options are not supported on non-linux os and appengine.") + logger.Warning("Channelz: socket options are not supported on non-linux environments") }) } diff --git a/vendor/google.golang.org/grpc/internal/channelz/util_linux.go b/vendor/google.golang.org/grpc/internal/channelz/util_linux.go index fdf409d55..8d194e44e 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/util_linux.go +++ b/vendor/google.golang.org/grpc/internal/channelz/util_linux.go @@ -1,5 +1,3 @@ -// +build linux,!appengine - /* * * Copyright 2018 gRPC authors. 
diff --git a/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go index 8864a0811..837ddc402 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go +++ b/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go @@ -1,4 +1,5 @@ -// +build !linux appengine +//go:build !linux +// +build !linux /* * diff --git a/vendor/google.golang.org/grpc/internal/credentials/credentials.go b/vendor/google.golang.org/grpc/internal/credentials/credentials.go new file mode 100644 index 000000000..32c9b5903 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/credentials/credentials.go @@ -0,0 +1,49 @@ +/* + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package credentials + +import ( + "context" +) + +// requestInfoKey is a struct to be used as the key to store RequestInfo in a +// context. +type requestInfoKey struct{} + +// NewRequestInfoContext creates a context with ri. +func NewRequestInfoContext(ctx context.Context, ri interface{}) context.Context { + return context.WithValue(ctx, requestInfoKey{}, ri) +} + +// RequestInfoFromContext extracts the RequestInfo from ctx. +func RequestInfoFromContext(ctx context.Context) interface{} { + return ctx.Value(requestInfoKey{}) +} + +// clientHandshakeInfoKey is a struct used as the key to store +// ClientHandshakeInfo in a context. 
+type clientHandshakeInfoKey struct{} + +// ClientHandshakeInfoFromContext extracts the ClientHandshakeInfo from ctx. +func ClientHandshakeInfoFromContext(ctx context.Context) interface{} { + return ctx.Value(clientHandshakeInfoKey{}) +} + +// NewClientHandshakeInfoContext creates a context with chi. +func NewClientHandshakeInfoContext(ctx context.Context, chi interface{}) context.Context { + return context.WithValue(ctx, clientHandshakeInfoKey{}, chi) +} diff --git a/vendor/google.golang.org/grpc/internal/credentials/spiffe.go b/vendor/google.golang.org/grpc/internal/credentials/spiffe.go index be70b6cdf..25ade6230 100644 --- a/vendor/google.golang.org/grpc/internal/credentials/spiffe.go +++ b/vendor/google.golang.org/grpc/internal/credentials/spiffe.go @@ -1,5 +1,3 @@ -// +build !appengine - /* * * Copyright 2020 gRPC authors. diff --git a/vendor/google.golang.org/grpc/internal/credentials/spiffe_appengine.go b/vendor/google.golang.org/grpc/internal/credentials/spiffe_appengine.go deleted file mode 100644 index af6f57719..000000000 --- a/vendor/google.golang.org/grpc/internal/credentials/spiffe_appengine.go +++ /dev/null @@ -1,31 +0,0 @@ -// +build appengine - -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package credentials - -import ( - "crypto/tls" - "net/url" -) - -// SPIFFEIDFromState is a no-op for appengine builds. 
-func SPIFFEIDFromState(state tls.ConnectionState) *url.URL { - return nil -} diff --git a/vendor/google.golang.org/grpc/internal/credentials/syscallconn.go b/vendor/google.golang.org/grpc/internal/credentials/syscallconn.go index f499a614c..2919632d6 100644 --- a/vendor/google.golang.org/grpc/internal/credentials/syscallconn.go +++ b/vendor/google.golang.org/grpc/internal/credentials/syscallconn.go @@ -1,5 +1,3 @@ -// +build !appengine - /* * * Copyright 2018 gRPC authors. diff --git a/vendor/google.golang.org/grpc/internal/credentials/syscallconn_appengine.go b/vendor/google.golang.org/grpc/internal/credentials/syscallconn_appengine.go deleted file mode 100644 index a6144cd66..000000000 --- a/vendor/google.golang.org/grpc/internal/credentials/syscallconn_appengine.go +++ /dev/null @@ -1,30 +0,0 @@ -// +build appengine - -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package credentials - -import ( - "net" -) - -// WrapSyscallConn returns newConn on appengine. 
-func WrapSyscallConn(rawConn, newConn net.Conn) net.Conn { - return newConn -} diff --git a/vendor/google.golang.org/grpc/internal/credentials/util.go b/vendor/google.golang.org/grpc/internal/credentials/util.go index 55664fa46..f792fd22c 100644 --- a/vendor/google.golang.org/grpc/internal/credentials/util.go +++ b/vendor/google.golang.org/grpc/internal/credentials/util.go @@ -18,7 +18,9 @@ package credentials -import "crypto/tls" +import ( + "crypto/tls" +) const alpnProtoStrH2 = "h2" diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go index 73931a94b..e766ac04a 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go @@ -22,6 +22,8 @@ package envconfig import ( "os" "strings" + + xdsenv "google.golang.org/grpc/internal/xds/env" ) const ( @@ -31,8 +33,8 @@ const ( ) var ( - // Retry is set if retry is explicitly enabled via "GRPC_GO_RETRY=on". - Retry = strings.EqualFold(os.Getenv(retryStr), "on") + // Retry is set if retry is explicitly enabled via "GRPC_GO_RETRY=on" or if XDS retry support is enabled. + Retry = strings.EqualFold(os.Getenv(retryStr), "on") || xdsenv.RetrySupport // TXTErrIgnore is set if TXT errors should be ignored ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false"). TXTErrIgnore = !strings.EqualFold(os.Getenv(txtErrIgnoreStr), "false") ) diff --git a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go index 200b115ca..740f83c2b 100644 --- a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go +++ b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go @@ -31,26 +31,37 @@ var ( mu sync.Mutex ) +// Int implements rand.Int on the grpcrand global source. +func Int() int { + mu.Lock() + defer mu.Unlock() + return r.Int() +} + // Int63n implements rand.Int63n on the grpcrand global source. 
func Int63n(n int64) int64 { mu.Lock() - res := r.Int63n(n) - mu.Unlock() - return res + defer mu.Unlock() + return r.Int63n(n) } // Intn implements rand.Intn on the grpcrand global source. func Intn(n int) int { mu.Lock() - res := r.Intn(n) - mu.Unlock() - return res + defer mu.Unlock() + return r.Intn(n) } // Float64 implements rand.Float64 on the grpcrand global source. func Float64() float64 { mu.Lock() - res := r.Float64() - mu.Unlock() - return res + defer mu.Unlock() + return r.Float64() +} + +// Uint64 implements rand.Uint64 on the grpcrand global source. +func Uint64() uint64 { + mu.Lock() + defer mu.Unlock() + return r.Uint64() } diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go index 1e2834c70..1b596bf35 100644 --- a/vendor/google.golang.org/grpc/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -38,12 +38,6 @@ var ( // KeepaliveMinPingTime is the minimum ping interval. This must be 10s by // default, but tests may wish to set it lower for convenience. KeepaliveMinPingTime = 10 * time.Second - // NewRequestInfoContext creates a new context based on the argument context attaching - // the passed in RequestInfo to the new context. - NewRequestInfoContext interface{} // func(context.Context, credentials.RequestInfo) context.Context - // NewClientHandshakeInfoContext returns a copy of the input context with - // the passed in ClientHandshakeInfo struct added to it. - NewClientHandshakeInfoContext interface{} // func(context.Context, credentials.ClientHandshakeInfo) context.Context // ParseServiceConfigForTesting is for creating a fake // ClientConn for resolver testing only ParseServiceConfigForTesting interface{} // func(string) *serviceconfig.ParseResult @@ -65,6 +59,11 @@ var ( // gRPC server. An xDS-enabled server needs to know what type of credentials // is configured on the underlying gRPC server. This is set by server.go. 
GetServerCredentials interface{} // func (*grpc.Server) credentials.TransportCredentials + // DrainServerTransports initiates a graceful close of existing connections + // on a gRPC server accepted on the provided listener address. An + // xDS-enabled server invokes this method on a grpc.Server when a particular + // listener moves to "not-serving" mode. + DrainServerTransports interface{} // func(*grpc.Server, string) ) // HealthChecker defines the signature of the client-side LB channel health checking function. diff --git a/vendor/google.golang.org/grpc/internal/resolver/config_selector.go b/vendor/google.golang.org/grpc/internal/resolver/config_selector.go index 5e7f36703..be7e13d58 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/config_selector.go +++ b/vendor/google.golang.org/grpc/internal/resolver/config_selector.go @@ -117,9 +117,12 @@ type ClientInterceptor interface { NewStream(ctx context.Context, ri RPCInfo, done func(), newStream func(ctx context.Context, done func()) (ClientStream, error)) (ClientStream, error) } -// ServerInterceptor is unimplementable; do not use. +// ServerInterceptor is an interceptor for incoming RPC's on gRPC server side. type ServerInterceptor interface { - notDefined() + // AllowRPC checks if an incoming RPC is allowed to proceed based on + // information about connection RPC was received on, and HTTP Headers. This + // information will be piped into context. + AllowRPC(ctx context.Context) error // TODO: Make this a real interceptor for filters such as rate limiting. 
} type csKeyType string diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go index 304235566..75301c514 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go +++ b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go @@ -34,6 +34,7 @@ import ( grpclbstate "google.golang.org/grpc/balancer/grpclb/state" "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/backoff" "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/resolver" @@ -46,6 +47,13 @@ var EnableSRVLookups = false var logger = grpclog.Component("dns") +// Globals to stub out in tests. TODO: Perhaps these two can be combined into a +// single variable for testing the resolver? +var ( + newTimer = time.NewTimer + newTimerDNSResRate = time.NewTimer +) + func init() { resolver.Register(NewBuilder()) } @@ -143,7 +151,6 @@ func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts d.wg.Add(1) go d.watcher() - d.ResolveNow(resolver.ResolveNowOptions{}) return d, nil } @@ -201,28 +208,38 @@ func (d *dnsResolver) Close() { func (d *dnsResolver) watcher() { defer d.wg.Done() + backoffIndex := 1 for { - select { - case <-d.ctx.Done(): - return - case <-d.rn: - } - state, err := d.lookup() if err != nil { + // Report error to the underlying grpc.ClientConn. d.cc.ReportError(err) } else { - d.cc.UpdateState(*state) + err = d.cc.UpdateState(*state) } - // Sleep to prevent excessive re-resolutions. Incoming resolution requests - // will be queued in d.rn. - t := time.NewTimer(minDNSResRate) + var timer *time.Timer + if err == nil { + // Success resolving, wait for the next ResolveNow. However, also wait 30 seconds at the very least + // to prevent constantly re-resolving. 
+ backoffIndex = 1 + timer = newTimerDNSResRate(minDNSResRate) + select { + case <-d.ctx.Done(): + timer.Stop() + return + case <-d.rn: + } + } else { + // Poll on an error found in DNS Resolver or an error received from ClientConn. + timer = newTimer(backoff.DefaultExponential.Backoff(backoffIndex)) + backoffIndex++ + } select { - case <-t.C: case <-d.ctx.Done(): - t.Stop() + timer.Stop() return + case <-timer.C: } } } @@ -260,18 +277,13 @@ func (d *dnsResolver) lookupSRV() ([]resolver.Address, error) { return newAddrs, nil } -var filterError = func(err error) error { +func handleDNSError(err error, lookupType string) error { if dnsErr, ok := err.(*net.DNSError); ok && !dnsErr.IsTimeout && !dnsErr.IsTemporary { // Timeouts and temporary errors should be communicated to gRPC to // attempt another DNS query (with backoff). Other errors should be // suppressed (they may represent the absence of a TXT record). return nil } - return err -} - -func handleDNSError(err error, lookupType string) error { - err = filterError(err) if err != nil { err = fmt.Errorf("dns: %v record lookup error: %v", lookupType, err) logger.Info(err) @@ -306,12 +318,12 @@ func (d *dnsResolver) lookupTXT() *serviceconfig.ParseResult { } func (d *dnsResolver) lookupHost() ([]resolver.Address, error) { - var newAddrs []resolver.Address addrs, err := d.resolver.LookupHost(d.ctx, d.host) if err != nil { err = handleDNSError(err, "A") return nil, err } + newAddrs := make([]resolver.Address, 0, len(addrs)) for _, a := range addrs { ip, ok := formatIP(a) if !ok { diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/go113.go b/vendor/google.golang.org/grpc/internal/resolver/dns/go113.go deleted file mode 100644 index 8783a8cf8..000000000 --- a/vendor/google.golang.org/grpc/internal/resolver/dns/go113.go +++ /dev/null @@ -1,33 +0,0 @@ -// +build go1.13 - -/* - * - * Copyright 2019 gRPC authors. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package dns - -import "net" - -func init() { - filterError = func(err error) error { - if dnsErr, ok := err.(*net.DNSError); ok && dnsErr.IsNotFound { - // The name does not exist; not an error. - return nil - } - return err - } -} diff --git a/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go b/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go index bd4b8875f..badbdbf59 100644 --- a/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go +++ b/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go @@ -46,6 +46,22 @@ type BalancerConfig struct { type intermediateBalancerConfig []map[string]json.RawMessage +// MarshalJSON implements the json.Marshaler interface. +// +// It marshals the balancer and config into a length-1 slice +// ([]map[string]config). +func (bc *BalancerConfig) MarshalJSON() ([]byte, error) { + if bc.Config == nil { + // If config is nil, return empty config `{}`. + return []byte(fmt.Sprintf(`[{%q: %v}]`, bc.Name, "{}")), nil + } + c, err := json.Marshal(bc.Config) + if err != nil { + return nil, err + } + return []byte(fmt.Sprintf(`[{%q: %s}]`, bc.Name, c)), nil +} + // UnmarshalJSON implements the json.Unmarshaler interface. 
// // ServiceConfig contains a list of loadBalancingConfigs, each with a name and @@ -62,6 +78,7 @@ func (bc *BalancerConfig) UnmarshalJSON(b []byte) error { return err } + var names []string for i, lbcfg := range ir { if len(lbcfg) != 1 { return fmt.Errorf("invalid loadBalancingConfig: entry %v does not contain exactly 1 policy/config pair: %q", i, lbcfg) @@ -76,6 +93,7 @@ func (bc *BalancerConfig) UnmarshalJSON(b []byte) error { for name, jsonCfg = range lbcfg { } + names = append(names, name) builder := balancer.Get(name) if builder == nil { // If the balancer is not registered, move on to the next config. @@ -104,7 +122,7 @@ func (bc *BalancerConfig) UnmarshalJSON(b []byte) error { // return. This means we had a loadBalancingConfig slice but did not // encounter a registered policy. The config is considered invalid in this // case. - return fmt.Errorf("invalid loadBalancingConfig: no supported policies found") + return fmt.Errorf("invalid loadBalancingConfig: no supported policies found in %v", names) } // MethodConfig defines the configuration recommended by the service providers for a diff --git a/vendor/google.golang.org/grpc/internal/status/status.go b/vendor/google.golang.org/grpc/internal/status/status.go index 710223b8d..e5c6513ed 100644 --- a/vendor/google.golang.org/grpc/internal/status/status.go +++ b/vendor/google.golang.org/grpc/internal/status/status.go @@ -97,7 +97,7 @@ func (s *Status) Err() error { if s.Code() == codes.OK { return nil } - return &Error{e: s.Proto()} + return &Error{s: s} } // WithDetails returns a new status with the provided details messages appended to the status. @@ -136,19 +136,23 @@ func (s *Status) Details() []interface{} { return details } +func (s *Status) String() string { + return fmt.Sprintf("rpc error: code = %s desc = %s", s.Code(), s.Message()) +} + // Error wraps a pointer of a status proto. It implements error and Status, // and a nil *Error should never be returned by this package. 
type Error struct { - e *spb.Status + s *Status } func (e *Error) Error() string { - return fmt.Sprintf("rpc error: code = %s desc = %s", codes.Code(e.e.GetCode()), e.e.GetMessage()) + return e.s.String() } // GRPCStatus returns the Status represented by se. func (e *Error) GRPCStatus() *Status { - return FromProto(e.e) + return e.s } // Is implements future error.Is functionality. @@ -158,5 +162,5 @@ func (e *Error) Is(target error) bool { if !ok { return false } - return proto.Equal(e.e, tse.e) + return proto.Equal(e.s.s, tse.s.s) } diff --git a/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go b/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go index 4b2964f2a..b3a72276d 100644 --- a/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go +++ b/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go @@ -1,5 +1,3 @@ -// +build !appengine - /* * * Copyright 2018 gRPC authors. diff --git a/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go index 7913ef1db..999f52cd7 100644 --- a/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go +++ b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go @@ -1,4 +1,5 @@ -// +build !linux appengine +//go:build !linux +// +build !linux /* * @@ -35,41 +36,41 @@ var logger = grpclog.Component("core") func log() { once.Do(func() { - logger.Info("CPU time info is unavailable on non-linux or appengine environment.") + logger.Info("CPU time info is unavailable on non-linux environments.") }) } -// GetCPUTime returns the how much CPU time has passed since the start of this process. -// It always returns 0 under non-linux or appengine environment. +// GetCPUTime returns the how much CPU time has passed since the start of this +// process. It always returns 0 under non-linux environments. 
func GetCPUTime() int64 { log() return 0 } -// Rusage is an empty struct under non-linux or appengine environment. +// Rusage is an empty struct under non-linux environments. type Rusage struct{} -// GetRusage is a no-op function under non-linux or appengine environment. +// GetRusage is a no-op function under non-linux environments. func GetRusage() *Rusage { log() return nil } // CPUTimeDiff returns the differences of user CPU time and system CPU time used -// between two Rusage structs. It a no-op function for non-linux or appengine environment. +// between two Rusage structs. It a no-op function for non-linux environments. func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) { log() return 0, 0 } -// SetTCPUserTimeout is a no-op function under non-linux or appengine environments +// SetTCPUserTimeout is a no-op function under non-linux environments. func SetTCPUserTimeout(conn net.Conn, timeout time.Duration) error { log() return nil } -// GetTCPUserTimeout is a no-op function under non-linux or appengine environments -// a negative return value indicates the operation is not supported +// GetTCPUserTimeout is a no-op function under non-linux environments. 
+// A negative return value indicates the operation is not supported func GetTCPUserTimeout(conn net.Conn) (int, error) { log() return -1, nil diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go index 40ef23923..45532f8ae 100644 --- a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go +++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go @@ -20,13 +20,17 @@ package transport import ( "bytes" + "errors" "fmt" "runtime" + "strconv" "sync" "sync/atomic" "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" + "google.golang.org/grpc/internal/grpcutil" + "google.golang.org/grpc/status" ) var updateHeaderTblSize = func(e *hpack.Encoder, v uint32) { @@ -128,6 +132,14 @@ type cleanupStream struct { func (c *cleanupStream) isTransportResponseFrame() bool { return c.rst } // Results in a RST_STREAM +type earlyAbortStream struct { + streamID uint32 + contentSubtype string + status *status.Status +} + +func (*earlyAbortStream) isTransportResponseFrame() bool { return false } + type dataFrame struct { streamID uint32 endStream bool @@ -284,7 +296,7 @@ type controlBuffer struct { // closed and nilled when transportResponseFrames drops below the // threshold. Both fields are protected by mu. transportResponseFrames int - trfChan atomic.Value // *chan struct{} + trfChan atomic.Value // chan struct{} } func newControlBuffer(done <-chan struct{}) *controlBuffer { @@ -298,10 +310,10 @@ func newControlBuffer(done <-chan struct{}) *controlBuffer { // throttle blocks if there are too many incomingSettings/cleanupStreams in the // controlbuf. 
func (c *controlBuffer) throttle() { - ch, _ := c.trfChan.Load().(*chan struct{}) + ch, _ := c.trfChan.Load().(chan struct{}) if ch != nil { select { - case <-*ch: + case <-ch: case <-c.done: } } @@ -335,8 +347,7 @@ func (c *controlBuffer) executeAndPut(f func(it interface{}) bool, it cbItem) (b if c.transportResponseFrames == maxQueuedTransportResponseFrames { // We are adding the frame that puts us over the threshold; create // a throttling channel. - ch := make(chan struct{}) - c.trfChan.Store(&ch) + c.trfChan.Store(make(chan struct{})) } } c.mu.Unlock() @@ -377,9 +388,9 @@ func (c *controlBuffer) get(block bool) (interface{}, error) { if c.transportResponseFrames == maxQueuedTransportResponseFrames { // We are removing the frame that put us over the // threshold; close and clear the throttling channel. - ch := c.trfChan.Load().(*chan struct{}) - close(*ch) - c.trfChan.Store((*chan struct{})(nil)) + ch := c.trfChan.Load().(chan struct{}) + close(ch) + c.trfChan.Store((chan struct{})(nil)) } c.transportResponseFrames-- } @@ -395,7 +406,6 @@ func (c *controlBuffer) get(block bool) (interface{}, error) { select { case <-c.ch: case <-c.done: - c.finish() return nil, ErrConnClosing } } @@ -420,6 +430,14 @@ func (c *controlBuffer) finish() { hdr.onOrphaned(ErrConnClosing) } } + // In case throttle() is currently in flight, it needs to be unblocked. + // Otherwise, the transport may not close, since the transport is closed by + // the reader encountering the connection error. 
+ ch, _ := c.trfChan.Load().(chan struct{}) + if ch != nil { + close(ch) + } + c.trfChan.Store((chan struct{})(nil)) c.mu.Unlock() } @@ -749,6 +767,24 @@ func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error { return nil } +func (l *loopyWriter) earlyAbortStreamHandler(eas *earlyAbortStream) error { + if l.side == clientSide { + return errors.New("earlyAbortStream not handled on client") + } + + headerFields := []hpack.HeaderField{ + {Name: ":status", Value: "200"}, + {Name: "content-type", Value: grpcutil.ContentType(eas.contentSubtype)}, + {Name: "grpc-status", Value: strconv.Itoa(int(eas.status.Code()))}, + {Name: "grpc-message", Value: encodeGrpcMessage(eas.status.Message())}, + } + + if err := l.writeHeader(eas.streamID, true, headerFields, nil); err != nil { + return err + } + return nil +} + func (l *loopyWriter) incomingGoAwayHandler(*incomingGoAway) error { if l.side == clientSide { l.draining = true @@ -787,6 +823,8 @@ func (l *loopyWriter) handle(i interface{}) error { return l.registerStreamHandler(i) case *cleanupStream: return l.cleanupStreamHandler(i) + case *earlyAbortStream: + return l.earlyAbortStreamHandler(i) case *incomingGoAway: return l.incomingGoAwayHandler(i) case *dataFrame: diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go index 05d3871e6..1c3459c2b 100644 --- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go @@ -141,9 +141,8 @@ type serverHandlerTransport struct { stats stats.Handler } -func (ht *serverHandlerTransport) Close() error { +func (ht *serverHandlerTransport) Close() { ht.closeOnce.Do(ht.closeCloseChanOnce) - return nil } func (ht *serverHandlerTransport) closeCloseChanOnce() { close(ht.closedCh) } diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go 
b/vendor/google.golang.org/grpc/internal/transport/http2_client.go index d5bbe720d..755863074 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -24,6 +24,7 @@ import ( "io" "math" "net" + "net/http" "strconv" "strings" "sync" @@ -32,15 +33,14 @@ import ( "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" - "google.golang.org/grpc/internal/grpcutil" - imetadata "google.golang.org/grpc/internal/metadata" - "google.golang.org/grpc/internal/transport/networktype" - "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" - "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/channelz" + icredentials "google.golang.org/grpc/internal/credentials" + "google.golang.org/grpc/internal/grpcutil" + imetadata "google.golang.org/grpc/internal/metadata" "google.golang.org/grpc/internal/syscall" + "google.golang.org/grpc/internal/transport/networktype" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" @@ -116,6 +116,9 @@ type http2Client struct { // goAwayReason records the http2.ErrCode and debug data received with the // GoAway frame. goAwayReason GoAwayReason + // goAwayDebugMessage contains a detailed human readable string about a + // GoAway frame, useful for error messages. + goAwayDebugMessage string // A condition variable used to signal when the keepalive goroutine should // go dormant. The condition for dormancy is based on the number of active // streams and the `PermitWithoutStream` keepalive client parameter. And @@ -238,9 +241,16 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts // Attributes field of resolver.Address, which is shoved into connectCtx // and passed to the credential handshaker. This makes it possible for // address specific arbitrary data to reach the credential handshaker. 
- contextWithHandshakeInfo := internal.NewClientHandshakeInfoContext.(func(context.Context, credentials.ClientHandshakeInfo) context.Context) - connectCtx = contextWithHandshakeInfo(connectCtx, credentials.ClientHandshakeInfo{Attributes: addr.Attributes}) - conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.ServerName, conn) + connectCtx = icredentials.NewClientHandshakeInfoContext(connectCtx, credentials.ClientHandshakeInfo{Attributes: addr.Attributes}) + rawConn := conn + // Pull the deadline from the connectCtx, which will be used for + // timeouts in the authentication protocol handshake. Can ignore the + // boolean as the deadline will return the zero value, which will make + // the conn not timeout on I/O operations. + deadline, _ := connectCtx.Deadline() + rawConn.SetDeadline(deadline) + conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.ServerName, rawConn) + rawConn.SetDeadline(time.Time{}) if err != nil { return nil, connectionErrorf(isTemporary(err), err, "transport: authentication handshake failed: %v", err) } @@ -347,12 +357,14 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts // Send connection preface to server. n, err := t.conn.Write(clientPreface) if err != nil { - t.Close() - return nil, connectionErrorf(true, err, "transport: failed to write client preface: %v", err) + err = connectionErrorf(true, err, "transport: failed to write client preface: %v", err) + t.Close(err) + return nil, err } if n != len(clientPreface) { - t.Close() - return nil, connectionErrorf(true, err, "transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface)) + err = connectionErrorf(true, nil, "transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface)) + t.Close(err) + return nil, err } var ss []http2.Setting @@ -370,14 +382,16 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts } err = t.framer.fr.WriteSettings(ss...) 
if err != nil { - t.Close() - return nil, connectionErrorf(true, err, "transport: failed to write initial settings frame: %v", err) + err = connectionErrorf(true, err, "transport: failed to write initial settings frame: %v", err) + t.Close(err) + return nil, err } // Adjust the connection flow control window if needed. if delta := uint32(icwz - defaultWindowSize); delta > 0 { if err := t.framer.fr.WriteWindowUpdate(0, delta); err != nil { - t.Close() - return nil, connectionErrorf(true, err, "transport: failed to write window update: %v", err) + err = connectionErrorf(true, err, "transport: failed to write window update: %v", err) + t.Close(err) + return nil, err } } @@ -394,11 +408,10 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts logger.Errorf("transport: loopyWriter.run returning. Err: %v", err) } } - // If it's a connection error, let reader goroutine handle it - // since there might be data in the buffers. - if _, ok := err.(net.Error); !ok { - t.conn.Close() - } + // Do not close the transport. Let reader goroutine handle it since + // there might be data in the buffers. + t.conn.Close() + t.controlBuf.finish() close(t.writerDone) }() return t, nil @@ -454,7 +467,7 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) Method: callHdr.Method, AuthInfo: t.authInfo, } - ctxWithRequestInfo := internal.NewRequestInfoContext.(func(context.Context, credentials.RequestInfo) context.Context)(ctx, ri) + ctxWithRequestInfo := icredentials.NewRequestInfoContext(ctx, ri) authData, err := t.getTrAuthData(ctxWithRequestInfo, aud) if err != nil { return nil, err @@ -603,26 +616,35 @@ func (t *http2Client) getCallAuthData(ctx context.Context, audience string, call return callAuthData, nil } -// PerformedIOError wraps an error to indicate IO may have been performed -// before the error occurred. -type PerformedIOError struct { +// NewStreamError wraps an error and reports additional information. 
Typically +// NewStream errors result in transparent retry, as they mean nothing went onto +// the wire. However, there are two notable exceptions: +// +// 1. If the stream headers violate the max header list size allowed by the +// server. In this case there is no reason to retry at all, as it is +// assumed the RPC would continue to fail on subsequent attempts. +// 2. If the credentials errored when requesting their headers. In this case, +// it's possible a retry can fix the problem, but indefinitely transparently +// retrying is not appropriate as it is likely the credentials, if they can +// eventually succeed, would need I/O to do so. +type NewStreamError struct { Err error + + DoNotRetry bool + DoNotTransparentRetry bool } -// Error implements error. -func (p PerformedIOError) Error() string { - return p.Err.Error() +func (e NewStreamError) Error() string { + return e.Err.Error() } // NewStream creates a stream and registers it into the transport as "active" -// streams. +// streams. All non-nil errors returned will be *NewStreamError. func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Stream, err error) { ctx = peer.NewContext(ctx, t.getPeer()) headerFields, err := t.createHeaderFields(ctx, callHdr) if err != nil { - // We may have performed I/O in the per-RPC creds callback, so do not - // allow transparent retry. 
- return nil, PerformedIOError{err} + return nil, &NewStreamError{Err: err, DoNotTransparentRetry: true} } s := t.newStream(ctx, callHdr) cleanup := func(err error) { @@ -722,23 +744,23 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea return true }, hdr) if err != nil { - return nil, err + return nil, &NewStreamError{Err: err} } if success { break } if hdrListSizeErr != nil { - return nil, hdrListSizeErr + return nil, &NewStreamError{Err: hdrListSizeErr, DoNotRetry: true} } firstTry = false select { case <-ch: - case <-s.ctx.Done(): - return nil, ContextErr(s.ctx.Err()) + case <-ctx.Done(): + return nil, &NewStreamError{Err: ContextErr(ctx.Err())} case <-t.goAway: - return nil, errStreamDrain + return nil, &NewStreamError{Err: errStreamDrain} case <-t.ctx.Done(): - return nil, ErrConnClosing + return nil, &NewStreamError{Err: ErrConnClosing} } } if t.statsHandler != nil { @@ -845,12 +867,12 @@ func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2. // This method blocks until the addrConn that initiated this transport is // re-connected. This happens because t.onClose() begins reconnect logic at the // addrConn level and blocks until the addrConn is successfully connected. -func (t *http2Client) Close() error { +func (t *http2Client) Close(err error) { t.mu.Lock() // Make sure we only Close once. if t.state == closing { t.mu.Unlock() - return nil + return } // Call t.onClose before setting the state to closing to prevent the client // from attempting to create new streams ASAP. @@ -866,13 +888,25 @@ func (t *http2Client) Close() error { t.mu.Unlock() t.controlBuf.finish() t.cancel() - err := t.conn.Close() + t.conn.Close() if channelz.IsOn() { channelz.RemoveEntry(t.channelzID) } + // Append info about previous goaways if there were any, since this may be important + // for understanding the root cause for this connection to be closed. 
+ _, goAwayDebugMessage := t.GetGoAwayReason() + + var st *status.Status + if len(goAwayDebugMessage) > 0 { + st = status.Newf(codes.Unavailable, "closing transport due to: %v, received prior goaway: %v", err, goAwayDebugMessage) + err = st.Err() + } else { + st = status.New(codes.Unavailable, err.Error()) + } + // Notify all active streams. for _, s := range streams { - t.closeStream(s, ErrConnClosing, false, http2.ErrCodeNo, status.New(codes.Unavailable, ErrConnClosing.Desc), nil, false) + t.closeStream(s, err, false, http2.ErrCodeNo, st, nil, false) } if t.statsHandler != nil { connEnd := &stats.ConnEnd{ @@ -880,7 +914,6 @@ func (t *http2Client) Close() error { } t.statsHandler.HandleConn(t.ctx, connEnd) } - return err } // GracefulClose sets the state to draining, which prevents new streams from @@ -899,7 +932,7 @@ func (t *http2Client) GracefulClose() { active := len(t.activeStreams) t.mu.Unlock() if active == 0 { - t.Close() + t.Close(ErrConnClosing) return } t.controlBuf.put(&incomingGoAway{}) @@ -1145,9 +1178,9 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { } } id := f.LastStreamID - if id > 0 && id%2 != 1 { + if id > 0 && id%2 == 0 { t.mu.Unlock() - t.Close() + t.Close(connectionErrorf(true, nil, "received goaway with non-zero even-numbered numbered stream id: %v", id)) return } // A client can receive multiple GoAways from the server (see @@ -1165,7 +1198,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { // If there are multiple GoAways the first one should always have an ID greater than the following ones. 
if id > t.prevGoAwayID { t.mu.Unlock() - t.Close() + t.Close(connectionErrorf(true, nil, "received goaway with stream id: %v, which exceeds stream id of previous goaway: %v", id, t.prevGoAwayID)) return } default: @@ -1195,7 +1228,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { active := len(t.activeStreams) t.mu.Unlock() if active == 0 { - t.Close() + t.Close(connectionErrorf(true, nil, "received goaway and there are no active streams")) } } @@ -1211,12 +1244,17 @@ func (t *http2Client) setGoAwayReason(f *http2.GoAwayFrame) { t.goAwayReason = GoAwayTooManyPings } } + if len(f.DebugData()) == 0 { + t.goAwayDebugMessage = fmt.Sprintf("code: %s", f.ErrCode) + } else { + t.goAwayDebugMessage = fmt.Sprintf("code: %s, debug data: %q", f.ErrCode, string(f.DebugData())) + } } -func (t *http2Client) GetGoAwayReason() GoAwayReason { +func (t *http2Client) GetGoAwayReason() (GoAwayReason, string) { t.mu.Lock() defer t.mu.Unlock() - return t.goAwayReason + return t.goAwayReason, t.goAwayDebugMessage } func (t *http2Client) handleWindowUpdate(f *http2.WindowUpdateFrame) { @@ -1243,11 +1281,124 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { return } - state := &decodeState{} - // Initialize isGRPC value to be !initialHeader, since if a gRPC Response-Headers has already been received, then it means that the peer is speaking gRPC and we are in gRPC mode. - state.data.isGRPC = !initialHeader - if h2code, err := state.decodeHeader(frame); err != nil { - t.closeStream(s, err, true, h2code, status.Convert(err), nil, endStream) + // frame.Truncated is set to true when framer detects that the current header + // list size hits MaxHeaderListSize limit. 
+ if frame.Truncated { + se := status.New(codes.Internal, "peer header list size exceeded limit") + t.closeStream(s, se.Err(), true, http2.ErrCodeFrameSize, se, nil, endStream) + return + } + + var ( + // If a gRPC Response-Headers has already been received, then it means + // that the peer is speaking gRPC and we are in gRPC mode. + isGRPC = !initialHeader + mdata = make(map[string][]string) + contentTypeErr = "malformed header: missing HTTP content-type" + grpcMessage string + statusGen *status.Status + recvCompress string + httpStatusCode *int + httpStatusErr string + rawStatusCode = codes.Unknown + // headerError is set if an error is encountered while parsing the headers + headerError string + ) + + if initialHeader { + httpStatusErr = "malformed header: missing HTTP status" + } + + for _, hf := range frame.Fields { + switch hf.Name { + case "content-type": + if _, validContentType := grpcutil.ContentSubtype(hf.Value); !validContentType { + contentTypeErr = fmt.Sprintf("transport: received unexpected content-type %q", hf.Value) + break + } + contentTypeErr = "" + mdata[hf.Name] = append(mdata[hf.Name], hf.Value) + isGRPC = true + case "grpc-encoding": + recvCompress = hf.Value + case "grpc-status": + code, err := strconv.ParseInt(hf.Value, 10, 32) + if err != nil { + se := status.New(codes.Internal, fmt.Sprintf("transport: malformed grpc-status: %v", err)) + t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream) + return + } + rawStatusCode = codes.Code(uint32(code)) + case "grpc-message": + grpcMessage = decodeGrpcMessage(hf.Value) + case "grpc-status-details-bin": + var err error + statusGen, err = decodeGRPCStatusDetails(hf.Value) + if err != nil { + headerError = fmt.Sprintf("transport: malformed grpc-status-details-bin: %v", err) + } + case ":status": + if hf.Value == "200" { + httpStatusErr = "" + statusCode := 200 + httpStatusCode = &statusCode + break + } + + c, err := strconv.ParseInt(hf.Value, 10, 32) + if err != nil { + se := 
status.New(codes.Internal, fmt.Sprintf("transport: malformed http-status: %v", err)) + t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream) + return + } + statusCode := int(c) + httpStatusCode = &statusCode + + httpStatusErr = fmt.Sprintf( + "unexpected HTTP status code received from server: %d (%s)", + statusCode, + http.StatusText(statusCode), + ) + default: + if isReservedHeader(hf.Name) && !isWhitelistedHeader(hf.Name) { + break + } + v, err := decodeMetadataHeader(hf.Name, hf.Value) + if err != nil { + headerError = fmt.Sprintf("transport: malformed %s: %v", hf.Name, err) + logger.Warningf("Failed to decode metadata header (%q, %q): %v", hf.Name, hf.Value, err) + break + } + mdata[hf.Name] = append(mdata[hf.Name], v) + } + } + + if !isGRPC || httpStatusErr != "" { + var code = codes.Internal // when header does not include HTTP status, return INTERNAL + + if httpStatusCode != nil { + var ok bool + code, ok = HTTPStatusConvTab[*httpStatusCode] + if !ok { + code = codes.Unknown + } + } + var errs []string + if httpStatusErr != "" { + errs = append(errs, httpStatusErr) + } + if contentTypeErr != "" { + errs = append(errs, contentTypeErr) + } + // Verify the HTTP response is a 200. + se := status.New(code, strings.Join(errs, "; ")) + t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream) + return + } + + if headerError != "" { + se := status.New(codes.Internal, headerError) + t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream) return } @@ -1282,9 +1433,9 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { // These values can be set without any synchronization because // stream goroutine will read it only after seeing a closed // headerChan which we'll close after setting this. 
- s.recvCompress = state.data.encoding - if len(state.data.mdata) > 0 { - s.header = state.data.mdata + s.recvCompress = recvCompress + if len(mdata) > 0 { + s.header = mdata } } else { // HEADERS frame block carries a Trailers-Only. @@ -1297,9 +1448,13 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { return } + if statusGen == nil { + statusGen = status.New(rawStatusCode, grpcMessage) + } + // if client received END_STREAM from server while stream was still active, send RST_STREAM rst := s.getState() == streamActive - t.closeStream(s, io.EOF, rst, http2.ErrCodeNo, state.status(), state.data.mdata, true) + t.closeStream(s, io.EOF, rst, http2.ErrCodeNo, statusGen, mdata, true) } // reader runs as a separate goroutine in charge of reading data from network @@ -1313,7 +1468,8 @@ func (t *http2Client) reader() { // Check the validity of server preface. frame, err := t.framer.fr.ReadFrame() if err != nil { - t.Close() // this kicks off resetTransport, so must be last before return + err = connectionErrorf(true, err, "error reading server preface: %v", err) + t.Close(err) // this kicks off resetTransport, so must be last before return return } t.conn.SetReadDeadline(time.Time{}) // reset deadline once we get the settings frame (we didn't time out, yay!) @@ -1322,7 +1478,8 @@ func (t *http2Client) reader() { } sf, ok := frame.(*http2.SettingsFrame) if !ok { - t.Close() // this kicks off resetTransport, so must be last before return + // this kicks off resetTransport, so must be last before return + t.Close(connectionErrorf(true, nil, "initial http2 frame from server is not a settings frame: %T", frame)) return } t.onPrefaceReceipt() @@ -1358,7 +1515,7 @@ func (t *http2Client) reader() { continue } else { // Transport error. 
- t.Close() + t.Close(connectionErrorf(true, err, "error reading from server: %v", err)) return } } @@ -1417,7 +1574,7 @@ func (t *http2Client) keepalive() { continue } if outstandingPing && timeoutLeft <= 0 { - t.Close() + t.Close(connectionErrorf(true, nil, "keepalive ping failed to receive ACK within timeout")) return } t.mu.Lock() diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go index 7c6c89d4f..19c13e041 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -102,11 +102,11 @@ type http2Server struct { mu sync.Mutex // guard the following - // drainChan is initialized when drain(...) is called the first time. + // drainChan is initialized when Drain() is called the first time. // After which the server writes out the first GoAway(with ID 2^31-1) frame. // Then an independent goroutine will be launched to later send the second GoAway. // During this time we don't want to write another first GoAway(with ID 2^31 -1) frame. - // Thus call to drain(...) will be a no-op if drainChan is already initialized since draining is + // Thus call to Drain() will be a no-op if drainChan is already initialized since draining is // already underway. drainChan chan struct{} state transportState @@ -125,9 +125,30 @@ type http2Server struct { connectionID uint64 } -// newHTTP2Server constructs a ServerTransport based on HTTP2. ConnectionError is -// returned if something goes wrong. -func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err error) { +// NewServerTransport creates a http2 transport with conn and configuration +// options from config. +// +// It returns a non-nil transport and a nil error on success. On failure, it +// returns a non-nil transport and a nil-error. 
For a special case where the +// underlying conn gets closed before the client preface could be read, it +// returns a nil transport and a nil error. +func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, err error) { + var authInfo credentials.AuthInfo + rawConn := conn + if config.Credentials != nil { + var err error + conn, authInfo, err = config.Credentials.ServerHandshake(rawConn) + if err != nil { + // ErrConnDispatched means that the connection was dispatched away + // from gRPC; those connections should be left open. io.EOF means + // the connection was closed before handshaking completed, which can + // happen naturally from probers. Return these errors directly. + if err == credentials.ErrConnDispatched || err == io.EOF { + return nil, err + } + return nil, connectionErrorf(false, err, "ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err) + } + } writeBufSize := config.WriteBufferSize readBufSize := config.ReadBufferSize maxHeaderListSize := defaultServerMaxHeaderListSize @@ -210,14 +231,15 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err if kep.MinTime == 0 { kep.MinTime = defaultKeepalivePolicyMinTime } + done := make(chan struct{}) t := &http2Server{ - ctx: context.Background(), + ctx: setConnection(context.Background(), rawConn), done: done, conn: conn, remoteAddr: conn.RemoteAddr(), localAddr: conn.LocalAddr(), - authInfo: config.AuthInfo, + authInfo: authInfo, framer: framer, readerDone: make(chan struct{}), writerDone: make(chan struct{}), @@ -266,6 +288,13 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err // Check the validity of client preface. preface := make([]byte, len(clientPreface)) if _, err := io.ReadFull(t.conn, preface); err != nil { + // In deployments where a gRPC server runs behind a cloud load balancer + // which performs regular TCP level health checks, the connection is + // closed immediately by the latter. 
Skipping the error here will help + // reduce log clutter. + if err == io.EOF { + return nil, nil + } return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to receive the preface from client: %v", err) } if !bytes.Equal(preface, clientPreface) { @@ -295,6 +324,7 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err } } t.conn.Close() + t.controlBuf.finish() close(t.writerDone) }() go t.keepalive() @@ -304,37 +334,92 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err // operateHeader takes action on the decoded headers. func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) (fatal bool) { streamID := frame.Header().StreamID - state := &decodeState{ - serverSide: true, - } - if h2code, err := state.decodeHeader(frame); err != nil { - if _, ok := status.FromError(err); ok { - t.controlBuf.put(&cleanupStream{ - streamID: streamID, - rst: true, - rstCode: h2code, - onWrite: func() {}, - }) - } + + // frame.Truncated is set to true when framer detects that the current header + // list size hits MaxHeaderListSize limit. + if frame.Truncated { + t.controlBuf.put(&cleanupStream{ + streamID: streamID, + rst: true, + rstCode: http2.ErrCodeFrameSize, + onWrite: func() {}, + }) return false } buf := newRecvBuffer() s := &Stream{ - id: streamID, - st: t, - buf: buf, - fc: &inFlow{limit: uint32(t.initialWindowSize)}, - recvCompress: state.data.encoding, - method: state.data.method, - contentSubtype: state.data.contentSubtype, + id: streamID, + st: t, + buf: buf, + fc: &inFlow{limit: uint32(t.initialWindowSize)}, + } + + var ( + // If a gRPC Response-Headers has already been received, then it means + // that the peer is speaking gRPC and we are in gRPC mode. 
+ isGRPC = false + mdata = make(map[string][]string) + httpMethod string + // headerError is set if an error is encountered while parsing the headers + headerError bool + + timeoutSet bool + timeout time.Duration + ) + + for _, hf := range frame.Fields { + switch hf.Name { + case "content-type": + contentSubtype, validContentType := grpcutil.ContentSubtype(hf.Value) + if !validContentType { + break + } + mdata[hf.Name] = append(mdata[hf.Name], hf.Value) + s.contentSubtype = contentSubtype + isGRPC = true + case "grpc-encoding": + s.recvCompress = hf.Value + case ":method": + httpMethod = hf.Value + case ":path": + s.method = hf.Value + case "grpc-timeout": + timeoutSet = true + var err error + if timeout, err = decodeTimeout(hf.Value); err != nil { + headerError = true + } + default: + if isReservedHeader(hf.Name) && !isWhitelistedHeader(hf.Name) { + break + } + v, err := decodeMetadataHeader(hf.Name, hf.Value) + if err != nil { + headerError = true + logger.Warningf("Failed to decode metadata header (%q, %q): %v", hf.Name, hf.Value, err) + break + } + mdata[hf.Name] = append(mdata[hf.Name], v) + } + } + + if !isGRPC || headerError { + t.controlBuf.put(&cleanupStream{ + streamID: streamID, + rst: true, + rstCode: http2.ErrCodeProtocol, + onWrite: func() {}, + }) + return false } + if frame.StreamEnded() { // s is just created by the caller. No lock needed. s.state = streamReadDone } - if state.data.timeoutSet { - s.ctx, s.cancel = context.WithTimeout(t.ctx, state.data.timeout) + if timeoutSet { + s.ctx, s.cancel = context.WithTimeout(t.ctx, timeout) } else { s.ctx, s.cancel = context.WithCancel(t.ctx) } @@ -347,33 +432,13 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( } s.ctx = peer.NewContext(s.ctx, pr) // Attach the received metadata to the context. 
- if len(state.data.mdata) > 0 { - s.ctx = metadata.NewIncomingContext(s.ctx, state.data.mdata) - } - if state.data.statsTags != nil { - s.ctx = stats.SetIncomingTags(s.ctx, state.data.statsTags) - } - if state.data.statsTrace != nil { - s.ctx = stats.SetIncomingTrace(s.ctx, state.data.statsTrace) - } - if t.inTapHandle != nil { - var err error - info := &tap.Info{ - FullMethodName: state.data.method, + if len(mdata) > 0 { + s.ctx = metadata.NewIncomingContext(s.ctx, mdata) + if statsTags := mdata["grpc-tags-bin"]; len(statsTags) > 0 { + s.ctx = stats.SetIncomingTags(s.ctx, []byte(statsTags[len(statsTags)-1])) } - s.ctx, err = t.inTapHandle(s.ctx, info) - if err != nil { - if logger.V(logLevel) { - logger.Warningf("transport: http2Server.operateHeaders got an error from InTapHandle: %v", err) - } - t.controlBuf.put(&cleanupStream{ - streamID: s.id, - rst: true, - rstCode: http2.ErrCodeRefusedStream, - onWrite: func() {}, - }) - s.cancel() - return false + if statsTrace := mdata["grpc-trace-bin"]; len(statsTrace) > 0 { + s.ctx = stats.SetIncomingTrace(s.ctx, []byte(statsTrace[len(statsTrace)-1])) } } t.mu.Lock() @@ -403,10 +468,10 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( return true } t.maxStreamID = streamID - if state.data.httpMethod != http.MethodPost { + if httpMethod != http.MethodPost { t.mu.Unlock() if logger.V(logLevel) { - logger.Warningf("transport: http2Server.operateHeaders parsed a :method field: %v which should be POST", state.data.httpMethod) + logger.Infof("transport: http2Server.operateHeaders parsed a :method field: %v which should be POST", httpMethod) } t.controlBuf.put(&cleanupStream{ streamID: streamID, @@ -417,6 +482,25 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( s.cancel() return false } + if t.inTapHandle != nil { + var err error + if s.ctx, err = t.inTapHandle(s.ctx, &tap.Info{FullMethodName: s.method}); err != nil { + t.mu.Unlock() + if logger.V(logLevel) { 
+ logger.Infof("transport: http2Server.operateHeaders got an error from InTapHandle: %v", err) + } + stat, ok := status.FromError(err) + if !ok { + stat = status.New(codes.PermissionDenied, err.Error()) + } + t.controlBuf.put(&earlyAbortStream{ + streamID: s.id, + contentSubtype: s.contentSubtype, + status: stat, + }) + return false + } + } t.activeStreams[streamID] = s if len(t.activeStreams) == 1 { t.idle = time.Time{} @@ -438,7 +522,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( LocalAddr: t.localAddr, Compression: s.recvCompress, WireLength: int(frame.Header().Length), - Header: metadata.MD(state.data.mdata).Copy(), + Header: metadata.MD(mdata).Copy(), } t.stats.HandleRPC(s.ctx, inHeader) } @@ -1005,12 +1089,12 @@ func (t *http2Server) keepalive() { if val <= 0 { // The connection has been idle for a duration of keepalive.MaxConnectionIdle or more. // Gracefully close the connection. - t.drain(http2.ErrCodeNo, []byte{}) + t.Drain() return } idleTimer.Reset(val) case <-ageTimer.C: - t.drain(http2.ErrCodeNo, []byte{}) + t.Drain() ageTimer.Reset(t.kp.MaxConnectionAgeGrace) select { case <-ageTimer.C: @@ -1064,11 +1148,11 @@ func (t *http2Server) keepalive() { // Close starts shutting down the http2Server transport. // TODO(zhaoq): Now the destruction is not blocked on any pending streams. This // could cause some resource issue. Revisit this later. 
-func (t *http2Server) Close() error { +func (t *http2Server) Close() { t.mu.Lock() if t.state == closing { t.mu.Unlock() - return errors.New("transport: Close() was already called") + return } t.state = closing streams := t.activeStreams @@ -1076,7 +1160,9 @@ func (t *http2Server) Close() error { t.mu.Unlock() t.controlBuf.finish() close(t.done) - err := t.conn.Close() + if err := t.conn.Close(); err != nil && logger.V(logLevel) { + logger.Infof("transport: error closing conn during Close: %v", err) + } if channelz.IsOn() { channelz.RemoveEntry(t.channelzID) } @@ -1088,7 +1174,6 @@ func (t *http2Server) Close() error { connEnd := &stats.ConnEnd{} t.stats.HandleConn(t.ctx, connEnd) } - return err } // deleteStream deletes the stream s from transport's active streams. @@ -1153,17 +1238,13 @@ func (t *http2Server) RemoteAddr() net.Addr { } func (t *http2Server) Drain() { - t.drain(http2.ErrCodeNo, []byte{}) -} - -func (t *http2Server) drain(code http2.ErrCode, debugData []byte) { t.mu.Lock() defer t.mu.Unlock() if t.drainChan != nil { return } t.drainChan = make(chan struct{}) - t.controlBuf.put(&goAway{code: code, debugData: debugData, headsUp: true}) + t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte{}, headsUp: true}) } var goAwayPing = &ping{data: [8]byte{1, 6, 1, 8, 0, 3, 3, 9}} @@ -1281,3 +1362,18 @@ func getJitter(v time.Duration) time.Duration { j := grpcrand.Int63n(2*r) - r return time.Duration(j) } + +type connectionKey struct{} + +// GetConnection gets the connection from the context. +func GetConnection(ctx context.Context) net.Conn { + conn, _ := ctx.Value(connectionKey{}).(net.Conn) + return conn +} + +// SetConnection adds the connection to the context to be able to get +// information about the destination ip and port for an incoming RPC. This also +// allows any unary or streaming interceptors to see the connection. 
+func setConnection(ctx context.Context, conn net.Conn) context.Context { + return context.WithValue(ctx, connectionKey{}, conn) +} diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go index c7dee140c..d8247bcdf 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http_util.go +++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go @@ -39,7 +39,6 @@ import ( spb "google.golang.org/genproto/googleapis/rpc/status" "google.golang.org/grpc/codes" "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/internal/grpcutil" "google.golang.org/grpc/status" ) @@ -96,53 +95,6 @@ var ( logger = grpclog.Component("transport") ) -type parsedHeaderData struct { - encoding string - // statusGen caches the stream status received from the trailer the server - // sent. Client side only. Do not access directly. After all trailers are - // parsed, use the status method to retrieve the status. - statusGen *status.Status - // rawStatusCode and rawStatusMsg are set from the raw trailer fields and are not - // intended for direct access outside of parsing. - rawStatusCode *int - rawStatusMsg string - httpStatus *int - // Server side only fields. - timeoutSet bool - timeout time.Duration - method string - httpMethod string - // key-value metadata map from the peer. - mdata map[string][]string - statsTags []byte - statsTrace []byte - contentSubtype string - - // isGRPC field indicates whether the peer is speaking gRPC (otherwise HTTP). - // - // We are in gRPC mode (peer speaking gRPC) if: - // * We are client side and have already received a HEADER frame that indicates gRPC peer. - // * The header contains valid a content-type, i.e. a string starts with "application/grpc" - // And we should handle error specific to gRPC. - // - // Otherwise (i.e. 
a content-type string starts without "application/grpc", or does not exist), we - // are in HTTP fallback mode, and should handle error specific to HTTP. - isGRPC bool - grpcErr error - httpErr error - contentTypeErr string -} - -// decodeState configures decoding criteria and records the decoded data. -type decodeState struct { - // whether decoding on server side or not - serverSide bool - - // Records the states during HPACK decoding. It will be filled with info parsed from HTTP HEADERS - // frame once decodeHeader function has been invoked and returned. - data parsedHeaderData -} - // isReservedHeader checks whether hdr belongs to HTTP2 headers // reserved by gRPC protocol. Any other headers are classified as the // user-specified metadata. @@ -180,14 +132,6 @@ func isWhitelistedHeader(hdr string) bool { } } -func (d *decodeState) status() *status.Status { - if d.data.statusGen == nil { - // No status-details were provided; generate status using code/msg. - d.data.statusGen = status.New(codes.Code(int32(*(d.data.rawStatusCode))), d.data.rawStatusMsg) - } - return d.data.statusGen -} - const binHdrSuffix = "-bin" func encodeBinHeader(v []byte) string { @@ -217,168 +161,16 @@ func decodeMetadataHeader(k, v string) (string, error) { return v, nil } -func (d *decodeState) decodeHeader(frame *http2.MetaHeadersFrame) (http2.ErrCode, error) { - // frame.Truncated is set to true when framer detects that the current header - // list size hits MaxHeaderListSize limit. - if frame.Truncated { - return http2.ErrCodeFrameSize, status.Error(codes.Internal, "peer header list size exceeded limit") - } - - for _, hf := range frame.Fields { - d.processHeaderField(hf) - } - - if d.data.isGRPC { - if d.data.grpcErr != nil { - return http2.ErrCodeProtocol, d.data.grpcErr - } - if d.serverSide { - return http2.ErrCodeNo, nil - } - if d.data.rawStatusCode == nil && d.data.statusGen == nil { - // gRPC status doesn't exist. - // Set rawStatusCode to be unknown and return nil error. 
- // So that, if the stream has ended this Unknown status - // will be propagated to the user. - // Otherwise, it will be ignored. In which case, status from - // a later trailer, that has StreamEnded flag set, is propagated. - code := int(codes.Unknown) - d.data.rawStatusCode = &code - } - return http2.ErrCodeNo, nil - } - - // HTTP fallback mode - if d.data.httpErr != nil { - return http2.ErrCodeProtocol, d.data.httpErr - } - - var ( - code = codes.Internal // when header does not include HTTP status, return INTERNAL - ok bool - ) - - if d.data.httpStatus != nil { - code, ok = HTTPStatusConvTab[*(d.data.httpStatus)] - if !ok { - code = codes.Unknown - } - } - - return http2.ErrCodeProtocol, status.Error(code, d.constructHTTPErrMsg()) -} - -// constructErrMsg constructs error message to be returned in HTTP fallback mode. -// Format: HTTP status code and its corresponding message + content-type error message. -func (d *decodeState) constructHTTPErrMsg() string { - var errMsgs []string - - if d.data.httpStatus == nil { - errMsgs = append(errMsgs, "malformed header: missing HTTP status") - } else { - errMsgs = append(errMsgs, fmt.Sprintf("%s: HTTP status code %d", http.StatusText(*(d.data.httpStatus)), *d.data.httpStatus)) - } - - if d.data.contentTypeErr == "" { - errMsgs = append(errMsgs, "transport: missing content-type field") - } else { - errMsgs = append(errMsgs, d.data.contentTypeErr) - } - - return strings.Join(errMsgs, "; ") -} - -func (d *decodeState) addMetadata(k, v string) { - if d.data.mdata == nil { - d.data.mdata = make(map[string][]string) +func decodeGRPCStatusDetails(rawDetails string) (*status.Status, error) { + v, err := decodeBinHeader(rawDetails) + if err != nil { + return nil, err } - d.data.mdata[k] = append(d.data.mdata[k], v) -} - -func (d *decodeState) processHeaderField(f hpack.HeaderField) { - switch f.Name { - case "content-type": - contentSubtype, validContentType := grpcutil.ContentSubtype(f.Value) - if !validContentType { - 
d.data.contentTypeErr = fmt.Sprintf("transport: received the unexpected content-type %q", f.Value) - return - } - d.data.contentSubtype = contentSubtype - // TODO: do we want to propagate the whole content-type in the metadata, - // or come up with a way to just propagate the content-subtype if it was set? - // ie {"content-type": "application/grpc+proto"} or {"content-subtype": "proto"} - // in the metadata? - d.addMetadata(f.Name, f.Value) - d.data.isGRPC = true - case "grpc-encoding": - d.data.encoding = f.Value - case "grpc-status": - code, err := strconv.Atoi(f.Value) - if err != nil { - d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-status: %v", err) - return - } - d.data.rawStatusCode = &code - case "grpc-message": - d.data.rawStatusMsg = decodeGrpcMessage(f.Value) - case "grpc-status-details-bin": - v, err := decodeBinHeader(f.Value) - if err != nil { - d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err) - return - } - s := &spb.Status{} - if err := proto.Unmarshal(v, s); err != nil { - d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err) - return - } - d.data.statusGen = status.FromProto(s) - case "grpc-timeout": - d.data.timeoutSet = true - var err error - if d.data.timeout, err = decodeTimeout(f.Value); err != nil { - d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed time-out: %v", err) - } - case ":path": - d.data.method = f.Value - case ":status": - code, err := strconv.Atoi(f.Value) - if err != nil { - d.data.httpErr = status.Errorf(codes.Internal, "transport: malformed http-status: %v", err) - return - } - d.data.httpStatus = &code - case "grpc-tags-bin": - v, err := decodeBinHeader(f.Value) - if err != nil { - d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-tags-bin: %v", err) - return - } - d.data.statsTags = v - d.addMetadata(f.Name, string(v)) - case "grpc-trace-bin": - v, 
err := decodeBinHeader(f.Value) - if err != nil { - d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-trace-bin: %v", err) - return - } - d.data.statsTrace = v - d.addMetadata(f.Name, string(v)) - case ":method": - d.data.httpMethod = f.Value - default: - if isReservedHeader(f.Name) && !isWhitelistedHeader(f.Name) { - break - } - v, err := decodeMetadataHeader(f.Name, f.Value) - if err != nil { - if logger.V(logLevel) { - logger.Errorf("Failed to decode metadata header (%q, %q): %v", f.Name, f.Value, err) - } - return - } - d.addMetadata(f.Name, v) + st := &spb.Status{} + if err = proto.Unmarshal(v, st); err != nil { + return nil, err } + return status.FromProto(st), nil } type timeoutUnit uint8 diff --git a/vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go b/vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go index 96967428b..7bb53cff1 100644 --- a/vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go +++ b/vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go @@ -17,7 +17,7 @@ */ // Package networktype declares the network type to be used in the default -// dailer. Attribute of a resolver.Address. +// dialer. Attribute of a resolver.Address. package networktype import ( diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go index 5cf7c5f80..d3bf65b2b 100644 --- a/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -30,6 +30,7 @@ import ( "net" "sync" "sync/atomic" + "time" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" @@ -518,7 +519,8 @@ const ( // ServerConfig consists of all the configurations to establish a server transport. 
type ServerConfig struct { MaxStreams uint32 - AuthInfo credentials.AuthInfo + ConnectionTimeout time.Duration + Credentials credentials.TransportCredentials InTapHandle tap.ServerInHandle StatsHandler stats.Handler KeepaliveParams keepalive.ServerParameters @@ -532,12 +534,6 @@ type ServerConfig struct { HeaderTableSize *uint32 } -// NewServerTransport creates a ServerTransport with conn or non-nil error -// if it fails. -func NewServerTransport(protocol string, conn net.Conn, config *ServerConfig) (ServerTransport, error) { - return newHTTP2Server(conn, config) -} - // ConnectOptions covers all relevant options for communicating with the server. type ConnectOptions struct { // UserAgent is the application user agent. @@ -622,7 +618,7 @@ type ClientTransport interface { // Close tears down this transport. Once it returns, the transport // should not be accessed any more. The caller must make sure this // is called only once. - Close() error + Close(err error) // GracefulClose starts to tear down the transport: the transport will stop // accepting new RPCs and NewStream will return error. Once all streams are @@ -656,8 +652,9 @@ type ClientTransport interface { // HTTP/2). GoAway() <-chan struct{} - // GetGoAwayReason returns the reason why GoAway frame was received. - GetGoAwayReason() GoAwayReason + // GetGoAwayReason returns the reason why GoAway frame was received, along + // with a human readable string with debug info. + GetGoAwayReason() (GoAwayReason, string) // RemoteAddr returns the remote network address. RemoteAddr() net.Addr @@ -693,7 +690,7 @@ type ServerTransport interface { // Close tears down the transport. Once it is called, the transport // should not be accessed any more. All the pending streams and their // handlers will be terminated asynchronously. - Close() error + Close() // RemoteAddr returns the remote network address. 
RemoteAddr() net.Addr diff --git a/vendor/google.golang.org/grpc/internal/xds/env/env.go b/vendor/google.golang.org/grpc/internal/xds/env/env.go new file mode 100644 index 000000000..b171ac91f --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/xds/env/env.go @@ -0,0 +1,95 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package env acts a single source of definition for all environment variables +// related to the xDS implementation in gRPC. +package env + +import ( + "os" + "strings" +) + +const ( + // BootstrapFileNameEnv is the env variable to set bootstrap file name. + // Do not use this and read from env directly. Its value is read and kept in + // variable BootstrapFileName. + // + // When both bootstrap FileName and FileContent are set, FileName is used. + BootstrapFileNameEnv = "GRPC_XDS_BOOTSTRAP" + // BootstrapFileContentEnv is the env variable to set bootstrapp file + // content. Do not use this and read from env directly. Its value is read + // and kept in variable BootstrapFileName. + // + // When both bootstrap FileName and FileContent are set, FileName is used. 
+ BootstrapFileContentEnv = "GRPC_XDS_BOOTSTRAP_CONFIG" + + ringHashSupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH" + clientSideSecuritySupportEnv = "GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT" + aggregateAndDNSSupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" + retrySupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_RETRY" + rbacSupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_RBAC" + + c2pResolverSupportEnv = "GRPC_EXPERIMENTAL_GOOGLE_C2P_RESOLVER" + c2pResolverTestOnlyTrafficDirectorURIEnv = "GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI" +) + +var ( + // BootstrapFileName holds the name of the file which contains xDS bootstrap + // configuration. Users can specify the location of the bootstrap file by + // setting the environment variable "GRPC_XDS_BOOTSTRAP". + // + // When both bootstrap FileName and FileContent are set, FileName is used. + BootstrapFileName = os.Getenv(BootstrapFileNameEnv) + // BootstrapFileContent holds the content of the xDS bootstrap + // configuration. Users can specify the bootstrap config by + // setting the environment variable "GRPC_XDS_BOOTSTRAP_CONFIG". + // + // When both bootstrap FileName and FileContent are set, FileName is used. + BootstrapFileContent = os.Getenv(BootstrapFileContentEnv) + // RingHashSupport indicates whether ring hash support is enabled, which can + // be disabled by setting the environment variable + // "GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH" to "false". + RingHashSupport = !strings.EqualFold(os.Getenv(ringHashSupportEnv), "false") + // ClientSideSecuritySupport is used to control processing of security + // configuration on the client-side. + // + // Note that there is no env var protection for the server-side because we + // have a brand new API on the server-side and users explicitly need to use + // the new API to get security integration on the server. 
+ ClientSideSecuritySupport = !strings.EqualFold(os.Getenv(clientSideSecuritySupportEnv), "false") + // AggregateAndDNSSupportEnv indicates whether processing of aggregated + // cluster and DNS cluster is enabled, which can be enabled by setting the + // environment variable + // "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" to + // "true". + AggregateAndDNSSupportEnv = strings.EqualFold(os.Getenv(aggregateAndDNSSupportEnv), "true") + + // RetrySupport indicates whether xDS retry is enabled. + RetrySupport = !strings.EqualFold(os.Getenv(retrySupportEnv), "false") + + // RBACSupport indicates whether xDS configured RBAC HTTP Filter is enabled. + RBACSupport = strings.EqualFold(os.Getenv(rbacSupportEnv), "true") + + // C2PResolverSupport indicates whether support for C2P resolver is enabled. + // This can be enabled by setting the environment variable + // "GRPC_EXPERIMENTAL_GOOGLE_C2P_RESOLVER" to "true". + C2PResolverSupport = strings.EqualFold(os.Getenv(c2pResolverSupportEnv), "true") + // C2PResolverTestOnlyTrafficDirectorURI is the TD URI for testing. + C2PResolverTestOnlyTrafficDirectorURI = os.Getenv(c2pResolverTestOnlyTrafficDirectorURIEnv) +) diff --git a/vendor/google.golang.org/grpc/internal/xds_handshake_cluster.go b/vendor/google.golang.org/grpc/internal/xds_handshake_cluster.go new file mode 100644 index 000000000..3677c3f04 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/xds_handshake_cluster.go @@ -0,0 +1,40 @@ +/* + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package internal + +import ( + "google.golang.org/grpc/attributes" + "google.golang.org/grpc/resolver" +) + +// handshakeClusterNameKey is the type used as the key to store cluster name in +// the Attributes field of resolver.Address. +type handshakeClusterNameKey struct{} + +// SetXDSHandshakeClusterName returns a copy of addr in which the Attributes field +// is updated with the cluster name. +func SetXDSHandshakeClusterName(addr resolver.Address, clusterName string) resolver.Address { + addr.Attributes = addr.Attributes.WithValues(handshakeClusterNameKey{}, clusterName) + return addr +} + +// GetXDSHandshakeClusterName returns cluster name stored in attr. +func GetXDSHandshakeClusterName(attr *attributes.Attributes) (string, bool) { + v := attr.Value(handshakeClusterNameKey{}) + name, ok := v.(string) + return name, ok +} diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go index cf6d1b947..3604c7819 100644 --- a/vendor/google.golang.org/grpc/metadata/metadata.go +++ b/vendor/google.golang.org/grpc/metadata/metadata.go @@ -75,13 +75,9 @@ func Pairs(kv ...string) MD { panic(fmt.Sprintf("metadata: Pairs got the odd number of input pairs for metadata: %d", len(kv))) } md := MD{} - var key string - for i, s := range kv { - if i%2 == 0 { - key = strings.ToLower(s) - continue - } - md[key] = append(md[key], s) + for i := 0; i < len(kv); i += 2 { + key := strings.ToLower(kv[i]) + md[key] = append(md[key], kv[i+1]) } return md } @@ -97,12 +93,16 @@ func (md MD) Copy() MD { } // Get obtains the values for a given key. +// +// k is converted to lowercase before searching in md. func (md MD) Get(k string) []string { k = strings.ToLower(k) return md[k] } // Set sets the value of a given key with a slice of values. +// +// k is converted to lowercase before storing in md. 
func (md MD) Set(k string, vals ...string) { if len(vals) == 0 { return @@ -111,7 +111,10 @@ func (md MD) Set(k string, vals ...string) { md[k] = vals } -// Append adds the values to key k, not overwriting what was already stored at that key. +// Append adds the values to key k, not overwriting what was already stored at +// that key. +// +// k is converted to lowercase before storing in md. func (md MD) Append(k string, vals ...string) { if len(vals) == 0 { return @@ -120,9 +123,17 @@ func (md MD) Append(k string, vals ...string) { md[k] = append(md[k], vals...) } +// Delete removes the values for a given key k which is converted to lowercase +// before removing it from md. +func (md MD) Delete(k string) { + k = strings.ToLower(k) + delete(md, k) +} + // Join joins any number of mds into a single MD. -// The order of values for each key is determined by the order in which -// the mds containing those values are presented to Join. +// +// The order of values for each key is determined by the order in which the mds +// containing those values are presented to Join. func Join(mds ...MD) MD { out := MD{} for _, md := range mds { @@ -149,8 +160,8 @@ func NewOutgoingContext(ctx context.Context, md MD) context.Context { } // AppendToOutgoingContext returns a new context with the provided kv merged -// with any existing metadata in the context. Please refer to the -// documentation of Pairs for a description of kv. +// with any existing metadata in the context. Please refer to the documentation +// of Pairs for a description of kv. 
func AppendToOutgoingContext(ctx context.Context, kv ...string) context.Context { if len(kv)%2 == 1 { panic(fmt.Sprintf("metadata: AppendToOutgoingContext got an odd number of input pairs for metadata: %d", len(kv))) @@ -163,20 +174,34 @@ func AppendToOutgoingContext(ctx context.Context, kv ...string) context.Context return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md.md, added: added}) } -// FromIncomingContext returns the incoming metadata in ctx if it exists. The -// returned MD should not be modified. Writing to it may cause races. -// Modification should be made to copies of the returned MD. -func FromIncomingContext(ctx context.Context) (md MD, ok bool) { - md, ok = ctx.Value(mdIncomingKey{}).(MD) - return +// FromIncomingContext returns the incoming metadata in ctx if it exists. +// +// All keys in the returned MD are lowercase. +func FromIncomingContext(ctx context.Context) (MD, bool) { + md, ok := ctx.Value(mdIncomingKey{}).(MD) + if !ok { + return nil, false + } + out := MD{} + for k, v := range md { + // We need to manually convert all keys to lower case, because MD is a + // map, and there's no guarantee that the MD attached to the context is + // created using our helper functions. + key := strings.ToLower(k) + out[key] = v + } + return out, true } -// FromOutgoingContextRaw returns the un-merged, intermediary contents -// of rawMD. Remember to perform strings.ToLower on the keys. The returned -// MD should not be modified. Writing to it may cause races. Modification -// should be made to copies of the returned MD. +// FromOutgoingContextRaw returns the un-merged, intermediary contents of rawMD. // -// This is intended for gRPC-internal use ONLY. +// Remember to perform strings.ToLower on the keys, for both the returned MD (MD +// is a map, there's no guarantee it's created using our helper functions) and +// the extra kv pairs (AppendToOutgoingContext doesn't turn them into +// lowercase). +// +// This is intended for gRPC-internal use ONLY. 
Users should use +// FromOutgoingContext instead. func FromOutgoingContextRaw(ctx context.Context) (MD, [][]string, bool) { raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD) if !ok { @@ -186,21 +211,34 @@ func FromOutgoingContextRaw(ctx context.Context) (MD, [][]string, bool) { return raw.md, raw.added, true } -// FromOutgoingContext returns the outgoing metadata in ctx if it exists. The -// returned MD should not be modified. Writing to it may cause races. -// Modification should be made to copies of the returned MD. +// FromOutgoingContext returns the outgoing metadata in ctx if it exists. +// +// All keys in the returned MD are lowercase. func FromOutgoingContext(ctx context.Context) (MD, bool) { raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD) if !ok { return nil, false } - mds := make([]MD, 0, len(raw.added)+1) - mds = append(mds, raw.md) - for _, vv := range raw.added { - mds = append(mds, Pairs(vv...)) + out := MD{} + for k, v := range raw.md { + // We need to manually convert all keys to lower case, because MD is a + // map, and there's no guarantee that the MD attached to the context is + // created using our helper functions. + key := strings.ToLower(k) + out[key] = v + } + for _, added := range raw.added { + if len(added)%2 == 1 { + panic(fmt.Sprintf("metadata: FromOutgoingContext got an odd number of input pairs for metadata: %d", len(added))) + } + + for i := 0; i < len(added); i += 2 { + key := strings.ToLower(added[i]) + out[key] = append(out[key], added[i+1]) + } } - return Join(mds...), ok + return out, ok } type rawMD struct { diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go index a58174b6f..0878ada9d 100644 --- a/vendor/google.golang.org/grpc/picker_wrapper.go +++ b/vendor/google.golang.org/grpc/picker_wrapper.go @@ -147,7 +147,7 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. 
logger.Error("subconn returned from pick is not *acBalancerWrapper") continue } - if t, ok := acw.getAddrConn().getReadyTransport(); ok { + if t := acw.getAddrConn().getReadyTransport(); t != nil { if channelz.IsOn() { return t, doneChannelzWrapper(acw, pickResult.Done), nil } diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/pickfirst.go index b858c2a5e..f194d14a0 100644 --- a/vendor/google.golang.org/grpc/pickfirst.go +++ b/vendor/google.golang.org/grpc/pickfirst.go @@ -107,10 +107,12 @@ func (b *pickfirstBalancer) UpdateSubConnState(sc balancer.SubConn, s balancer.S } switch s.ConnectivityState { - case connectivity.Ready, connectivity.Idle: + case connectivity.Ready: b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &picker{result: balancer.PickResult{SubConn: sc}}}) case connectivity.Connecting: b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &picker{err: balancer.ErrNoSubConnAvailable}}) + case connectivity.Idle: + b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &idlePicker{sc: sc}}) case connectivity.TransientFailure: b.cc.UpdateState(balancer.State{ ConnectivityState: s.ConnectivityState, @@ -122,6 +124,12 @@ func (b *pickfirstBalancer) UpdateSubConnState(sc balancer.SubConn, s balancer.S func (b *pickfirstBalancer) Close() { } +func (b *pickfirstBalancer) ExitIdle() { + if b.state == connectivity.Idle { + b.sc.Connect() + } +} + type picker struct { result balancer.PickResult err error @@ -131,6 +139,17 @@ func (p *picker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { return p.result, p.err } +// idlePicker is used when the SubConn is IDLE and kicks the SubConn into +// CONNECTING when Pick is called. 
+type idlePicker struct { + sc balancer.SubConn +} + +func (i *idlePicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { + i.sc.Connect() + return balancer.PickResult{}, balancer.ErrNoSubConnAvailable +} + func init() { balancer.Register(newPickfirstBuilder()) } diff --git a/vendor/google.golang.org/grpc/regenerate.sh b/vendor/google.golang.org/grpc/regenerate.sh index fc6725b89..dfd3226a1 100644 --- a/vendor/google.golang.org/grpc/regenerate.sh +++ b/vendor/google.golang.org/grpc/regenerate.sh @@ -48,11 +48,6 @@ mkdir -p ${WORKDIR}/googleapis/google/rpc echo "curl https://raw.githubusercontent.com/googleapis/googleapis/master/google/rpc/code.proto" curl --silent https://raw.githubusercontent.com/googleapis/googleapis/master/google/rpc/code.proto > ${WORKDIR}/googleapis/google/rpc/code.proto -# Pull in the MeshCA service proto. -mkdir -p ${WORKDIR}/istio/istio/google/security/meshca/v1 -echo "curl https://raw.githubusercontent.com/istio/istio/master/security/proto/providers/google/meshca.proto" -curl --silent https://raw.githubusercontent.com/istio/istio/master/security/proto/providers/google/meshca.proto > ${WORKDIR}/istio/istio/google/security/meshca/v1/meshca.proto - mkdir -p ${WORKDIR}/out # Generates sources without the embed requirement @@ -76,7 +71,6 @@ SOURCES=( ${WORKDIR}/grpc-proto/grpc/service_config/service_config.proto ${WORKDIR}/grpc-proto/grpc/testing/*.proto ${WORKDIR}/grpc-proto/grpc/core/*.proto - ${WORKDIR}/istio/istio/google/security/meshca/v1/meshca.proto ) # These options of the form 'Mfoo.proto=bar' instruct the codegen to use an @@ -122,8 +116,4 @@ mv ${WORKDIR}/out/grpc/service_config/service_config.pb.go internal/proto/grpc_s mv ${WORKDIR}/out/grpc/testing/*.pb.go interop/grpc_testing/ mv ${WORKDIR}/out/grpc/core/*.pb.go interop/grpc_testing/core/ -# istio/google/security/meshca/v1/meshca.proto does not have a go_package option. 
-mkdir -p ${WORKDIR}/out/google.golang.org/grpc/credentials/tls/certprovider/meshca/internal/v1/ -mv ${WORKDIR}/out/istio/google/security/meshca/v1/* ${WORKDIR}/out/google.golang.org/grpc/credentials/tls/certprovider/meshca/internal/v1/ - cp -R ${WORKDIR}/out/google.golang.org/grpc/* . diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go index e9fa8e33d..6a9d234a5 100644 --- a/vendor/google.golang.org/grpc/resolver/resolver.go +++ b/vendor/google.golang.org/grpc/resolver/resolver.go @@ -181,7 +181,7 @@ type State struct { // gRPC to add new methods to this interface. type ClientConn interface { // UpdateState updates the state of the ClientConn appropriately. - UpdateState(State) + UpdateState(State) error // ReportError notifies the ClientConn that the Resolver encountered an // error. The ClientConn will notify the load balancer and begin calling // ResolveNow on the Resolver with exponential backoff. diff --git a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go index f2d81968f..2c47cd54f 100644 --- a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go +++ b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go @@ -22,7 +22,6 @@ import ( "fmt" "strings" "sync" - "time" "google.golang.org/grpc/balancer" "google.golang.org/grpc/credentials" @@ -41,8 +40,7 @@ type ccResolverWrapper struct { done *grpcsync.Event curState resolver.State - pollingMu sync.Mutex - polling chan struct{} + incomingMu sync.Mutex // Synchronizes all the incoming calls. } // newCCResolverWrapper uses the resolver.Builder to build a Resolver and @@ -93,71 +91,37 @@ func (ccr *ccResolverWrapper) close() { ccr.resolverMu.Unlock() } -// poll begins or ends asynchronous polling of the resolver based on whether -// err is ErrBadResolverState. 
-func (ccr *ccResolverWrapper) poll(err error) { - ccr.pollingMu.Lock() - defer ccr.pollingMu.Unlock() - if err != balancer.ErrBadResolverState { - // stop polling - if ccr.polling != nil { - close(ccr.polling) - ccr.polling = nil - } - return - } - if ccr.polling != nil { - // already polling - return - } - p := make(chan struct{}) - ccr.polling = p - go func() { - for i := 0; ; i++ { - ccr.resolveNow(resolver.ResolveNowOptions{}) - t := time.NewTimer(ccr.cc.dopts.resolveNowBackoff(i)) - select { - case <-p: - t.Stop() - return - case <-ccr.done.Done(): - // Resolver has been closed. - t.Stop() - return - case <-t.C: - select { - case <-p: - return - default: - } - // Timer expired; re-resolve. - } - } - }() -} - -func (ccr *ccResolverWrapper) UpdateState(s resolver.State) { +func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error { + ccr.incomingMu.Lock() + defer ccr.incomingMu.Unlock() if ccr.done.HasFired() { - return + return nil } channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: sending update to cc: %v", s) if channelz.IsOn() { ccr.addChannelzTraceEvent(s) } ccr.curState = s - ccr.poll(ccr.cc.updateResolverState(ccr.curState, nil)) + if err := ccr.cc.updateResolverState(ccr.curState, nil); err == balancer.ErrBadResolverState { + return balancer.ErrBadResolverState + } + return nil } func (ccr *ccResolverWrapper) ReportError(err error) { + ccr.incomingMu.Lock() + defer ccr.incomingMu.Unlock() if ccr.done.HasFired() { return } channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: reporting error to cc: %v", err) - ccr.poll(ccr.cc.updateResolverState(resolver.State{}, err)) + ccr.cc.updateResolverState(resolver.State{}, err) } // NewAddress is called by the resolver implementation to send addresses to gRPC. 
func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { + ccr.incomingMu.Lock() + defer ccr.incomingMu.Unlock() if ccr.done.HasFired() { return } @@ -166,12 +130,14 @@ func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig}) } ccr.curState.Addresses = addrs - ccr.poll(ccr.cc.updateResolverState(ccr.curState, nil)) + ccr.cc.updateResolverState(ccr.curState, nil) } // NewServiceConfig is called by the resolver implementation to send service // configs to gRPC. func (ccr *ccResolverWrapper) NewServiceConfig(sc string) { + ccr.incomingMu.Lock() + defer ccr.incomingMu.Unlock() if ccr.done.HasFired() { return } @@ -183,14 +149,13 @@ func (ccr *ccResolverWrapper) NewServiceConfig(sc string) { scpr := parseServiceConfig(sc) if scpr.Err != nil { channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: error parsing service config: %v", scpr.Err) - ccr.poll(balancer.ErrBadResolverState) return } if channelz.IsOn() { ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: scpr}) } ccr.curState.ServiceConfig = scpr - ccr.poll(ccr.cc.updateResolverState(ccr.curState, nil)) + ccr.cc.updateResolverState(ccr.curState, nil) } func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.ParseResult { diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go index c0a1208f2..87987a2e6 100644 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -258,7 +258,8 @@ func (o PeerCallOption) after(c *callInfo, attempt *csAttempt) { } // WaitForReady configures the action to take when an RPC is attempted on broken -// connections or unreachable servers. If waitForReady is false, the RPC will fail +// connections or unreachable servers. 
If waitForReady is false and the +// connection is in the TRANSIENT_FAILURE state, the RPC will fail // immediately. Otherwise, the RPC client will block the call until a // connection is available (or the call is canceled or times out) and will // retry the call if it fails due to a transient error. gRPC will not retry if @@ -429,9 +430,10 @@ func (o ContentSubtypeCallOption) before(c *callInfo) error { } func (o ContentSubtypeCallOption) after(c *callInfo, attempt *csAttempt) {} -// ForceCodec returns a CallOption that will set the given Codec to be -// used for all request and response messages for a call. The result of calling -// String() will be used as the content-subtype in a case-insensitive manner. +// ForceCodec returns a CallOption that will set codec to be used for all +// request and response messages for a call. The result of calling Name() will +// be used as the content-subtype after converting to lowercase, unless +// CallContentSubtype is also used. // // See Content-Type on // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for @@ -827,33 +829,45 @@ func Errorf(c codes.Code, format string, a ...interface{}) error { // toRPCErr converts an error into an error from the status package. 
func toRPCErr(err error) error { - if err == nil || err == io.EOF { + switch err { + case nil, io.EOF: return err - } - if err == io.ErrUnexpectedEOF { + case context.DeadlineExceeded: + return status.Error(codes.DeadlineExceeded, err.Error()) + case context.Canceled: + return status.Error(codes.Canceled, err.Error()) + case io.ErrUnexpectedEOF: return status.Error(codes.Internal, err.Error()) } - if _, ok := status.FromError(err); ok { - return err - } + switch e := err.(type) { case transport.ConnectionError: return status.Error(codes.Unavailable, e.Desc) - default: - switch err { - case context.DeadlineExceeded: - return status.Error(codes.DeadlineExceeded, err.Error()) - case context.Canceled: - return status.Error(codes.Canceled, err.Error()) - } + case *transport.NewStreamError: + return toRPCErr(e.Err) } + + if _, ok := status.FromError(err); ok { + return err + } + return status.Error(codes.Unknown, err.Error()) } // setCallInfoCodec should only be called after CallOptions have been applied. func setCallInfoCodec(c *callInfo) error { if c.codec != nil { - // codec was already set by a CallOption; use it. + // codec was already set by a CallOption; use it, but set the content + // subtype if it is not set. + if c.contentSubtype == "" { + // c.codec is a baseCodec to hide the difference between grpc.Codec and + // encoding.Codec (Name vs. String method name). We only support + // setting content subtype from encoding.Codec to avoid a behavior + // change with the deprecated version. 
+ if ec, ok := c.codec.(encoding.Codec); ok { + c.contentSubtype = strings.ToLower(ec.Name()) + } + } return nil } diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index 7a2aa28a1..557f29559 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -57,12 +57,22 @@ import ( const ( defaultServerMaxReceiveMessageSize = 1024 * 1024 * 4 defaultServerMaxSendMessageSize = math.MaxInt32 + + // Server transports are tracked in a map which is keyed on listener + // address. For regular gRPC traffic, connections are accepted in Serve() + // through a call to Accept(), and we use the actual listener address as key + // when we add it to the map. But for connections received through + // ServeHTTP(), we do not have a listener and hence use this dummy value. + listenerAddressForServeHTTP = "listenerAddressForServeHTTP" ) func init() { internal.GetServerCredentials = func(srv *Server) credentials.TransportCredentials { return srv.opts.creds } + internal.DrainServerTransports = func(srv *Server, addr string) { + srv.drainServerTransports(addr) + } } var statusOK = status.New(codes.OK, "") @@ -107,9 +117,12 @@ type serverWorkerData struct { type Server struct { opts serverOptions - mu sync.Mutex // guards following - lis map[net.Listener]bool - conns map[transport.ServerTransport]bool + mu sync.Mutex // guards following + lis map[net.Listener]bool + // conns contains all active server transports. It is a map keyed on a + // listener address with the value being the set of active transports + // belonging to that listener. + conns map[string]map[transport.ServerTransport]bool serve bool drain bool cv *sync.Cond // signaled when connections close for GracefulStop @@ -266,6 +279,35 @@ func CustomCodec(codec Codec) ServerOption { }) } +// ForceServerCodec returns a ServerOption that sets a codec for message +// marshaling and unmarshaling. 
+// +// This will override any lookups by content-subtype for Codecs registered +// with RegisterCodec. +// +// See Content-Type on +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. Also see the documentation on RegisterCodec and +// CallContentSubtype for more details on the interaction between encoding.Codec +// and content-subtype. +// +// This function is provided for advanced users; prefer to register codecs +// using encoding.RegisterCodec. +// The server will automatically use registered codecs based on the incoming +// requests' headers. See also +// https://github.com/grpc/grpc-go/blob/master/Documentation/encoding.md#using-a-codec. +// Will be supported throughout 1.x. +// +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func ForceServerCodec(codec encoding.Codec) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.codec = codec + }) +} + // RPCCompressor returns a ServerOption that sets a compressor for outbound // messages. For backward compatibility, all outbound messages will be sent // using this compressor, regardless of incoming message compression. By @@ -376,6 +418,11 @@ func ChainStreamInterceptor(interceptors ...StreamServerInterceptor) ServerOptio // InTapHandle returns a ServerOption that sets the tap handle for all the server // transport to be created. Only one can be installed. +// +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. 
func InTapHandle(h tap.ServerInHandle) ServerOption { return newFuncServerOption(func(o *serverOptions) { if o.inTapHandle != nil { @@ -519,7 +566,7 @@ func NewServer(opt ...ServerOption) *Server { s := &Server{ lis: make(map[net.Listener]bool), opts: opts, - conns: make(map[transport.ServerTransport]bool), + conns: make(map[string]map[transport.ServerTransport]bool), services: make(map[string]*serviceInfo), quit: grpcsync.NewEvent(), done: grpcsync.NewEvent(), @@ -663,13 +710,6 @@ func (s *Server) GetServiceInfo() map[string]ServiceInfo { // the server being stopped. var ErrServerStopped = errors.New("grpc: the server has been stopped") -func (s *Server) useTransportAuthenticator(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { - if s.opts.creds == nil { - return rawConn, nil, nil - } - return s.opts.creds.ServerHandshake(rawConn) -} - type listenSocket struct { net.Listener channelzID int64 @@ -778,7 +818,7 @@ func (s *Server) Serve(lis net.Listener) error { // s.conns before this conn can be added. s.serveWG.Add(1) go func() { - s.handleRawConn(rawConn) + s.handleRawConn(lis.Addr().String(), rawConn) s.serveWG.Done() }() } @@ -786,49 +826,45 @@ func (s *Server) Serve(lis net.Listener) error { // handleRawConn forks a goroutine to handle a just-accepted connection that // has not had any I/O performed on it yet. -func (s *Server) handleRawConn(rawConn net.Conn) { +func (s *Server) handleRawConn(lisAddr string, rawConn net.Conn) { if s.quit.HasFired() { rawConn.Close() return } rawConn.SetDeadline(time.Now().Add(s.opts.connectionTimeout)) - conn, authInfo, err := s.useTransportAuthenticator(rawConn) - if err != nil { - // ErrConnDispatched means that the connection was dispatched away from - // gRPC; those connections should be left open. 
- if err != credentials.ErrConnDispatched { - s.mu.Lock() - s.errorf("ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err) - s.mu.Unlock() - channelz.Warningf(logger, s.channelzID, "grpc: Server.Serve failed to complete security handshake from %q: %v", rawConn.RemoteAddr(), err) - rawConn.Close() - } - rawConn.SetDeadline(time.Time{}) - return - } // Finish handshaking (HTTP2) - st := s.newHTTP2Transport(conn, authInfo) + st := s.newHTTP2Transport(rawConn) + rawConn.SetDeadline(time.Time{}) if st == nil { return } - rawConn.SetDeadline(time.Time{}) - if !s.addConn(st) { + if !s.addConn(lisAddr, st) { return } go func() { s.serveStreams(st) - s.removeConn(st) + s.removeConn(lisAddr, st) }() } +func (s *Server) drainServerTransports(addr string) { + s.mu.Lock() + conns := s.conns[addr] + for st := range conns { + st.Drain() + } + s.mu.Unlock() +} + // newHTTP2Transport sets up a http/2 transport (using the // gRPC http2 server transport in transport/http2_server.go). -func (s *Server) newHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) transport.ServerTransport { +func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { config := &transport.ServerConfig{ MaxStreams: s.opts.maxConcurrentStreams, - AuthInfo: authInfo, + ConnectionTimeout: s.opts.connectionTimeout, + Credentials: s.opts.creds, InTapHandle: s.opts.inTapHandle, StatsHandler: s.opts.statsHandler, KeepaliveParams: s.opts.keepaliveParams, @@ -841,13 +877,22 @@ func (s *Server) newHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) tr MaxHeaderListSize: s.opts.maxHeaderListSize, HeaderTableSize: s.opts.headerTableSize, } - st, err := transport.NewServerTransport("http2", c, config) + st, err := transport.NewServerTransport(c, config) if err != nil { s.mu.Lock() s.errorf("NewServerTransport(%q) failed: %v", c.RemoteAddr(), err) s.mu.Unlock() - c.Close() - channelz.Warning(logger, s.channelzID, "grpc: Server.Serve failed to create ServerTransport: ", err) + // 
ErrConnDispatched means that the connection was dispatched away from + // gRPC; those connections should be left open. + if err != credentials.ErrConnDispatched { + c.Close() + } + // Don't log on ErrConnDispatched and io.EOF to prevent log spam. + if err != credentials.ErrConnDispatched { + if err != io.EOF { + channelz.Warning(logger, s.channelzID, "grpc: Server.Serve failed to create ServerTransport: ", err) + } + } return nil } @@ -924,10 +969,10 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { http.Error(w, err.Error(), http.StatusInternalServerError) return } - if !s.addConn(st) { + if !s.addConn(listenerAddressForServeHTTP, st) { return } - defer s.removeConn(st) + defer s.removeConn(listenerAddressForServeHTTP, st) s.serveStreams(st) } @@ -955,7 +1000,7 @@ func (s *Server) traceInfo(st transport.ServerTransport, stream *transport.Strea return trInfo } -func (s *Server) addConn(st transport.ServerTransport) bool { +func (s *Server) addConn(addr string, st transport.ServerTransport) bool { s.mu.Lock() defer s.mu.Unlock() if s.conns == nil { @@ -967,15 +1012,28 @@ func (s *Server) addConn(st transport.ServerTransport) bool { // immediately. st.Drain() } - s.conns[st] = true + + if s.conns[addr] == nil { + // Create a map entry if this is the first connection on this listener. + s.conns[addr] = make(map[transport.ServerTransport]bool) + } + s.conns[addr][st] = true return true } -func (s *Server) removeConn(st transport.ServerTransport) { +func (s *Server) removeConn(addr string, st transport.ServerTransport) { s.mu.Lock() defer s.mu.Unlock() - if s.conns != nil { - delete(s.conns, st) + + conns := s.conns[addr] + if conns != nil { + delete(conns, st) + if len(conns) == 0 { + // If the last connection for this address is being removed, also + // remove the map entry corresponding to the address. This is used + // in GracefulStop() when waiting for all connections to be closed. 
+ delete(s.conns, addr) + } s.cv.Broadcast() } } @@ -1040,22 +1098,24 @@ func chainUnaryServerInterceptors(s *Server) { } else if len(interceptors) == 1 { chainedInt = interceptors[0] } else { - chainedInt = func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (interface{}, error) { - return interceptors[0](ctx, req, info, getChainUnaryHandler(interceptors, 0, info, handler)) - } + chainedInt = chainUnaryInterceptors(interceptors) } s.opts.unaryInt = chainedInt } -// getChainUnaryHandler recursively generate the chained UnaryHandler -func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info *UnaryServerInfo, finalHandler UnaryHandler) UnaryHandler { - if curr == len(interceptors)-1 { - return finalHandler - } - - return func(ctx context.Context, req interface{}) (interface{}, error) { - return interceptors[curr+1](ctx, req, info, getChainUnaryHandler(interceptors, curr+1, info, finalHandler)) +func chainUnaryInterceptors(interceptors []UnaryServerInterceptor) UnaryServerInterceptor { + return func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (interface{}, error) { + var i int + var next UnaryHandler + next = func(ctx context.Context, req interface{}) (interface{}, error) { + if i == len(interceptors)-1 { + return interceptors[i](ctx, req, info, handler) + } + i++ + return interceptors[i-1](ctx, req, info, next) + } + return next(ctx, req) } } @@ -1069,7 +1129,9 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. 
if sh != nil { beginTime := time.Now() statsBegin = &stats.Begin{ - BeginTime: beginTime, + BeginTime: beginTime, + IsClientStream: false, + IsServerStream: false, } sh.HandleRPC(stream.Context(), statsBegin) } @@ -1321,22 +1383,24 @@ func chainStreamServerInterceptors(s *Server) { } else if len(interceptors) == 1 { chainedInt = interceptors[0] } else { - chainedInt = func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error { - return interceptors[0](srv, ss, info, getChainStreamHandler(interceptors, 0, info, handler)) - } + chainedInt = chainStreamInterceptors(interceptors) } s.opts.streamInt = chainedInt } -// getChainStreamHandler recursively generate the chained StreamHandler -func getChainStreamHandler(interceptors []StreamServerInterceptor, curr int, info *StreamServerInfo, finalHandler StreamHandler) StreamHandler { - if curr == len(interceptors)-1 { - return finalHandler - } - - return func(srv interface{}, ss ServerStream) error { - return interceptors[curr+1](srv, ss, info, getChainStreamHandler(interceptors, curr+1, info, finalHandler)) +func chainStreamInterceptors(interceptors []StreamServerInterceptor) StreamServerInterceptor { + return func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error { + var i int + var next StreamHandler + next = func(srv interface{}, ss ServerStream) error { + if i == len(interceptors)-1 { + return interceptors[i](srv, ss, info, handler) + } + i++ + return interceptors[i-1](srv, ss, info, next) + } + return next(srv, ss) } } @@ -1349,7 +1413,9 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp if sh != nil { beginTime := time.Now() statsBegin = &stats.Begin{ - BeginTime: beginTime, + BeginTime: beginTime, + IsClientStream: sd.ClientStreams, + IsServerStream: sd.ServerStreams, } sh.HandleRPC(stream.Context(), statsBegin) } @@ -1452,6 +1518,8 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp 
} } + ss.ctx = newContextWithRPCInfo(ss.ctx, false, ss.codec, ss.cp, ss.comp) + if trInfo != nil { trInfo.tr.LazyLog(&trInfo.firstLine, false) } @@ -1519,7 +1587,7 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str trInfo.tr.SetError() } errDesc := fmt.Sprintf("malformed method name: %q", stream.Method()) - if err := t.WriteStatus(stream, status.New(codes.ResourceExhausted, errDesc)); err != nil { + if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { if trInfo != nil { trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) trInfo.tr.SetError() @@ -1639,7 +1707,7 @@ func (s *Server) Stop() { s.mu.Lock() listeners := s.lis s.lis = nil - st := s.conns + conns := s.conns s.conns = nil // interrupt GracefulStop if Stop and GracefulStop are called concurrently. s.cv.Broadcast() @@ -1648,8 +1716,10 @@ func (s *Server) Stop() { for lis := range listeners { lis.Close() } - for c := range st { - c.Close() + for _, cs := range conns { + for st := range cs { + st.Close() + } } if s.opts.numServerWorkers > 0 { s.stopServerWorkers() @@ -1686,8 +1756,10 @@ func (s *Server) GracefulStop() { } s.lis = nil if !s.drain { - for st := range s.conns { - st.Drain() + for _, conns := range s.conns { + for st := range conns { + st.Drain() + } } s.drain = true } diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go index 63e476ee7..0285dcc6a 100644 --- a/vendor/google.golang.org/grpc/stats/stats.go +++ b/vendor/google.golang.org/grpc/stats/stats.go @@ -36,15 +36,22 @@ type RPCStats interface { IsClient() bool } -// Begin contains stats when an RPC begins. +// Begin contains stats when an RPC attempt begins. // FailFast is only valid if this Begin is from client side. type Begin struct { // Client is true if this Begin is from client side. Client bool - // BeginTime is the time when the RPC begins. + // BeginTime is the time when the RPC attempt begins. 
BeginTime time.Time // FailFast indicates if this RPC is failfast. FailFast bool + // IsClientStream indicates whether the RPC is a client streaming RPC. + IsClientStream bool + // IsServerStream indicates whether the RPC is a server streaming RPC. + IsServerStream bool + // IsTransparentRetryAttempt indicates whether this attempt was initiated + // due to transparently retrying a previous attempt. + IsTransparentRetryAttempt bool } // IsClient indicates if the stats information is from client side. diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index 77d25742c..625d47b34 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -52,14 +52,20 @@ import ( // of the RPC. type StreamHandler func(srv interface{}, stream ServerStream) error -// StreamDesc represents a streaming RPC service's method specification. +// StreamDesc represents a streaming RPC service's method specification. Used +// on the server when registering services and on the client when initiating +// new streams. type StreamDesc struct { - StreamName string - Handler StreamHandler - - // At least one of these is true. - ServerStreams bool - ClientStreams bool + // StreamName and Handler are only used when registering handlers on a + // server. + StreamName string // the name of the method excluding the service + Handler StreamHandler // the handler called for the method + + // ServerStreams and ClientStreams are used for registering handlers on a + // server as well as defining RPC behavior when passed to NewClientStream + // and ClientConn.NewStream. At least one must be true. + ServerStreams bool // indicates the server can perform streaming sends + ClientStreams bool // indicates the client can perform streaming sends } // Stream defines the common interface a client or server stream has to satisfy. 
@@ -268,33 +274,6 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client if c.creds != nil { callHdr.Creds = c.creds } - var trInfo *traceInfo - if EnableTracing { - trInfo = &traceInfo{ - tr: trace.New("grpc.Sent."+methodFamily(method), method), - firstLine: firstLine{ - client: true, - }, - } - if deadline, ok := ctx.Deadline(); ok { - trInfo.firstLine.deadline = time.Until(deadline) - } - trInfo.tr.LazyLog(&trInfo.firstLine, false) - ctx = trace.NewContext(ctx, trInfo.tr) - } - ctx = newContextWithRPCInfo(ctx, c.failFast, c.codec, cp, comp) - sh := cc.dopts.copts.StatsHandler - var beginTime time.Time - if sh != nil { - ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: c.failFast}) - beginTime = time.Now() - begin := &stats.Begin{ - Client: true, - BeginTime: beginTime, - FailFast: c.failFast, - } - sh.HandleRPC(ctx, begin) - } cs := &clientStream{ callHdr: callHdr, @@ -308,7 +287,6 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client cp: cp, comp: comp, cancel: cancel, - beginTime: beginTime, firstAttempt: true, onCommit: onCommit, } @@ -317,9 +295,7 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client } cs.binlog = binarylog.GetMethodLogger(method) - // Only this initial attempt has stats/tracing. - // TODO(dfawley): move to newAttempt when per-attempt stats are implemented. - if err := cs.newAttemptLocked(sh, trInfo); err != nil { + if err := cs.newAttemptLocked(false /* isTransparent */); err != nil { cs.finish(err) return nil, err } @@ -367,8 +343,43 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client // newAttemptLocked creates a new attempt with a transport. // If it succeeds, then it replaces clientStream's attempt with this new attempt. 
-func (cs *clientStream) newAttemptLocked(sh stats.Handler, trInfo *traceInfo) (retErr error) { +func (cs *clientStream) newAttemptLocked(isTransparent bool) (retErr error) { + ctx := newContextWithRPCInfo(cs.ctx, cs.callInfo.failFast, cs.callInfo.codec, cs.cp, cs.comp) + method := cs.callHdr.Method + sh := cs.cc.dopts.copts.StatsHandler + var beginTime time.Time + if sh != nil { + ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: cs.callInfo.failFast}) + beginTime = time.Now() + begin := &stats.Begin{ + Client: true, + BeginTime: beginTime, + FailFast: cs.callInfo.failFast, + IsClientStream: cs.desc.ClientStreams, + IsServerStream: cs.desc.ServerStreams, + IsTransparentRetryAttempt: isTransparent, + } + sh.HandleRPC(ctx, begin) + } + + var trInfo *traceInfo + if EnableTracing { + trInfo = &traceInfo{ + tr: trace.New("grpc.Sent."+methodFamily(method), method), + firstLine: firstLine{ + client: true, + }, + } + if deadline, ok := ctx.Deadline(); ok { + trInfo.firstLine.deadline = time.Until(deadline) + } + trInfo.tr.LazyLog(&trInfo.firstLine, false) + ctx = trace.NewContext(ctx, trInfo.tr) + } + newAttempt := &csAttempt{ + ctx: ctx, + beginTime: beginTime, cs: cs, dc: cs.cc.dopts.dc, statsHandler: sh, @@ -383,15 +394,14 @@ func (cs *clientStream) newAttemptLocked(sh stats.Handler, trInfo *traceInfo) (r } }() - if err := cs.ctx.Err(); err != nil { + if err := ctx.Err(); err != nil { return toRPCErr(err) } - ctx := cs.ctx if cs.cc.parsedTarget.Scheme == "xds" { // Add extra metadata (metadata that will be added by transport) to context // so the balancer can see them. 
- ctx = grpcutil.WithExtraMetadata(cs.ctx, metadata.Pairs( + ctx = grpcutil.WithExtraMetadata(ctx, metadata.Pairs( "content-type", grpcutil.ContentType(cs.callHdr.ContentSubtype), )) } @@ -411,14 +421,11 @@ func (cs *clientStream) newAttemptLocked(sh stats.Handler, trInfo *traceInfo) (r func (a *csAttempt) newStream() error { cs := a.cs cs.callHdr.PreviousAttempts = cs.numRetries - s, err := a.t.NewStream(cs.ctx, cs.callHdr) + s, err := a.t.NewStream(a.ctx, cs.callHdr) if err != nil { - if _, ok := err.(transport.PerformedIOError); ok { - // Return without converting to an RPC error so retry code can - // inspect. - return err - } - return toRPCErr(err) + // Return without converting to an RPC error so retry code can + // inspect. + return err } cs.attempt.s = s cs.attempt.p = &parser{r: s} @@ -439,8 +446,7 @@ type clientStream struct { cancel context.CancelFunc // cancels all attempts - sentLast bool // sent an end stream - beginTime time.Time + sentLast bool // sent an end stream methodConfig *MethodConfig @@ -480,6 +486,7 @@ type clientStream struct { // csAttempt implements a single transport stream attempt within a // clientStream. type csAttempt struct { + ctx context.Context cs *clientStream t transport.ClientTransport s *transport.Stream @@ -498,6 +505,7 @@ type csAttempt struct { trInfo *traceInfo statsHandler stats.Handler + beginTime time.Time } func (cs *clientStream) commitAttemptLocked() { @@ -515,46 +523,57 @@ func (cs *clientStream) commitAttempt() { } // shouldRetry returns nil if the RPC should be retried; otherwise it returns -// the error that should be returned by the operation. -func (cs *clientStream) shouldRetry(err error) error { - unprocessed := false +// the error that should be returned by the operation. If the RPC should be +// retried, the bool indicates whether it is being retried transparently. 
+func (cs *clientStream) shouldRetry(err error) (bool, error) { if cs.attempt.s == nil { - pioErr, ok := err.(transport.PerformedIOError) - if ok { - // Unwrap error. - err = toRPCErr(pioErr.Err) - } else { - unprocessed = true + // Error from NewClientStream. + nse, ok := err.(*transport.NewStreamError) + if !ok { + // Unexpected, but assume no I/O was performed and the RPC is not + // fatal, so retry indefinitely. + return true, nil } - if !ok && !cs.callInfo.failFast { - // In the event of a non-IO operation error from NewStream, we - // never attempted to write anything to the wire, so we can retry - // indefinitely for non-fail-fast RPCs. - return nil + + // Unwrap and convert error. + err = toRPCErr(nse.Err) + + // Never retry DoNotRetry errors, which indicate the RPC should not be + // retried due to max header list size violation, etc. + if nse.DoNotRetry { + return false, err + } + + // In the event of a non-IO operation error from NewStream, we never + // attempted to write anything to the wire, so we can retry + // indefinitely. + if !nse.DoNotTransparentRetry { + return true, nil } } if cs.finished || cs.committed { // RPC is finished or committed; cannot retry. - return err + return false, err } // Wait for the trailers. + unprocessed := false if cs.attempt.s != nil { <-cs.attempt.s.Done() unprocessed = cs.attempt.s.Unprocessed() } if cs.firstAttempt && unprocessed { // First attempt, stream unprocessed: transparently retry. 
- return nil + return true, nil } if cs.cc.dopts.disableRetry { - return err + return false, err } pushback := 0 hasPushback := false if cs.attempt.s != nil { if !cs.attempt.s.TrailersOnly() { - return err + return false, err } // TODO(retry): Move down if the spec changes to not check server pushback @@ -565,13 +584,13 @@ func (cs *clientStream) shouldRetry(err error) error { if pushback, e = strconv.Atoi(sps[0]); e != nil || pushback < 0 { channelz.Infof(logger, cs.cc.channelzID, "Server retry pushback specified to abort (%q).", sps[0]) cs.retryThrottler.throttle() // This counts as a failure for throttling. - return err + return false, err } hasPushback = true } else if len(sps) > 1 { channelz.Warningf(logger, cs.cc.channelzID, "Server retry pushback specified multiple values (%q); not retrying.", sps) cs.retryThrottler.throttle() // This counts as a failure for throttling. - return err + return false, err } } @@ -584,16 +603,16 @@ func (cs *clientStream) shouldRetry(err error) error { rp := cs.methodConfig.RetryPolicy if rp == nil || !rp.RetryableStatusCodes[code] { - return err + return false, err } // Note: the ordering here is important; we count this as a failure // only if the code matched a retryable code. if cs.retryThrottler.throttle() { - return err + return false, err } if cs.numRetries+1 >= rp.MaxAttempts { - return err + return false, err } var dur time.Duration @@ -616,23 +635,24 @@ func (cs *clientStream) shouldRetry(err error) error { select { case <-t.C: cs.numRetries++ - return nil + return false, nil case <-cs.ctx.Done(): t.Stop() - return status.FromContextError(cs.ctx.Err()).Err() + return false, status.FromContextError(cs.ctx.Err()).Err() } } // Returns nil if a retry was performed and succeeded; error otherwise. 
func (cs *clientStream) retryLocked(lastErr error) error { for { - cs.attempt.finish(lastErr) - if err := cs.shouldRetry(lastErr); err != nil { + cs.attempt.finish(toRPCErr(lastErr)) + isTransparent, err := cs.shouldRetry(lastErr) + if err != nil { cs.commitAttemptLocked() return err } cs.firstAttempt = false - if err := cs.newAttemptLocked(nil, nil); err != nil { + if err := cs.newAttemptLocked(isTransparent); err != nil { return err } if lastErr = cs.replayBufferLocked(); lastErr == nil { @@ -653,7 +673,11 @@ func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) for { if cs.committed { cs.mu.Unlock() - return op(cs.attempt) + // toRPCErr is used in case the error from the attempt comes from + // NewClientStream, which intentionally doesn't return a status + // error to allow for further inspection; all other errors should + // already be status errors. + return toRPCErr(op(cs.attempt)) } a := cs.attempt cs.mu.Unlock() @@ -918,7 +942,7 @@ func (a *csAttempt) sendMsg(m interface{}, hdr, payld, data []byte) error { return io.EOF } if a.statsHandler != nil { - a.statsHandler.HandleRPC(cs.ctx, outPayload(true, m, data, payld, time.Now())) + a.statsHandler.HandleRPC(a.ctx, outPayload(true, m, data, payld, time.Now())) } if channelz.IsOn() { a.t.IncrMsgSent() @@ -966,7 +990,7 @@ func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) { a.mu.Unlock() } if a.statsHandler != nil { - a.statsHandler.HandleRPC(cs.ctx, &stats.InPayload{ + a.statsHandler.HandleRPC(a.ctx, &stats.InPayload{ Client: true, RecvTime: time.Now(), Payload: m, @@ -1028,12 +1052,12 @@ func (a *csAttempt) finish(err error) { if a.statsHandler != nil { end := &stats.End{ Client: true, - BeginTime: a.cs.beginTime, + BeginTime: a.beginTime, EndTime: time.Now(), Trailer: tr, Error: err, } - a.statsHandler.HandleRPC(a.cs.ctx, end) + a.statsHandler.HandleRPC(a.ctx, end) } if a.trInfo != nil && a.trInfo.tr != nil { if err == nil { diff --git 
a/vendor/google.golang.org/grpc/tap/tap.go b/vendor/google.golang.org/grpc/tap/tap.go index caea1ebed..dbf34e6bb 100644 --- a/vendor/google.golang.org/grpc/tap/tap.go +++ b/vendor/google.golang.org/grpc/tap/tap.go @@ -37,16 +37,16 @@ type Info struct { // TODO: More to be added. } -// ServerInHandle defines the function which runs before a new stream is created -// on the server side. If it returns a non-nil error, the stream will not be -// created and a RST_STREAM will be sent back to the client with REFUSED_STREAM. -// The client will receive an RPC error "code = Unavailable, desc = stream -// terminated by RST_STREAM with error code: REFUSED_STREAM". +// ServerInHandle defines the function which runs before a new stream is +// created on the server side. If it returns a non-nil error, the stream will +// not be created and an error will be returned to the client. If the error +// returned is a status error, that status code and message will be used, +// otherwise PermissionDenied will be the code and err.Error() will be the +// message. // // It's intended to be used in situations where you don't want to waste the -// resources to accept the new stream (e.g. rate-limiting). And the content of -// the error will be ignored and won't be sent back to the client. For other -// general usages, please use interceptors. +// resources to accept the new stream (e.g. rate-limiting). For other general +// usages, please use interceptors. // // Note that it is executed in the per-connection I/O goroutine(s) instead of // per-RPC goroutine. Therefore, users should NOT have any diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index c3b87eb5a..48594bc24 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. 
-const Version = "1.37.0" +const Version = "1.41.0" diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh index dcd939bb3..d923187a7 100644 --- a/vendor/google.golang.org/grpc/vet.sh +++ b/vendor/google.golang.org/grpc/vet.sh @@ -32,26 +32,14 @@ PATH="${HOME}/go/bin:${GOROOT}/bin:${PATH}" go version if [[ "$1" = "-install" ]]; then - # Check for module support - if go help mod >& /dev/null; then - # Install the pinned versions as defined in module tools. - pushd ./test/tools - go install \ - golang.org/x/lint/golint \ - golang.org/x/tools/cmd/goimports \ - honnef.co/go/tools/cmd/staticcheck \ - github.com/client9/misspell/cmd/misspell - popd - else - # Ye olde `go get` incantation. - # Note: this gets the latest version of all tools (vs. the pinned versions - # with Go modules). - go get -u \ - golang.org/x/lint/golint \ - golang.org/x/tools/cmd/goimports \ - honnef.co/go/tools/cmd/staticcheck \ - github.com/client9/misspell/cmd/misspell - fi + # Install the pinned versions as defined in module tools. + pushd ./test/tools + go install \ + golang.org/x/lint/golint \ + golang.org/x/tools/cmd/goimports \ + honnef.co/go/tools/cmd/staticcheck \ + github.com/client9/misspell/cmd/misspell + popd if [[ -z "${VET_SKIP_PROTO}" ]]; then if [[ "${TRAVIS}" = "true" ]]; then PROTOBUF_VERSION=3.14.0 @@ -101,16 +89,6 @@ not git grep "\(import \|^\s*\)\"github.com/golang/protobuf/ptypes/" -- "*.go" # - Ensure all xds proto imports are renamed to *pb or *grpc. git grep '"github.com/envoyproxy/go-control-plane/envoy' -- '*.go' ':(exclude)*.pb.go' | not grep -v 'pb "\|grpc "' -# - Check imports that are illegal in appengine (until Go 1.11). -# TODO: Remove when we drop Go 1.10 support -go list -f {{.Dir}} ./... | xargs go run test/go_vet/vet.go - -# - gofmt, goimports, golint (with exceptions for generated code), go vet. -gofmt -s -d -l . 2>&1 | fail_on_output -goimports -l . 2>&1 | not grep -vE "\.pb\.go" -golint ./... 
2>&1 | not grep -vE "/testv3\.pb\.go:" -go vet -all ./... - misspell -error . # - Check that generated proto files are up to date. @@ -120,12 +98,22 @@ if [[ -z "${VET_SKIP_PROTO}" ]]; then (git status; git --no-pager diff; exit 1) fi -# - Check that our modules are tidy. -if go help mod >& /dev/null; then - find . -name 'go.mod' | xargs -IXXX bash -c 'cd $(dirname XXX); go mod tidy' +# - gofmt, goimports, golint (with exceptions for generated code), go vet, +# go mod tidy. +# Perform these checks on each module inside gRPC. +for MOD_FILE in $(find . -name 'go.mod'); do + MOD_DIR=$(dirname ${MOD_FILE}) + pushd ${MOD_DIR} + go vet -all ./... | fail_on_output + gofmt -s -d -l . 2>&1 | fail_on_output + goimports -l . 2>&1 | not grep -vE "\.pb\.go" + golint ./... 2>&1 | not grep -vE "/testv3\.pb\.go:" + + go mod tidy git status --porcelain 2>&1 | fail_on_output || \ (git status; git --no-pager diff; exit 1) -fi + popd +done # - Collection of static analysis checks # diff --git a/vendor/gopkg.in/yaml.v2/go.mod b/vendor/gopkg.in/yaml.v2/go.mod deleted file mode 100644 index 2cbb85aea..000000000 --- a/vendor/gopkg.in/yaml.v2/go.mod +++ /dev/null @@ -1,5 +0,0 @@ -module gopkg.in/yaml.v2 - -go 1.15 - -require gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 diff --git a/vendor/gopkg.in/yaml.v3/go.mod b/vendor/gopkg.in/yaml.v3/go.mod deleted file mode 100644 index f407ea321..000000000 --- a/vendor/gopkg.in/yaml.v3/go.mod +++ /dev/null @@ -1,5 +0,0 @@ -module "gopkg.in/yaml.v3" - -require ( - "gopkg.in/check.v1" v0.0.0-20161208181325-20d25e280405 -) diff --git a/vendor/modules.txt b/vendor/modules.txt index 956f2da57..fab7e6974 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,84 +1,80 @@ -# github.com/RoaringBitmap/roaring v0.4.23 +# github.com/RoaringBitmap/roaring v0.9.4 +## explicit; go 1.14 github.com/RoaringBitmap/roaring +github.com/RoaringBitmap/roaring/internal +# github.com/axiomhq/hyperloglog v0.0.0-20191112132149-a4c4c47bc57f +## explicit; go 1.12 
+github.com/axiomhq/hyperloglog # github.com/beorn7/perks v1.0.1 +## explicit; go 1.11 github.com/beorn7/perks/quantile -# github.com/blevesearch/bleve/v2 v2.0.3 -## explicit -github.com/blevesearch/bleve/v2 -github.com/blevesearch/bleve/v2/analysis -github.com/blevesearch/bleve/v2/analysis/analyzer/keyword -github.com/blevesearch/bleve/v2/analysis/analyzer/standard -github.com/blevesearch/bleve/v2/analysis/datetime/flexible -github.com/blevesearch/bleve/v2/analysis/datetime/optional -github.com/blevesearch/bleve/v2/analysis/lang/en -github.com/blevesearch/bleve/v2/analysis/token/lowercase -github.com/blevesearch/bleve/v2/analysis/token/porter -github.com/blevesearch/bleve/v2/analysis/token/stop -github.com/blevesearch/bleve/v2/analysis/tokenizer/single -github.com/blevesearch/bleve/v2/analysis/tokenizer/unicode -github.com/blevesearch/bleve/v2/document -github.com/blevesearch/bleve/v2/geo -github.com/blevesearch/bleve/v2/index/scorch -github.com/blevesearch/bleve/v2/index/scorch/mergeplan -github.com/blevesearch/bleve/v2/index/upsidedown -github.com/blevesearch/bleve/v2/index/upsidedown/store/boltdb -github.com/blevesearch/bleve/v2/index/upsidedown/store/gtreap -github.com/blevesearch/bleve/v2/mapping -github.com/blevesearch/bleve/v2/numeric -github.com/blevesearch/bleve/v2/registry -github.com/blevesearch/bleve/v2/search -github.com/blevesearch/bleve/v2/search/collector -github.com/blevesearch/bleve/v2/search/facet -github.com/blevesearch/bleve/v2/search/highlight -github.com/blevesearch/bleve/v2/search/highlight/format/html -github.com/blevesearch/bleve/v2/search/highlight/fragmenter/simple -github.com/blevesearch/bleve/v2/search/highlight/highlighter/html -github.com/blevesearch/bleve/v2/search/highlight/highlighter/simple -github.com/blevesearch/bleve/v2/search/query -github.com/blevesearch/bleve/v2/search/scorer -github.com/blevesearch/bleve/v2/search/searcher -github.com/blevesearch/bleve/v2/size -# github.com/blevesearch/bleve_index_api v1.0.0 
-github.com/blevesearch/bleve_index_api +# github.com/bits-and-blooms/bitset v1.2.0 +## explicit; go 1.14 +github.com/bits-and-blooms/bitset # github.com/blevesearch/go-porterstemmer v1.0.3 +## explicit; go 1.13 github.com/blevesearch/go-porterstemmer -# github.com/blevesearch/mmap-go v1.0.2 +# github.com/blevesearch/mmap-go v1.0.3 +## explicit; go 1.13 github.com/blevesearch/mmap-go -# github.com/blevesearch/scorch_segment_api/v2 v2.0.1 -github.com/blevesearch/scorch_segment_api/v2 # github.com/blevesearch/segment v0.9.0 +## explicit; go 1.13 github.com/blevesearch/segment # github.com/blevesearch/snowballstem v0.9.0 +## explicit; go 1.13 github.com/blevesearch/snowballstem github.com/blevesearch/snowballstem/english -# github.com/blevesearch/upsidedown_store_api v1.0.1 -## explicit -github.com/blevesearch/upsidedown_store_api -# github.com/blevesearch/vellum v1.0.3 +# github.com/blevesearch/vellum v1.0.7 +## explicit; go 1.13 github.com/blevesearch/vellum github.com/blevesearch/vellum/levenshtein github.com/blevesearch/vellum/regexp github.com/blevesearch/vellum/utf8 -# github.com/blevesearch/zapx/v11 v11.2.0 -github.com/blevesearch/zapx/v11 -# github.com/blevesearch/zapx/v12 v12.2.0 -github.com/blevesearch/zapx/v12 -# github.com/blevesearch/zapx/v13 v13.2.0 -github.com/blevesearch/zapx/v13 -# github.com/blevesearch/zapx/v14 v14.2.0 -github.com/blevesearch/zapx/v14 -# github.com/blevesearch/zapx/v15 v15.2.0 -github.com/blevesearch/zapx/v15 +# github.com/blugelabs/bluge v0.1.7 +## explicit; go 1.13 +github.com/blugelabs/bluge +github.com/blugelabs/bluge/analysis +github.com/blugelabs/bluge/analysis/analyzer +github.com/blugelabs/bluge/analysis/lang/en +github.com/blugelabs/bluge/analysis/token +github.com/blugelabs/bluge/analysis/tokenizer +github.com/blugelabs/bluge/index +github.com/blugelabs/bluge/index/lock +github.com/blugelabs/bluge/index/mergeplan +github.com/blugelabs/bluge/numeric +github.com/blugelabs/bluge/numeric/geo +github.com/blugelabs/bluge/search 
+github.com/blugelabs/bluge/search/aggregations +github.com/blugelabs/bluge/search/collector +github.com/blugelabs/bluge/search/searcher +github.com/blugelabs/bluge/search/similarity +# github.com/blugelabs/bluge_segment_api v0.2.0 +## explicit; go 1.14 +github.com/blugelabs/bluge_segment_api +# github.com/blugelabs/ice v0.2.0 +## explicit; go 1.12 +github.com/blugelabs/ice +# github.com/blugelabs/query_string v0.2.0 +## explicit; go 1.14 +github.com/blugelabs/query_string +# github.com/caio/go-tdigest v3.1.0+incompatible +## explicit +github.com/caio/go-tdigest +# github.com/cespare/xxhash/v2 v2.1.1 +## explicit; go 1.11 +github.com/cespare/xxhash/v2 # github.com/davecgh/go-spew v1.1.1 +## explicit github.com/davecgh/go-spew/spew -# github.com/dgrijalva/jwt-go v3.2.1-0.20200107013213-dc14462fd587+incompatible +# github.com/dgryski/go-metro v0.0.0-20180109044635-280f6062b5bc ## explicit -github.com/dgrijalva/jwt-go +github.com/dgryski/go-metro # github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91 +## explicit github.com/dlclark/regexp2 github.com/dlclark/regexp2/syntax -# github.com/dop251/goja v0.0.0-20210406175830-1b11a6af686d -## explicit +# github.com/dop251/goja v0.0.0-20211022113120-dc8c55024d06 +## explicit; go 1.14 github.com/dop251/goja github.com/dop251/goja/ast github.com/dop251/goja/file @@ -88,44 +84,47 @@ github.com/dop251/goja/parser github.com/dop251/goja/token github.com/dop251/goja/unistring # github.com/felixge/httpsnoop v1.0.1 +## explicit; go 1.13 github.com/felixge/httpsnoop # github.com/ghodss/yaml v1.0.0 -github.com/ghodss/yaml -# github.com/glycerine/go-unsnap-stream v0.0.0-20190901134440-81cf024a9e0a ## explicit -github.com/glycerine/go-unsnap-stream +github.com/ghodss/yaml # github.com/go-sourcemap/sourcemap v2.1.3+incompatible +## explicit github.com/go-sourcemap/sourcemap github.com/go-sourcemap/sourcemap/internal/base64vlq # github.com/gofrs/uuid v4.0.0+incompatible ## explicit github.com/gofrs/uuid -# 
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b +# github.com/golang-jwt/jwt/v4 v4.1.0 +## explicit; go 1.15 +github.com/golang-jwt/jwt/v4 +# github.com/golang/glog v1.0.0 +## explicit; go 1.11 github.com/golang/glog # github.com/golang/protobuf v1.5.2 -## explicit +## explicit; go 1.9 github.com/golang/protobuf/descriptor github.com/golang/protobuf/proto github.com/golang/protobuf/protoc-gen-go/descriptor github.com/golang/protobuf/ptypes github.com/golang/protobuf/ptypes/any github.com/golang/protobuf/ptypes/duration -github.com/golang/protobuf/ptypes/struct github.com/golang/protobuf/ptypes/timestamp -github.com/golang/protobuf/ptypes/wrappers # github.com/golang/snappy v0.0.1 +## explicit github.com/golang/snappy # github.com/gorilla/handlers v1.5.1 -## explicit +## explicit; go 1.14 github.com/gorilla/handlers # github.com/gorilla/mux v1.8.0 -## explicit +## explicit; go 1.12 github.com/gorilla/mux # github.com/gorilla/websocket v1.4.2 -## explicit +## explicit; go 1.12 github.com/gorilla/websocket -# github.com/grpc-ecosystem/grpc-gateway/v2 v2.3.0 -## explicit +# github.com/grpc-ecosystem/grpc-gateway/v2 v2.6.0 +## explicit; go 1.14 github.com/grpc-ecosystem/grpc-gateway/v2/internal/casing github.com/grpc-ecosystem/grpc-gateway/v2/internal/codegenerator github.com/grpc-ecosystem/grpc-gateway/v2/internal/descriptor @@ -140,92 +139,97 @@ github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/internal/genopena github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options github.com/grpc-ecosystem/grpc-gateway/v2/runtime github.com/grpc-ecosystem/grpc-gateway/v2/utilities -# github.com/heroiclabs/nakama-common v1.19.1-0.20211028165853-d67f8b2631f6 -## explicit +# github.com/heroiclabs/nakama-common v0.0.0-20211029002510-769d7938e21f +## explicit; go 1.14 github.com/heroiclabs/nakama-common/api github.com/heroiclabs/nakama-common/rtapi github.com/heroiclabs/nakama-common/runtime # github.com/jackc/chunkreader/v2 v2.0.1 +## explicit; go 
1.12 github.com/jackc/chunkreader/v2 -# github.com/jackc/pgconn v1.8.1 -## explicit +# github.com/jackc/pgconn v1.10.0 +## explicit; go 1.12 github.com/jackc/pgconn github.com/jackc/pgconn/internal/ctxwatch github.com/jackc/pgconn/stmtcache # github.com/jackc/pgerrcode v0.0.0-20201024163028-a0d42d470451 -## explicit +## explicit; go 1.12 github.com/jackc/pgerrcode # github.com/jackc/pgio v1.0.0 +## explicit; go 1.12 github.com/jackc/pgio # github.com/jackc/pgpassfile v1.0.0 +## explicit; go 1.12 github.com/jackc/pgpassfile -# github.com/jackc/pgproto3/v2 v2.0.6 +# github.com/jackc/pgproto3/v2 v2.1.1 +## explicit; go 1.12 github.com/jackc/pgproto3/v2 # github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b +## explicit; go 1.14 github.com/jackc/pgservicefile -# github.com/jackc/pgtype v1.7.0 -## explicit +# github.com/jackc/pgtype v1.8.1 +## explicit; go 1.13 github.com/jackc/pgtype -# github.com/jackc/pgx/v4 v4.11.0 -## explicit +# github.com/jackc/pgx/v4 v4.13.0 +## explicit; go 1.13 github.com/jackc/pgx/v4 github.com/jackc/pgx/v4/internal/sanitize github.com/jackc/pgx/v4/stdlib -# github.com/m3db/prometheus_client_golang v0.8.1 -## explicit -github.com/m3db/prometheus_client_golang/prometheus -github.com/m3db/prometheus_client_golang/prometheus/promhttp -# github.com/m3db/prometheus_client_model v0.1.0 -## explicit -github.com/m3db/prometheus_client_model/go -# github.com/m3db/prometheus_common v0.1.0 -## explicit -github.com/m3db/prometheus_common/expfmt -github.com/m3db/prometheus_common/internal/bitbucket.org/ww/goautoneg -github.com/m3db/prometheus_common/model -# github.com/m3db/prometheus_procfs v0.8.1 -## explicit -github.com/m3db/prometheus_procfs # github.com/matttproud/golang_protobuf_extensions v1.0.1 +## explicit github.com/matttproud/golang_protobuf_extensions/pbutil # github.com/mschoch/smat v0.2.0 +## explicit; go 1.13 github.com/mschoch/smat -# github.com/philhofer/fwd v1.0.0 -github.com/philhofer/fwd # github.com/pkg/errors v0.9.1 +## 
explicit github.com/pkg/errors # github.com/pmezard/go-difflib v1.0.0 +## explicit github.com/pmezard/go-difflib/difflib +# github.com/prometheus/client_golang v1.11.0 +## explicit; go 1.13 +github.com/prometheus/client_golang/prometheus +github.com/prometheus/client_golang/prometheus/internal +github.com/prometheus/client_golang/prometheus/promhttp +# github.com/prometheus/client_model v0.2.0 +## explicit; go 1.9 +github.com/prometheus/client_model/go +# github.com/prometheus/common v0.26.0 +## explicit; go 1.11 +github.com/prometheus/common/expfmt +github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg +github.com/prometheus/common/model +# github.com/prometheus/procfs v0.6.0 +## explicit; go 1.13 +github.com/prometheus/procfs +github.com/prometheus/procfs/internal/fs +github.com/prometheus/procfs/internal/util # github.com/rubenv/sql-migrate v0.0.0-20210408115534-a32ed26c37ea -## explicit +## explicit; go 1.16 github.com/rubenv/sql-migrate github.com/rubenv/sql-migrate/sqlparse -# github.com/steveyen/gtreap v0.1.0 -## explicit -github.com/steveyen/gtreap # github.com/stretchr/testify v1.7.0 -## explicit +## explicit; go 1.13 github.com/stretchr/testify/assert -# github.com/tinylib/msgp v1.1.2 -## explicit -github.com/tinylib/msgp/msgp -# github.com/uber-go/tally v3.3.17+incompatible -## explicit -github.com/uber-go/tally -github.com/uber-go/tally/prometheus -# github.com/willf/bitset v1.1.10 -github.com/willf/bitset +# github.com/twmb/murmur3 v1.1.6 +## explicit; go 1.11 +github.com/twmb/murmur3 +# github.com/uber-go/tally/v4 v4.0.1 +## explicit; go 1.15 +github.com/uber-go/tally/v4 +github.com/uber-go/tally/v4/internal/identity +github.com/uber-go/tally/v4/prometheus # github.com/ziutek/mymysql v1.5.4 ## explicit -# go.etcd.io/bbolt v1.3.5 -go.etcd.io/bbolt -# go.uber.org/atomic v1.7.0 -## explicit +# go.uber.org/atomic v1.9.0 +## explicit; go 1.13 go.uber.org/atomic # go.uber.org/multierr v1.6.0 +## explicit; go 1.12 go.uber.org/multierr -# 
go.uber.org/zap v1.16.0 -## explicit +# go.uber.org/zap v1.19.1 +## explicit; go 1.13 go.uber.org/zap go.uber.org/zap/buffer go.uber.org/zap/internal/bufferpool @@ -233,23 +237,26 @@ go.uber.org/zap/internal/color go.uber.org/zap/internal/exit go.uber.org/zap/zapcore go.uber.org/zap/zaptest/observer -# golang.org/x/crypto v0.0.0-20210506145944-38f3c27a63bf -## explicit +# golang.org/x/crypto v0.0.0-20210921155107-089bfa567519 +## explicit; go 1.17 golang.org/x/crypto/bcrypt golang.org/x/crypto/blowfish golang.org/x/crypto/pbkdf2 -# golang.org/x/net v0.0.0-20210226172049-e18ecbb05110 +# golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4 +## explicit; go 1.11 golang.org/x/net/http/httpguts golang.org/x/net/http2 golang.org/x/net/http2/hpack golang.org/x/net/idna golang.org/x/net/internal/timeseries golang.org/x/net/trace -# golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c +# golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1 +## explicit; go 1.17 golang.org/x/sys/internal/unsafeheader golang.org/x/sys/unix golang.org/x/sys/windows -# golang.org/x/text v0.3.6 +# golang.org/x/text v0.3.7 +## explicit; go 1.17 golang.org/x/text/cases golang.org/x/text/collate golang.org/x/text/internal @@ -265,14 +272,14 @@ golang.org/x/text/transform golang.org/x/text/unicode/bidi golang.org/x/text/unicode/norm golang.org/x/text/width -# google.golang.org/genproto v0.0.0-20210224155714-063164c882e6 -## explicit +# google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83 +## explicit; go 1.11 google.golang.org/genproto/googleapis/api/annotations google.golang.org/genproto/googleapis/api/httpbody google.golang.org/genproto/googleapis/rpc/status google.golang.org/genproto/protobuf/field_mask -# google.golang.org/grpc v1.37.0 -## explicit +# google.golang.org/grpc v1.41.0 +## explicit; go 1.14 google.golang.org/grpc google.golang.org/grpc/attributes google.golang.org/grpc/backoff @@ -310,6 +317,7 @@ google.golang.org/grpc/internal/status google.golang.org/grpc/internal/syscall 
google.golang.org/grpc/internal/transport google.golang.org/grpc/internal/transport/networktype +google.golang.org/grpc/internal/xds/env google.golang.org/grpc/keepalive google.golang.org/grpc/metadata google.golang.org/grpc/peer @@ -319,10 +327,10 @@ google.golang.org/grpc/stats google.golang.org/grpc/status google.golang.org/grpc/tap # google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0 -## explicit +## explicit; go 1.9 google.golang.org/grpc/cmd/protoc-gen-go-grpc # google.golang.org/protobuf v1.27.1 -## explicit +## explicit; go 1.9 google.golang.org/protobuf/cmd/protoc-gen-go google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo google.golang.org/protobuf/compiler/protogen @@ -364,12 +372,14 @@ google.golang.org/protobuf/types/known/timestamppb google.golang.org/protobuf/types/known/wrapperspb google.golang.org/protobuf/types/pluginpb # gopkg.in/gorp.v1 v1.7.2 +## explicit gopkg.in/gorp.v1 # gopkg.in/natefinch/lumberjack.v2 v2.0.0-20190411184413-94d9e492cc53 ## explicit gopkg.in/natefinch/lumberjack.v2 # gopkg.in/yaml.v2 v2.4.0 -## explicit +## explicit; go 1.15 gopkg.in/yaml.v2 # gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b +## explicit gopkg.in/yaml.v3 -- GitLab